# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to perform analyses of
the local environments (e.g., finding near neighbors)
of single sites in molecules and structures.
"""
import json
import math
import os
import warnings
from bisect import bisect_left
from collections import defaultdict, namedtuple
from copy import deepcopy
from functools import lru_cache
from math import acos, asin, atan2, cos, exp, fabs, pi, pow, sin, sqrt
from typing import List, Optional, Union, Dict, Any
try:
import ruamel.yaml as yaml
except ImportError:
try:
import ruamel_yaml as yaml # type: ignore # noqa
except ImportError:
import yaml # type: ignore # noqa
import numpy as np
from monty.dev import requires
from monty.serialization import loadfn
from scipy.spatial import Voronoi
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import IStructure, Structure
from pymatgen.analysis.bond_valence import BV_PARAMS, BVAnalyzer
from pymatgen.analysis.molecule_structure_comparator import CovalentRadius
from pymatgen.core.sites import PeriodicSite, Site
from pymatgen.core.structure import PeriodicNeighbor
try:
from openbabel import openbabel as ob
except Exception:
ob = None
__author__ = "Shyue Ping Ong, Geoffroy Hautier, Sai Jayaraman, "
__author__ += "Nils E. R. Zimmermann, Bharat Medasani, Evan Spotte-Smith"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Nils E. R. Zimmermann"
__email__ = "nils.e.r.zimmermann@gmail.com"
__status__ = "Production"
__date__ = "August 17, 2017"
_directory = os.path.join(os.path.dirname(__file__))
with open(os.path.join(_directory, "op_params.yaml"), "rt") as f:
default_op_params = yaml.safe_load(f)
with open(os.path.join(_directory, "cn_opt_params.yaml"), "r") as f:
cn_opt_params = yaml.safe_load(f)
with open(os.path.join(_directory, "ionic_radii.json"), "r") as fp:
_ion_radii = json.load(fp)
class ValenceIonicRadiusEvaluator:
"""
Computes site valences and ionic radii for a structure using bond valence
analyzer
"""
def __init__(self, structure):
"""
Args:
structure: pymatgen.core.structure.Structure
"""
self._structure = structure.copy()
self._valences = self._get_valences()
self._ionic_radii = self._get_ionic_radii()
@property
def radii(self):
"""
List of ionic radii of elements in the order of sites.
"""
el = [site.species_string for site in self._structure.sites]
radii_dict = dict(zip(el, self._ionic_radii))
return radii_dict
@property
def valences(self):
"""
List of oxidation states of elements in the order of sites.
"""
el = [site.species_string for site in self._structure.sites]
valence_dict = dict(zip(el, self._valences))
return valence_dict
@property
def structure(self):
"""
Returns oxidation state decorated structure.
"""
return self._structure.copy()
def _get_ionic_radii(self):
"""
Computes ionic radii of elements for all sites in the structure.
If valence is zero, atomic radius is used.
"""
radii = []
vnn = VoronoiNN()
def nearest_key(sorted_vals, skey):
n = bisect_left(sorted_vals, skey)
if n == len(sorted_vals):
return sorted_vals[-1]
if n == 0:
return sorted_vals[0]
before = sorted_vals[n - 1]
after = sorted_vals[n]
if after - skey < skey - before:
return after
return before
for i in range(len(self._structure.sites)):
site = self._structure.sites[i]
if isinstance(site.specie, Element):
radius = site.specie.atomic_radius
# Handle elements with no atomic_radius
# by using calculated values instead.
if radius is None:
radius = site.specie.atomic_radius_calculated
if radius is None:
raise ValueError("cannot assign radius to element {}".format(site.specie))
radii.append(radius)
continue
el = site.specie.symbol
oxi_state = int(round(site.specie.oxi_state))
coord_no = int(round(vnn.get_cn(self._structure, i)))
try:
tab_oxi_states = sorted(map(int, _ion_radii[el].keys()))
oxi_state = nearest_key(tab_oxi_states, oxi_state)
radius = _ion_radii[el][str(oxi_state)][str(coord_no)]
except KeyError:
if vnn.get_cn(self._structure, i) - coord_no > 0:
new_coord_no = coord_no + 1
else:
new_coord_no = coord_no - 1
try:
radius = _ion_radii[el][str(oxi_state)][str(new_coord_no)]
coord_no = new_coord_no
except Exception:
tab_coords = sorted(map(int, _ion_radii[el][str(oxi_state)].keys()))
new_coord_no = nearest_key(tab_coords, coord_no)
# Walk the tabulated coordination numbers to locate coord_no;
# a separate name avoids shadowing the outer site index i.
idx = 0
for val in tab_coords:
if val > coord_no:
break
idx = idx + 1
if idx == len(tab_coords):
key = str(tab_coords[-1])
radius = _ion_radii[el][str(oxi_state)][key]
elif idx == 0:
key = str(tab_coords[0])
radius = _ion_radii[el][str(oxi_state)][key]
else:
key = str(tab_coords[idx - 1])
radius1 = _ion_radii[el][str(oxi_state)][key]
key = str(tab_coords[idx])
radius2 = _ion_radii[el][str(oxi_state)][key]
radius = (radius1 + radius2) / 2
# implement complex checks later
radii.append(radius)
return radii
def _get_valences(self):
"""
Computes ionic valences of elements for all sites in the structure.
"""
try:
bv = BVAnalyzer()
self._structure = bv.get_oxi_state_decorated_structure(self._structure)
valences = bv.get_valences(self._structure)
except Exception:
try:
bv = BVAnalyzer(symm_tol=0.0)
self._structure = bv.get_oxi_state_decorated_structure(self._structure)
valences = bv.get_valences(self._structure)
except Exception:
valences = []
for site in self._structure.sites:
if len(site.specie.common_oxidation_states) > 0:
valences.append(site.specie.common_oxidation_states[0])
# Handle noble gas species
# which have no entries in common_oxidation_states.
else:
valences.append(0)
if sum(valences):
valences = [0] * self._structure.num_sites
else:
self._structure.add_oxidation_state_by_site(valences)
return valences
class NearNeighbors:
"""
Base class to determine near neighbors that typically include nearest
neighbors and others that are within some tolerable distance.
"""
def __eq__(self, other):
if isinstance(other, type(self)):
return self.__dict__ == other.__dict__
return False
def __hash__(self):
return len(self.__dict__.items())
@property
def structures_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Structure
objects?
"""
raise NotImplementedError("structures_allowed" " is not defined!")
@property
def molecules_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Molecule
objects?
"""
raise NotImplementedError("molecules_allowed" " is not defined!")
@property
def extend_structure_molecules(self):
"""
Boolean property: Do Molecules need to be converted to Structures to use
this NearNeighbors class? Note: this property is not defined for classes
for which molecules_allowed == False.
"""
raise NotImplementedError("extend_structures_molecule is not defined!")
def get_cn(self, structure, n, use_weights=False):
"""
Get coordination number, CN, of site with index n in structure.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine CN.
use_weights (boolean): flag indicating whether (True)
to use weights for computing the coordination number
or not (False, default: each coordinated site has equal
weight).
Returns:
cn (integer or float): coordination number.
"""
siw = self.get_nn_info(structure, n)
return sum([e["weight"] for e in siw]) if use_weights else len(siw)
def get_cn_dict(self, structure, n, use_weights=False):
"""
Get coordination number, CN, of each element bonded to site with index n in structure
Args:
structure (Structure): input structure
n (integer): index of site for which to determine CN.
use_weights (boolean): flag indicating whether (True)
to use weights for computing the coordination number
or not (False, default: each coordinated site has equal
weight).
Returns:
cn (dict): dictionary of CN of each element bonded to site
"""
siw = self.get_nn_info(structure, n)
cn_dict = {}
for i in siw:
site_element = i["site"].species_string
if site_element not in cn_dict:
if use_weights:
cn_dict[site_element] = i["weight"]
else:
cn_dict[site_element] = 1
else:
if use_weights:
cn_dict[site_element] += i["weight"]
else:
cn_dict[site_element] += 1
return cn_dict
def get_nn(self, structure, n):
"""
Get near neighbors of site with index n in structure.
Args:
structure (Structure): input structure.
n (integer): index of site in structure for which to determine
neighbors.
Returns:
sites (list of Site objects): near neighbors.
"""
return [e["site"] for e in self.get_nn_info(structure, n)]
def get_weights_of_nn_sites(self, structure, n):
"""
Get weight associated with each near neighbor of site with
index n in structure.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine the weights.
Returns:
weights (list of floats): near-neighbor weights.
"""
return [e["weight"] for e in self.get_nn_info(structure, n)]
def get_nn_images(self, structure, n):
"""
Get image location of all near neighbors of site with index n in
structure.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine the image
location of near neighbors.
Returns:
images (list of 3D integer array): image locations of
near neighbors.
"""
return [e["image"] for e in self.get_nn_info(structure, n)]
def get_nn_info(self, structure, n):
"""
Get all near-neighbor sites as well as the associated image locations
and weights of the site with index n.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near-neighbor
information.
Returns:
siw (list of dicts): each dictionary provides information
about a single near neighbor, where key 'site' gives
access to the corresponding Site object, 'image' gives
the image location, and 'weight' provides the weight
that a given near-neighbor site contributes
to the coordination number (1 or smaller), 'site_index'
gives index of the corresponding site in
the original structure.
"""
raise NotImplementedError("get_nn_info(structure, n)" " is not defined!")
def get_all_nn_info(self, structure):
"""Get a listing of all neighbors for all sites in a structure
Args:
structure (Structure): Input structure
Return:
List of NN site information for each site in the structure. Each
entry has the same format as `get_nn_info`
"""
return [self.get_nn_info(structure, n) for n in range(len(structure))]
def get_nn_shell_info(self, structure, site_idx, shell):
"""Get a certain nearest neighbor shell for a certain site.
Determines all non-backtracking paths through the neighbor network
computed by `get_nn_info`. The weight is determined by multiplying
the weight of the neighbor at each hop through the network. For
example, a 2nd-nearest-neighbor that has a weight of 1 from its
1st-nearest-neighbor and weight 0.5 from the original site will
be assigned a weight of 0.5.
As this calculation may involve computing the nearest neighbors of
atoms multiple times, the calculation starts by computing all of the
neighbor info and then calling `_get_nn_shell_info`. If you are likely
to call this method for more than one site, consider calling `get_all_nn_info`
first and then calling `_get_nn_shell_info` yourself.
Args:
structure (Structure): Input structure
site_idx (int): index of site for which to determine neighbor
information.
shell (int): Which neighbor shell to retrieve (1 == 1st NN shell)
Returns:
list of dictionaries. Each entry in the list is information about
a certain neighbor in the structure, in the same format as
`get_nn_info`.
"""
all_nn_info = self.get_all_nn_info(structure)
sites = self._get_nn_shell_info(structure, all_nn_info, site_idx, shell)
# Update the site positions
# Did not do this during the NN computation, because that can be slower
output = []
for info in sites:
orig_site = structure[info["site_index"]]
info["site"] = PeriodicSite(
orig_site.species,
np.add(orig_site.frac_coords, info["image"]),
structure.lattice,
properties=orig_site.properties,
)
output.append(info)
return output
def _get_nn_shell_info(
self,
structure,
all_nn_info,
site_idx,
shell,
_previous_steps=frozenset(),
_cur_image=(0, 0, 0),
):
"""Private method for computing the neighbor shell information
Args:
structure (Structure) - Structure being assessed
all_nn_info ([[dict]]) - Results from `get_all_nn_info`
site_idx (int) - index of site for which to determine neighbor
information.
shell (int) - Which neighbor shell to retrieve (1 == 1st NN shell)
_previous_steps ({(site_idx, image)}) - Internal use only: Set of
sites that have already been traversed.
_cur_image (tuple) - Internal use only: Image coordinates of current atom
Returns:
list of dictionaries. Each entry in the list is information about
a certain neighbor in the structure, in the same format as
`get_nn_info`. Does not update the site positions
"""
if shell <= 0:
raise ValueError("Shell must be positive")
# Append this site to the list of previously-visited sites
_previous_steps = _previous_steps.union({(site_idx, _cur_image)})
# Get all the neighbors of this site
possible_steps = list(all_nn_info[site_idx])
for i, step in enumerate(possible_steps):
# Update the image information
# Note: We do not update the site position yet, as making a
# PeriodicSite for each intermediate step is too costly
step = dict(step)
step["image"] = tuple(np.add(step["image"], _cur_image).tolist())
possible_steps[i] = step
# Get only the non-backtracking steps
allowed_steps = [x for x in possible_steps if (x["site_index"], x["image"]) not in _previous_steps]
# If we are the last step (i.e., shell == 1), done!
if shell == 1:
# No further work needed, just package these results
return allowed_steps
# If not, Get the N-1 NNs of these allowed steps
terminal_neighbors = [
self._get_nn_shell_info(
structure,
all_nn_info,
x["site_index"],
shell - 1,
_previous_steps,
x["image"],
)
for x in allowed_steps
]
# Each allowed step results in many terminal neighbors
# And, different first steps might result in the same neighbor
# Now, we condense those neighbors into a single entry per neighbor
all_sites = dict()
for first_site, term_sites in zip(allowed_steps, terminal_neighbors):
for term_site in term_sites:
key = (term_site["site_index"], tuple(term_site["image"]))
# The weight for this site is equal to the weight of the
# first step multiplied by the weight of the terminal neighbor
term_site["weight"] *= first_site["weight"]
# Check if this site is already known
value = all_sites.get(key)
if value is not None:
# If so, add to its weight
value["weight"] += term_site["weight"]
else:
# If not, prepare to add it
value = term_site
all_sites[key] = value
return list(all_sites.values())
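# Worked example of the bookkeeping above (editorial note): a 2nd-shell site
# reached through a first hop of weight 0.5 and a second hop of weight 1.0
# gets weight 0.5 * 1.0 = 0.5; if the same (site_index, image) is also reached
# by another path of weight 0.25, the entries merge and the weights add to 0.75.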
@staticmethod
def _get_image(structure, site):
"""Private convenience method for get_nn_info,
gives lattice image from provided PeriodicSite and Structure.
Image is defined as displacement from original site in structure to a given site.
i.e. if structure has a site at (-0.1, 1.0, 0.3), then (0.9, 0, 2.3) -> jimage = (1, -1, 2).
Note that this method takes O(number of sites) time, due to the search for the original site.
Args:
structure: Structure Object
site: PeriodicSite Object
Returns:
image: ((int)*3) Lattice image
"""
original_site = structure[NearNeighbors._get_original_site(structure, site)]
image = np.around(np.subtract(site.frac_coords, original_site.frac_coords))
image = tuple(image.astype(int))
return image
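# Worked example (editorial note), matching the docstring case above:
# np.around([0.9 - (-0.1), 0.0 - 1.0, 2.3 - 0.3]) -> [1., -1., 2.],
# so the returned image is (1, -1, 2).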
@staticmethod
def _get_original_site(structure, site):
"""Private convenience method for get_nn_info,
gives original site index from ProvidedPeriodicSite."""
for i, s in enumerate(structure):
if site.is_periodic_image(s):
return i
raise Exception("Site not found!")
def get_bonded_structure(self, structure, decorate=False, weights=True):
"""
Obtain a StructureGraph object using this NearNeighbor
class. Requires the optional dependency networkx
(pip install networkx).
Args:
structure: Structure object.
decorate (bool): whether to annotate site properties
with order parameters using neighbors determined by
this NearNeighbor class
weights (bool): whether to include edge weights from
NearNeighbor class in StructureGraph
Returns: a pymatgen.analysis.graphs.StructureGraph object
"""
# requires optional dependency which is why it's not a top-level import
from pymatgen.analysis.graphs import StructureGraph
if decorate:
# Decorate all sites in the underlying structure
# with site properties that provides information on the
# coordination number and coordination pattern based
# on the (current) structure of this graph.
order_parameters = [self.get_local_order_parameters(structure, n) for n in range(len(structure))]
structure.add_site_property("order_parameters", order_parameters)
sg = StructureGraph.with_local_env_strategy(structure, self, weights=weights)
return sg
def get_local_order_parameters(self, structure, n):
"""
Calculate those local structure order parameters for
the given site whose ideal CN corresponds to the
underlying motif (e.g., CN=4, then calculate the
square planar, tetrahedral, see-saw-like,
rectangular see-saw-like order parameters).
Args:
structure: Structure object
n (int): site index.
Returns (Dict[str, float]):
A dict of order parameters (values) and the
underlying motif type (keys; for example, tetrahedral).
"""
# code from @nisse3000, moved here from graphs to avoid circular
# import, also makes sense to have this as a general NN method
cn = self.get_cn(structure, n)
int_cn = [int(k_cn) for k_cn in cn_opt_params.keys()]
if cn in int_cn:
names = list(cn_opt_params[cn].keys())
types = []
params = []
for name in names:
types.append(cn_opt_params[cn][name][0])
tmp = cn_opt_params[cn][name][1] if len(cn_opt_params[cn][name]) > 1 else None
params.append(tmp)
lostops = LocalStructOrderParams(types, parameters=params)
sites = [structure[n]] + self.get_nn(structure, n)
lostop_vals = lostops.get_order_parameters(sites, 0, indices_neighs=list(range(1, cn + 1)))
d = {}
for i, lostop in enumerate(lostop_vals):
d[names[i]] = lostop
return d
return None
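# Illustrative sketch (editorial addition): order parameters for rock-salt
# NaCl, where the Voronoi CN of 6 selects the CN=6 motifs (e.g. octahedral)
# from cn_opt_params. Imports and the 5.64 A lattice constant are assumptions
# for the example only.
#
#     from pymatgen.core import Lattice, Structure
#     nacl = Structure.from_spacegroup(
#         "Fm-3m", Lattice.cubic(5.64), ["Na", "Cl"],
#         [[0, 0, 0], [0.5, 0.5, 0.5]])
#     print(VoronoiNN().get_local_order_parameters(nacl, 0))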
class VoronoiNN(NearNeighbors):
"""
Uses a Voronoi algorithm to determine near neighbors for each site in a
structure.
"""
def __init__(
self,
tol=0,
targets=None,
cutoff=13.0,
allow_pathological=False,
weight="solid_angle",
extra_nn_info=True,
compute_adj_neighbors=True,
):
"""
Args:
tol (float): tolerance parameter for near-neighbor finding. Faces that are
smaller than `tol` fraction of the largest face are not included in the
tessellation. (default: 0).
targets (Element or list of Elements): target element(s).
cutoff (float): cutoff radius in Angstrom to look for near-neighbor
atoms. Defaults to 13.0.
allow_pathological (bool): whether to allow infinite vertices in
determination of Voronoi coordination.
weight (string) - Statistic used to weigh neighbors (see the statistics
available in get_voronoi_polyhedra)
extra_nn_info (bool) - Add all polyhedron info to `get_nn_info`
compute_adj_neighbors (bool) - Whether to compute which neighbors are
adjacent. Turn off for faster performance
"""
super().__init__()
self.tol = tol
self.cutoff = cutoff
self.allow_pathological = allow_pathological
self.targets = targets
self.weight = weight
self.extra_nn_info = extra_nn_info
self.compute_adj_neighbors = compute_adj_neighbors
@property
def structures_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Structure
objects?
"""
return True
@property
def molecules_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Molecule
objects?
"""
return False
def get_voronoi_polyhedra(self, structure, n):
"""
Gives a weighted polyhedron around a site.
See ref: A Proposed Rigorous Definition of Coordination Number,
M. O'Keeffe, Acta Cryst. (1979). A35, 772-775
Args:
structure (Structure): structure for which to evaluate the
coordination environment.
n (integer): site index.
Returns:
A dict of sites sharing a common Voronoi facet with the site
n mapped to a dictionary containing statistics about the facet:
- solid_angle - Solid angle subtended by face
- angle_normalized - Solid angle normalized such that the
face with the largest solid angle equals 1
- area - Area of the facet
- face_dist - Distance between site n and the facet
- volume - Volume of Voronoi cell for this face
- n_verts - Number of vertices on the facet
"""
# Assemble the list of neighbors used in the tessellation
# Gets all atoms within a certain radius
if self.targets is None:
targets = structure.composition.elements
else:
targets = self.targets
center = structure[n]
cutoff = self.cutoff
# max cutoff is the longest diagonal of the cell + room for noise
corners = [[1, 1, 1], [-1, 1, 1], [1, -1, 1], [1, 1, -1]]
d_corners = [np.linalg.norm(structure.lattice.get_cartesian_coords(c)) for c in corners]
max_cutoff = max(d_corners) + 0.01
while True:
try:
neighbors = structure.get_sites_in_sphere(center.coords, cutoff)
neighbors = [i[0] for i in sorted(neighbors, key=lambda s: s[1])]
# Run the Voronoi tessellation
qvoronoi_input = [s.coords for s in neighbors]
voro = Voronoi(qvoronoi_input) # can give seg fault if cutoff is too small
# Extract data about the site in question
cell_info = self._extract_cell_info(structure, 0, neighbors, targets, voro, self.compute_adj_neighbors)
break
except RuntimeError as e:
if cutoff >= max_cutoff:
if e.args and "vertex" in e.args[0]:
# pass through the error raised by _extract_cell_info
raise e
raise RuntimeError("Error in Voronoi neighbor finding; " "max cutoff exceeded")
cutoff = min(cutoff * 2, max_cutoff + 0.001)
return cell_info
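# Illustrative sketch (editorial addition): inspecting the facet statistics
# returned above, for a user-supplied Structure `struct`.
#
#     poly = VoronoiNN().get_voronoi_polyhedra(struct, 0)
#     for nn in poly.values():
#         print(nn["site"].species_string, nn["solid_angle"],
#               nn["area"], nn["n_verts"])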
def get_all_voronoi_polyhedra(self, structure):
"""Get the Voronoi polyhedra for all site in a simulation cell
Args:
structure (Structure): Structure to be evaluated
Returns:
A dict of sites sharing a common Voronoi facet with the site
n mapped to a dictionary containing statistics about the facet:
- solid_angle - Solid angle subtended by face
- angle_normalized - Solid angle normalized such that the
face with the largest solid angle equals 1
- area - Area of the facet
- face_dist - Distance between site n and the facet
- volume - Volume of Voronoi cell for this face
- n_verts - Number of vertices on the facet
"""
# Special case: For atoms with 1 site, the atom in the root image is not
# included in the get_all_neighbors output. Rather than creating logic to add
# that atom to the neighbor list, which requires detecting whether it will be
# translated to reside within the unit cell before neighbor detection, it is
# less complex to just call the one-by-one operation
if len(structure) == 1:
return [self.get_voronoi_polyhedra(structure, 0)]
# Assemble the list of neighbors used in the tessellation
if self.targets is None:
targets = structure.composition.elements
else:
targets = self.targets
# Initialize the list of sites with the atoms in the origin unit cell
# The `get_all_neighbors` function returns neighbors for each site's image in
# the original unit cell. We start off with these central atoms to ensure they
# are included in the tessellation
sites = [x.to_unit_cell() for x in structure]
indices = [(i, 0, 0, 0) for i, _ in enumerate(structure)]
# Get all neighbors within a certain cutoff
# Record both the list of these neighbors, and the site indices
all_neighs = structure.get_all_neighbors(self.cutoff, include_index=True, include_image=True)
for neighs in all_neighs:
sites.extend([x[0] for x in neighs])
indices.extend([(x[2],) + x[3] for x in neighs])
# Get the non-duplicates (using the site indices for numerical stability)
indices = np.array(indices, dtype=np.int_)
indices, uniq_inds = np.unique(indices, return_index=True, axis=0)
sites = [sites[i] for i in uniq_inds]
# Sort array such that atoms in the root image are first
# Exploit the fact that the array is sorted by the unique operation such that
# the images associated with atom 0 are first, followed by atom 1, etc.
(root_images,) = np.nonzero(np.abs(indices[:, 1:]).max(axis=1) == 0)
del indices # Save memory (tessellations can be costly)
# Run the tessellation
qvoronoi_input = [s.coords for s in sites]
voro = Voronoi(qvoronoi_input)
# Get the information for each neighbor
return [
self._extract_cell_info(structure, i, sites, targets, voro, self.compute_adj_neighbors)
for i in root_images.tolist()
]
def _extract_cell_info(self, structure, site_idx, sites, targets, voro, compute_adj_neighbors=False):
"""Get the information about a certain atom from the results of a tessellation
Args:
structure (Structure) - Structure being assessed
site_idx (int) - Index of the atom in question
sites ([Site]) - List of all sites in the tessellation
targets ([Element]) - Target elements
voro - Output of qvoronoi
compute_adj_neighbors (boolean) - Whether to compute which neighbors are adjacent
Returns:
A dict of sites sharing a common Voronoi facet. Key is facet id
(not useful) and values are dictionaries containing statistics
about the facet:
- site: Pymatgen site
- solid_angle - Solid angle subtended by face
- angle_normalized - Solid angle normalized such that the
face with the largest solid angle equals 1
- area - Area of the facet
- face_dist - Distance between site n and the facet
- volume - Volume of Voronoi cell for this face
- n_verts - Number of vertices on the facet
- adj_neighbors - Facet id's for the adjacent neighbors
"""
# Get the coordinates of every vertex
all_vertices = voro.vertices
# Get the coordinates of the central site
center_coords = sites[site_idx].coords
# Iterate through all the faces in the tessellation
results = {}
for nn, vind in voro.ridge_dict.items():
# Get only those that include the site in question
if site_idx in nn:
other_site = nn[0] if nn[1] == site_idx else nn[1]
if -1 in vind:
# -1 indices correspond to the Voronoi cell
# missing a face
if self.allow_pathological:
continue
raise RuntimeError(
"This structure is pathological, infinite vertex in the Voronoi construction"
)
# Get the solid angle of the face
facets = [all_vertices[i] for i in vind]
angle = solid_angle(center_coords, facets)
# Compute the volume associated with this face
volume = 0
# qvoronoi returns vertices in CCW order, so we can break
# the face up into segments (0,1,2), (0,2,3), ... to compute
# its area, where each number is a vertex index
for j, k in zip(vind[1:], vind[2:]):
volume += vol_tetra(
center_coords,
all_vertices[vind[0]],
all_vertices[j],
all_vertices[k],
)
# Compute the distance of the site to the face
face_dist = np.linalg.norm(center_coords - sites[other_site].coords) / 2
# Compute the area of the face (knowing V=Ad/3)
face_area = 3 * volume / face_dist
# Compute the normal of the facet
normal = np.subtract(sites[other_site].coords, center_coords)
normal /= np.linalg.norm(normal)
# Store by face index
results[other_site] = {
"site": sites[other_site],
"normal": normal,
"solid_angle": angle,
"volume": volume,
"face_dist": face_dist,
"area": face_area,
"n_verts": len(vind),
}
# If we are computing which neighbors are adjacent, store the vertices
if compute_adj_neighbors:
results[other_site]["verts"] = vind
# all sites should have at least two connected ridges in a periodic system
if not results:
raise ValueError("No Voronoi neighbours found for site - try increasing cutoff")
# Get only target elements
resultweighted = {}
for nn_index, nstats in results.items():
# Check if this is a target site
nn = nstats["site"]
if nn.is_ordered:
if nn.specie in targets:
resultweighted[nn_index] = nstats
else: # the nn site is disordered
for disordered_sp in nn.species.keys():
if disordered_sp in targets:
resultweighted[nn_index] = nstats
# If desired, determine which neighbors are adjacent
if compute_adj_neighbors:
# Initialize storage for the adjacent neighbors
adj_neighbors = {i: [] for i in resultweighted}
# Find the neighbors that are adjacent by finding those
# that contain exactly two vertices
for a_ind, a_nninfo in resultweighted.items():
# Get the indices for this site
a_verts = set(a_nninfo["verts"])
# Loop over all neighbors that have an index lower than this one
# The goal here is to exploit the fact that neighbor adjacency is
# symmetric (if A is adj to B, B is adj to A)
for b_ind, b_nninfo in resultweighted.items():
if b_ind > a_ind:
continue
if len(a_verts.intersection(b_nninfo["verts"])) == 2:
adj_neighbors[a_ind].append(b_ind)
adj_neighbors[b_ind].append(a_ind)
# Store the results in the nn_info
for key, neighbors in adj_neighbors.items():
resultweighted[key]["adj_neighbors"] = neighbors
return resultweighted
def get_nn_info(self, structure, n):
"""
Get all near-neighbor sites as well as the associated image locations
and weights of the site with index n in structure
using Voronoi decomposition.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near-neighbor
sites.
Returns:
siw (list of tuples (Site, array, float)): tuples, each one
of which represents a coordinated site, its image location,
and its weight.
"""
# Run the tessellation
nns = self.get_voronoi_polyhedra(structure, n)
# Extract the NN info
return self._extract_nn_info(structure, nns)
def get_all_nn_info(self, structure):
"""
Args:
structure (Structure): input structure.
Returns:
All nn info for all sites.
"""
all_voro_cells = self.get_all_voronoi_polyhedra(structure)
return [self._extract_nn_info(structure, cell) for cell in all_voro_cells]
def _extract_nn_info(self, structure, nns):
"""Given Voronoi NNs, extract the NN info in the form needed by NearestNeighbors
Args:
structure (Structure): Structure being evaluated
nns ([dicts]): Nearest neighbor information for a structure
Returns:
(list of tuples (Site, array, float)): See nn_info
"""
# Get the target information
if self.targets is None:
targets = structure.composition.elements
else:
targets = self.targets
# Extract the NN info
siw = []
max_weight = max(nn[self.weight] for nn in nns.values())
for nstats in nns.values():
site = nstats["site"]
if nstats[self.weight] > self.tol * max_weight and _is_in_targets(site, targets):
nn_info = {
"site": site,
"image": self._get_image(structure, site),
"weight": nstats[self.weight] / max_weight,
"site_index": self._get_original_site(structure, site),
}
if self.extra_nn_info:
# Add all the information about the site
poly_info = nstats
del poly_info["site"]
nn_info["poly_info"] = poly_info
siw.append(nn_info)
return siw
class IsayevNN(VoronoiNN):
"""
Uses the algorithm defined in 10.1038/ncomms15679
Sites are considered neighbors if (i) they share a Voronoi facet and (ii) the
bond distance is less than the sum of the Cordero covalent radii + 0.25 Å.
"""
def __init__(
self,
tol: float = 0.25,
targets: Optional[Union[Element, List[Element]]] = None,
cutoff: float = 13.0,
allow_pathological: bool = False,
extra_nn_info: bool = True,
compute_adj_neighbors: bool = True,
):
"""
Args:
tol: Tolerance in Å for bond distances that are considered coordinated.
targets: Target element(s).
cutoff: Cutoff radius in Angstrom to look for near-neighbor atoms.
allow_pathological: Whether to allow infinite vertices in Voronoi
coordination.
extra_nn_info: Add all polyhedron info to `get_nn_info`.
compute_adj_neighbors: Whether to compute which neighbors are adjacent. Turn
off for faster performance.
"""
super().__init__()
self.tol = tol
self.cutoff = cutoff
self.allow_pathological = allow_pathological
self.targets = targets
self.extra_nn_info = extra_nn_info
self.compute_adj_neighbors = compute_adj_neighbors
def get_nn_info(self, structure: Structure, n: int) -> List[Dict[str, Any]]:
"""
Get all near-neighbor site information.
Gets the associated image locations and weights of the site with index n
in structure using Voronoi decomposition and distance cutoff.
Args:
structure: Input structure.
n: Index of site for which to determine near-neighbor sites.
Returns:
List of dicts containing the near-neighbor information. Each dict has the
keys:
- "site": The near-neighbor site.
- "image": The periodic image of the near-neighbor site.
- "weight": The face weight of the Voronoi decomposition.
- "site_index": The index of the near-neighbor site in the original
structure.
"""
nns = self.get_voronoi_polyhedra(structure, n)
return self._filter_nns(structure, n, nns)
def get_all_nn_info(self, structure: Structure) -> List[List[Dict[str, Any]]]:
"""
Args:
structure (Structure): input structure.
Returns:
List of near neighbor information for each site. See get_nn_info for the
format of the data for each site.
"""
all_nns = self.get_all_voronoi_polyhedra(structure)
return [self._filter_nns(structure, n, nns) for n, nns in enumerate(all_nns)]
def _filter_nns(self, structure: Structure, n: int, nns: Dict[str, Any]) -> List[Dict[str, Any]]:
"""Extract and filter the NN info into the format needed by NearestNeighbors.
Args:
structure: The structure.
n: The central site index.
nns: Nearest neighbor information for the structure.
Returns:
See get_nn_info for the format of the returned data.
"""
# Get the target information
if self.targets is None:
targets = structure.composition.elements
else:
targets = self.targets
site = structure[n]
# Extract the NN info
siw = []
max_weight = max(nn["area"] for nn in nns.values())
for nstats in nns.values():
nn = nstats.pop("site")
# use the Cordero radius if it is available, otherwise the atomic radius
cov_distance = _get_default_radius(site) + _get_default_radius(nn)
nn_distance = np.linalg.norm(site.coords - nn.coords)
# by default VoronoiNN only returns neighbors which share a Voronoi facet
# therefore we don't need to do additional filtering based on the weight
if _is_in_targets(nn, targets) and nn_distance <= cov_distance + self.tol:
nn_info = {
"site": nn,
"image": self._get_image(structure, nn),
"weight": nstats["area"] / max_weight,
"site_index": self._get_original_site(structure, nn),
}
if self.extra_nn_info:
nn_info["poly_info"] = nstats
siw.append(nn_info)
return siw
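# Illustrative sketch (editorial addition): every neighbor kept by IsayevNN
# satisfies the covalent-radius criterion, which can be checked directly
# for a user-supplied Structure `struct`.
#
#     inn = IsayevNN(tol=0.25)
#     for d in inn.get_nn_info(struct, 0):
#         dist = np.linalg.norm(struct[0].coords - d["site"].coords)
#         assert dist <= (_get_default_radius(struct[0])
#                         + _get_default_radius(d["site"]) + 0.25)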
def _is_in_targets(site, targets):
"""
Test whether a site contains elements in the target list
Args:
site (Site): Site to assess
targets ([Element]) List of elements
Returns:
(boolean) Whether this site contains a certain list of elements
"""
elems = _get_elements(site)
for elem in elems:
if elem not in targets:
return False
return True
def _get_elements(site):
"""
Get the list of elements for a Site
Args:
site (Site): Site to assess
Returns:
[Element]: List of elements
"""
try:
if isinstance(site.specie, Element):
return [site.specie]
return [Element(site.specie)]
except Exception:
return site.species.elements
class JmolNN(NearNeighbors):
"""
Determine near-neighbor sites and coordination number using an emulation
of Jmol's default autoBond() algorithm. This version of the algorithm
does not take into account any information regarding known charge
states.
"""
def __init__(self, tol=0.45, min_bond_distance=0.4, el_radius_updates=None):
"""
Args:
tol (float): tolerance parameter for bond determination
(default: 0.45).
min_bond_distance (float): minimum bond distance for consideration
(default: 0.4).
el_radius_updates: (dict) symbol->float to override default atomic
radii table values
"""
self.tol = tol
self.min_bond_distance = min_bond_distance
# Load elemental radii table
bonds_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "bonds_jmol_ob.yaml")
with open(bonds_file, "r") as f:
self.el_radius = yaml.safe_load(f)
# Update any user preference elemental radii
if el_radius_updates:
self.el_radius.update(el_radius_updates)
@property
def structures_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Structure
objects?
"""
return True
@property
def molecules_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Molecule
objects?
"""
return True
@property
def extend_structure_molecules(self):
"""
Boolean property: Do Molecules need to be converted to Structures to use
this NearNeighbors class? Note: this property is not defined for classes
for which molecules_allowed == False.
"""
return True
def get_max_bond_distance(self, el1_sym, el2_sym):
"""
Use Jmol algorithm to determine bond length from atomic parameters
Args:
el1_sym: (str) symbol of atom 1
el2_sym: (str) symbol of atom 2
Returns: (float) max bond length
"""
return sqrt((self.el_radius[el1_sym] + self.el_radius[el2_sym] + self.tol) ** 2)
def get_nn_info(self, structure, n):
"""
Get all near-neighbor sites as well as the associated image locations
and weights of the site with index n using the bond identification
algorithm underlying Jmol.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near
neighbors.
Returns:
siw (list of tuples (Site, array, float)): tuples, each one
of which represents a neighbor site, its image location,
and its weight.
"""
site = structure[n]
# Determine relevant bond lengths based on atomic radii table
bonds = {}
for el in structure.composition.elements:
bonds[site.specie, el] = self.get_max_bond_distance(site.specie.symbol, el.symbol)
# Search for neighbors up to max bond length + tolerance
max_rad = max(bonds.values()) + self.tol
min_rad = min(bonds.values())
siw = []
for nn in structure.get_neighbors(site, max_rad):
dist = nn.nn_distance
# Confirm neighbor based on bond length specific to atom pair
if dist <= (bonds[(site.specie, nn.specie)]) and (nn.nn_distance > self.min_bond_distance):
weight = min_rad / dist
siw.append(
{
"site": nn,
"image": self._get_image(structure, nn),
"weight": weight,
"site_index": self._get_original_site(structure, nn),
}
)
return siw
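# Illustrative sketch (editorial addition): the maximum bond length for a C-H
# pair under the Jmol criterion is the sum of the tabulated radii plus tol.
#
#     jnn = JmolNN(tol=0.45)
#     d_max = jnn.get_max_bond_distance("C", "H")
#     # d_max == jnn.el_radius["C"] + jnn.el_radius["H"] + 0.45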
class MinimumDistanceNN(NearNeighbors):
"""
Determine near-neighbor sites and coordination number using the
nearest neighbor(s) at distance, d_min, plus all neighbors
within a distance (1 + tol) * d_min, where tol is a
(relative) distance tolerance parameter.
"""
def __init__(self, tol=0.1, cutoff=10.0, get_all_sites=False):
"""
Args:
tol (float): tolerance parameter for neighbor identification
(default: 0.1).
cutoff (float): cutoff radius in Angstrom to look for trial
near-neighbor sites (default: 10.0).
get_all_sites (boolean): If this is set to True then the neighbor
sites are only determined by the cutoff radius, tol is ignored
"""
self.tol = tol
self.cutoff = cutoff
self.get_all_sites = get_all_sites
@property
def structures_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Structure
objects?
"""
return True
@property
def molecules_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Molecule
objects?
"""
return True
@property
def extend_structure_molecules(self):
"""
Boolean property: Do Molecules need to be converted to Structures to use
this NearNeighbors class? Note: this property is not defined for classes
for which molecules_allowed == False.
"""
return True
def get_nn_info(self, structure, n):
"""
Get all near-neighbor sites as well as the associated image locations
and weights of the site with index n using the closest neighbor
distance-based method.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near
neighbors.
Returns:
siw (list of tuples (Site, array, float)): tuples, each one
of which represents a neighbor site, its image location,
and its weight.
"""
site = structure[n]
neighs_dists = structure.get_neighbors(site, self.cutoff)
siw = []
if self.get_all_sites:
for nn in neighs_dists:
w = nn.nn_distance
siw.append(
{
"site": nn,
"image": self._get_image(structure, nn),
"weight": w,
"site_index": self._get_original_site(structure, nn),
}
)
else:
min_dist = min([nn.nn_distance for nn in neighs_dists])
for nn in neighs_dists:
dist = nn.nn_distance
if dist < (1.0 + self.tol) * min_dist:
w = min_dist / dist
siw.append(
{
"site": nn,
"image": self._get_image(structure, nn),
"weight": w,
"site_index": self._get_original_site(structure, nn),
}
)
return siw
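# Illustrative sketch (editorial addition): with the default tol=0.1, only
# neighbors closer than 1.1 * d_min survive, each weighted by d_min / dist.
# `cu` is the fcc Cu structure from the earlier sketch.
#
#     mdnn = MinimumDistanceNN(tol=0.1)
#     for d in mdnn.get_nn_info(cu, 0):
#         print(d["site_index"], round(d["weight"], 3))  # 12 entries, weight 1.0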
class OpenBabelNN(NearNeighbors):
"""
Determine near-neighbor sites and bond orders using OpenBabel API.
NOTE: This strategy is only appropriate for molecules, and not for
structures.
"""
@requires(
ob,
"BabelMolAdaptor requires openbabel to be installed with "
"Python bindings. Please get it at http://openbabel.org "
"(version >=3.0.0).",
)
def __init__(self, order=True):
"""
Args:
order (bool): True if bond order should be returned as a weight, False
if bond length should be used as a weight.
"""
self.order = order
@property
def structures_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Structure
objects?
"""
return False
@property
def molecules_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Molecule
objects?
"""
return True
@property
def extend_structure_molecules(self):
"""
Boolean property: Do Molecules need to be converted to Structures to use
this NearNeighbors class? Note: this property is not defined for classes
for which molecules_allowed == False.
"""
return False
def get_nn_info(self, structure, n):
"""
Get all near-neighbor sites and weights (orders) of bonds for a given
atom.
Args:
structure: Molecule object.
n: index of site for which to determine near neighbors.
Returns:
(dict): representing a neighboring site and the type of
bond present between site n and the neighboring site.
"""
from pymatgen.io.babel import BabelMolAdaptor
obmol = BabelMolAdaptor(structure).openbabel_mol
siw = []
# Get only the atom of interest
site_atom = [
a
for i, a in enumerate(ob.OBMolAtomDFSIter(obmol))
if [a.GetX(), a.GetY(), a.GetZ()] == list(structure[n].coords)
][0]
for neighbor in ob.OBAtomAtomIter(site_atom):
coords = [neighbor.GetX(), neighbor.GetY(), neighbor.GetZ()]
site = [a for a in structure if list(a.coords) == coords][0]
index = structure.index(site)
bond = site_atom.GetBond(neighbor)
if self.order:
obmol.PerceiveBondOrders()
weight = bond.GetBondOrder()
else:
weight = bond.GetLength()
siw.append(
{
"site": site,
"image": (0, 0, 0),
"weight": weight,
"site_index": index,
}
)
return siw
def get_bonded_structure(self, structure, decorate=False):
"""
Obtain a MoleculeGraph object using this NearNeighbor
class. Requires the optional dependency networkx
(pip install networkx).
Args:
structure: Molecule object.
decorate (bool): whether to annotate site properties
with order parameters using neighbors determined by
this NearNeighbor class
Returns: a pymatgen.analysis.graphs.MoleculeGraph object
"""
# requires optional dependency which is why it's not a top-level import
from pymatgen.analysis.graphs import MoleculeGraph
if decorate:
# Decorate all sites in the underlying structure
# with site properties that provides information on the
# coordination number and coordination pattern based
# on the (current) structure of this graph.
order_parameters = [self.get_local_order_parameters(structure, n) for n in range(len(structure))]
structure.add_site_property("order_parameters", order_parameters)
mg = MoleculeGraph.with_local_env_strategy(structure, self)
return mg
def get_nn_shell_info(self, structure, site_idx, shell):
"""Get a certain nearest neighbor shell for a certain site.
Determines all non-backtracking paths through the neighbor network
computed by `get_nn_info`. The weight is determined by multiplying
the weight of the neighbor at each hop through the network. For
example, a 2nd-nearest-neighbor that has a weight of 1 from its
1st-nearest-neighbor and weight 0.5 from the original site will
be assigned a weight of 0.5.
As this calculation may involve computing the nearest neighbors of
atoms multiple times, the calculation starts by computing all of the
neighbor info and then calling `_get_nn_shell_info`. If you are likely
to call this method for more than one site, consider calling `get_all_nn_info`
first and then calling `_get_nn_shell_info` yourself.
Args:
structure (Molecule): Input structure
site_idx (int): index of site for which to determine neighbor
information.
shell (int): Which neighbor shell to retrieve (1 == 1st NN shell)
Returns:
list of dictionaries. Each entry in the list is information about
a certain neighbor in the structure, in the same format as
`get_nn_info`.
"""
all_nn_info = self.get_all_nn_info(structure)
sites = self._get_nn_shell_info(structure, all_nn_info, site_idx, shell)
# Update the site positions
# Did not do this during the NN computation, because that can be slower
output = []
for info in sites:
orig_site = structure[info["site_index"]]
info["site"] = Site(orig_site.species, orig_site._coords, properties=orig_site.properties)
output.append(info)
return output
class CovalentBondNN(NearNeighbors):
"""
Determine near-neighbor sites and bond orders using built-in
pymatgen.Molecule CovalentBond functionality.
NOTE: This strategy is only appropriate for molecules, and not for
structures.
"""
def __init__(self, tol=0.2, order=True):
"""
Args:
tol (float): Tolerance for covalent bond checking.
order (bool): If True (default), this class will compute bond
orders. If False, bond lengths will be computed
"""
self.tol = tol
self.order = order
self.bonds = None
@property
def structures_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Structure
objects?
"""
return False
@property
def molecules_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Molecule
objects?
"""
return True
@property
def extend_structure_molecules(self):
"""
Boolean property: Do Molecules need to be converted to Structures to use
this NearNeighbors class? Note: this property is not defined for classes
for which molecules_allowed == False.
"""
return False
def get_nn_info(self, structure, n):
"""
Get all near-neighbor sites and weights (orders) of bonds for a given
atom.
:param structure: input Molecule.
:param n: index of site for which to determine near neighbors.
:return: [dict] representing a neighboring site and the type of
bond present between site n and the neighboring site.
"""
# This is unfortunately inefficient, but is the best way to fit the
# current NearNeighbors scheme
self.bonds = structure.get_covalent_bonds(tol=self.tol)
siw = []
for bond in self.bonds:
capture_bond = False
if bond.site1 == structure[n]:
site = bond.site2
capture_bond = True
elif bond.site2 == structure[n]:
site = bond.site1
capture_bond = True
if capture_bond:
index = structure.index(site)
if self.order:
weight = bond.get_bond_order()
else:
weight = bond.length
siw.append(
{
"site": site,
"image": (0, 0, 0),
"weight": weight,
"site_index": index,
}
)
return siw
def get_bonded_structure(self, structure, decorate=False):
"""
Obtain a MoleculeGraph object using this NearNeighbor
class.
Args:
structure: Molecule object.
decorate (bool): whether to annotate site properties
with order parameters using neighbors determined by
this NearNeighbor class
Returns: a pymatgen.analysis.graphs.MoleculeGraph object
"""
# requires optional dependency which is why it's not a top-level import
from pymatgen.analysis.graphs import MoleculeGraph
if decorate:
# Decorate all sites in the underlying structure
# with site properties that provides information on the
# coordination number and coordination pattern based
# on the (current) structure of this graph.
order_parameters = [self.get_local_order_parameters(structure, n) for n in range(len(structure))]
structure.add_site_property("order_parameters", order_parameters)
mg = MoleculeGraph.with_local_env_strategy(structure, self)
return mg
def get_nn_shell_info(self, structure, site_idx, shell):
"""Get a certain nearest neighbor shell for a certain site.
Determines all non-backtracking paths through the neighbor network
computed by `get_nn_info`. The weight is determined by multiplying
the weight of the neighbor at each hop through the network. For
example, a 2nd-nearest-neighbor that has a weight of 1 from its
1st-nearest-neighbor and weight 0.5 from the original site will
be assigned a weight of 0.5.
As this calculation may involve computing the nearest neighbors of
atoms multiple times, the calculation starts by computing all of the
neighbor info and then calling `_get_nn_shell_info`. If you are likely
to call this method for more than one site, consider calling `get_all_nn_info`
first and then calling `_get_nn_shell_info` yourself.
Args:
structure (Molecule): Input structure
site_idx (int): index of site for which to determine neighbor
information.
shell (int): Which neighbor shell to retrieve (1 == 1st NN shell)
Returns:
list of dictionaries. Each entry in the list is information about
a certain neighbor in the structure, in the same format as
`get_nn_info`.
"""
all_nn_info = self.get_all_nn_info(structure)
sites = self._get_nn_shell_info(structure, all_nn_info, site_idx, shell)
# Update the site positions
# Did not do this during the NN computation, because that can be slower
output = []
for info in sites:
orig_site = structure[info["site_index"]]
info["site"] = Site(orig_site.species, orig_site._coords, properties=orig_site.properties)
output.append(info)
return output
class MinimumOKeeffeNN(NearNeighbors):
"""
Determine near-neighbor sites and coordination number using the
neighbor(s) at closest relative distance, d_min_OKeeffe, plus some
relative tolerance, where bond valence parameters from O'Keeffe's
bond valence method (J. Am. Chem. Soc. 1991, 3226-3229) are used
to calculate relative distances.
"""
def __init__(self, tol=0.1, cutoff=10.0):
"""
Args:
tol (float): tolerance parameter for neighbor identification
(default: 0.1).
cutoff (float): cutoff radius in Angstrom to look for trial
near-neighbor sites (default: 10.0).
"""
self.tol = tol
self.cutoff = cutoff
@property
def structures_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Structure
objects?
"""
return True
@property
def molecules_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Molecule
objects?
"""
return True
@property
def extend_structure_molecules(self):
"""
Boolean property: Do Molecules need to be converted to Structures to use
this NearNeighbors class? Note: this property is not defined for classes
for which molecules_allowed == False.
"""
return True
def get_nn_info(self, structure, n):
"""
Get all near-neighbor sites as well as the associated image locations
and weights of the site with index n using the closest relative
neighbor distance-based method with O'Keeffe parameters.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near
neighbors.
Returns:
siw (list of tuples (Site, array, float)): tuples, each one
of which represents a neighbor site, its image location,
and its weight.
"""
site = structure[n]
neighs_dists = structure.get_neighbors(site, self.cutoff)
try:
eln = site.specie.element
except Exception:
eln = site.species_string
reldists_neighs = []
for nn in neighs_dists:
neigh = nn
dist = nn.nn_distance
try:
el2 = neigh.specie.element
except Exception:
el2 = neigh.species_string
reldists_neighs.append([dist / get_okeeffe_distance_prediction(eln, el2), neigh])
siw = []
min_reldist = min([reldist for reldist, neigh in reldists_neighs])
for reldist, s in reldists_neighs:
if reldist < (1.0 + self.tol) * min_reldist:
w = min_reldist / reldist
siw.append(
{
"site": s,
"image": self._get_image(structure, s),
"weight": w,
"site_index": self._get_original_site(structure, s),
}
)
return siw
class MinimumVIRENN(NearNeighbors):
"""
Determine near-neighbor sites and coordination number using the
neighbor(s) at closest relative distance, d_min_VIRE, plus some
relative tolerance, where atom radii from the
ValenceIonicRadiusEvaluator (VIRE) are used
to calculate relative distances.
"""
def __init__(self, tol=0.1, cutoff=10.0):
"""
Args:
tol (float): tolerance parameter for neighbor identification
(default: 0.1).
cutoff (float): cutoff radius in Angstrom to look for trial
near-neighbor sites (default: 10.0).
"""
self.tol = tol
self.cutoff = cutoff
@property
def structures_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Structure
objects?
"""
return True
@property
def molecules_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Molecule
objects?
"""
return False
def get_nn_info(self, structure, n):
"""
Get all near-neighbor sites as well as the associated image locations
and weights of the site with index n using the closest relative
neighbor distance-based method with VIRE atomic/ionic radii.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near
neighbors.
Returns:
siw (list of tuples (Site, array, float)): tuples, each one
of which represents a neighbor site, its image location,
and its weight.
"""
vire = _get_vire(structure)
site = vire.structure[n]
neighs_dists = vire.structure.get_neighbors(site, self.cutoff)
rn = vire.radii[vire.structure[n].species_string]
reldists_neighs = []
for nn in neighs_dists:
reldists_neighs.append([nn.nn_distance / (vire.radii[nn.species_string] + rn), nn])
siw = []
min_reldist = min([reldist for reldist, neigh in reldists_neighs])
for reldist, s in reldists_neighs:
if reldist < (1.0 + self.tol) * min_reldist:
w = min_reldist / reldist
siw.append(
{
"site": s,
"image": self._get_image(vire.structure, s),
"weight": w,
"site_index": self._get_original_site(vire.structure, s),
}
)
return siw
def _get_vire(structure: Union[Structure, IStructure]):
"""Get the ValenceIonicRadiusEvaluator object for an structure taking
advantage of caching.
Args:
structure: A structure.
Returns:
Output of `ValenceIonicRadiusEvaluator(structure)`
"""
# pymatgen does not hash Structure objects, so we need
# to cast from Structure to the immutable IStructure
if isinstance(structure, Structure):
structure = IStructure.from_sites(structure)
return _get_vire_istructure(structure)
@lru_cache(maxsize=1)
def _get_vire_istructure(structure: IStructure):
"""Get the ValenceIonicRadiusEvaluator object for an immutable structure
taking advantage of caching.
Args:
structure: A structure.
Returns:
Output of `ValenceIonicRadiusEvaluator(structure)`
"""
return ValenceIonicRadiusEvaluator(structure)
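# Design note (editorial): maxsize=1 caches only the most recently evaluated
# structure, which suffices for the common pattern of calling
# MinimumVIRENN.get_nn_info repeatedly on the same structure without
# re-running the comparatively slow BVAnalyzer each time.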
def solid_angle(center, coords):
"""
Helper method to calculate the solid angle of a set of coords from the
center.
Args:
center (3x1 array): Center to measure solid angle from.
coords (Nx3 array): List of coords to determine solid angle.
Returns:
The solid angle.
"""
# Compute the displacement from the center
r = [np.subtract(c, center) for c in coords]
# Compute the magnitude of each vector
r_norm = [np.linalg.norm(i) for i in r]
# Compute the solid angle for each tetrahedron that makes up the facet
# Following: https://en.wikipedia.org/wiki/Solid_angle#Tetrahedron
angle = 0
for i in range(1, len(r) - 1):
j = i + 1
tp = np.abs(np.dot(r[0], np.cross(r[i], r[j])))
de = (
r_norm[0] * r_norm[i] * r_norm[j]
+ r_norm[j] * np.dot(r[0], r[i])
+ r_norm[i] * np.dot(r[0], r[j])
+ r_norm[0] * np.dot(r[i], r[j])
)
if de == 0:
my_angle = 0.5 * pi if tp > 0 else -0.5 * pi
else:
my_angle = np.arctan(tp / de)
angle += (my_angle if my_angle > 0 else my_angle + np.pi) * 2
return angle
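# Sanity check (editorial addition): each face of a cube subtends 4*pi/6 of the
# full sphere when viewed from the cube's center.
#
#     face = [[1, -1, -1], [1, 1, -1], [1, 1, 1], [1, -1, 1]]
#     solid_angle([0, 0, 0], face)   # -> ~2.0944 == 4 * pi / 6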
def vol_tetra(vt1, vt2, vt3, vt4):
"""
Calculate the volume of a tetrahedron, given the four vertices of vt1,
vt2, vt3 and vt4.
Args:
vt1 (array-like): coordinates of vertex 1.
vt2 (array-like): coordinates of vertex 2.
vt3 (array-like): coordinates of vertex 3.
vt4 (array-like): coordinates of vertex 4.
Returns:
(float): volume of the tetrahedron.
"""
vol_tetra = np.abs(np.dot((vt1 - vt4), np.cross((vt2 - vt4), (vt3 - vt4)))) / 6
return vol_tetra
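# Sanity check (editorial addition): the tetrahedron spanned by the origin and
# the three unit vectors has volume 1/6. Arguments must be numpy arrays, since
# the implementation subtracts them directly.
#
#     pts = [np.array(p) for p in ([0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1])]
#     vol_tetra(*pts)   # -> 0.16666...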
def get_okeeffe_params(el_symbol):
"""
Returns the elemental parameters related to atom size and
electronegativity which are used for estimating bond-valence
parameters (bond length) of pairs of atoms on the basis of data
provided in 'Atoms Sizes and Bond Lengths in Molecules and Crystals'
(O'Keeffe & Brese, 1991).
Args:
el_symbol (str): element symbol.
Returns:
(dict): atom-size ('r') and electronegativity-related ('c')
parameter.
"""
el = Element(el_symbol)
if el not in BV_PARAMS:
raise RuntimeError(
"Could not find O'Keeffe parameters for element "
'"{}" in the "BV_PARAMS" dictionary provided by pymatgen'.format(el_symbol)
)
return BV_PARAMS[el]
def get_okeeffe_distance_prediction(el1, el2):
"""
Returns an estimate of the bond valence parameter (bond length) using
the derived parameters from 'Atoms Sizes and Bond Lengths in Molecules
and Crystals' (O'Keeffe & Brese, 1991). The estimate is based on two
experimental parameters: r and c. The value of r is based on the atomic
radius, while c is (usually) the Allred-Rochow electronegativity. Values used
are *not* generated from pymatgen, and are found in
'okeeffe_params.json'.
Args:
el1, el2 (Element): two Element objects
Returns:
a float value of the predicted bond length
"""
el1_okeeffe_params = get_okeeffe_params(el1)
el2_okeeffe_params = get_okeeffe_params(el2)
r1 = el1_okeeffe_params["r"]
r2 = el2_okeeffe_params["r"]
c1 = el1_okeeffe_params["c"]
c2 = el2_okeeffe_params["c"]
return r1 + r2 - r1 * r2 * pow(sqrt(c1) - sqrt(c2), 2) / (c1 * r1 + c2 * r2)
def get_neighbors_of_site_with_index(struct, n, approach="min_dist", delta=0.1, cutoff=10.0):
"""
Returns the neighbors of a given site using a specific neighbor-finding
method.
Args:
struct (Structure): input structure.
n (int): index of site in Structure object for which motif type
is to be determined.
approach (str): type of neighbor-finding approach, where
"min_dist" will use the MinimumDistanceNN class,
"voronoi" the VoronoiNN class, "min_OKeeffe" the
MinimumOKeeffe class, and "min_VIRE" the MinimumVIRENN class.
delta (float): tolerance involved in neighbor finding.
cutoff (float): (large) radius to find tentative neighbors.
Returns: neighbor sites.
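    Example (an illustrative sketch; a hypothetical CsCl-type cell in
    which the central site has eight nearest neighbors):
        >>> from pymatgen.core.lattice import Lattice
        >>> from pymatgen.core.structure import Structure
        >>> s = Structure(Lattice.cubic(4.11), ["Cs", "Cl"],
        ...               [[0, 0, 0], [0.5, 0.5, 0.5]])
        >>> len(get_neighbors_of_site_with_index(s, 0, approach="min_dist"))
        8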
"""
if approach == "min_dist":
return MinimumDistanceNN(tol=delta, cutoff=cutoff).get_nn(struct, n)
if approach == "voronoi":
return VoronoiNN(tol=delta, cutoff=cutoff).get_nn(struct, n)
if approach == "min_OKeeffe":
return MinimumOKeeffeNN(tol=delta, cutoff=cutoff).get_nn(struct, n)
if approach == "min_VIRE":
return MinimumVIRENN(tol=delta, cutoff=cutoff).get_nn(struct, n)
raise RuntimeError("unsupported neighbor-finding method ({}).".format(approach))
def site_is_of_motif_type(struct, n, approach="min_dist", delta=0.1, cutoff=10.0, thresh=None):
"""
Returns the motif type of the site with index n in structure struct;
currently featuring "tetrahedral", "octahedral", "bcc", and "cp"
(close-packed: fcc and hcp) as well as "square pyramidal" and
"trigonal bipyramidal". If the site is not recognized,
"unrecognized" is returned. If a site should be assigned to two
different motifs, "multiple assignments" is returned.
Args:
struct (Structure): input structure.
n (int): index of site in Structure object for which motif type
is to be determined.
approach (str): type of neighbor-finding approach, where
"min_dist" will use the MinimumDistanceNN class,
"voronoi" the VoronoiNN class, "min_OKeeffe" the
MinimumOKeeffe class, and "min_VIRE" the MinimumVIRENN class.
delta (float): tolerance involved in neighbor finding.
cutoff (float): (large) radius to find tentative neighbors.
thresh (dict): thresholds for motif criteria (currently, required
keys and their default values are "qtet": 0.5,
"qoct": 0.5, "qbcc": 0.5, "q6": 0.4).
Returns: motif type (str).
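    Example (an illustrative sketch; a hypothetical bcc iron cell, which
    is expected to be recognized as the "bcc" motif):
        >>> from pymatgen.core.lattice import Lattice
        >>> from pymatgen.core.structure import Structure
        >>> s = Structure(Lattice.cubic(2.87), ["Fe", "Fe"],
        ...               [[0, 0, 0], [0.5, 0.5, 0.5]])
        >>> site_is_of_motif_type(s, 0)
        'bcc'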
"""
if thresh is None:
thresh = {
"qtet": 0.5,
"qoct": 0.5,
"qbcc": 0.5,
"q6": 0.4,
"qtribipyr": 0.8,
"qsqpyr": 0.8,
}
ops = LocalStructOrderParams(["cn", "tet", "oct", "bcc", "q6", "sq_pyr", "tri_bipyr"])
neighs_cent = get_neighbors_of_site_with_index(struct, n, approach=approach, delta=delta, cutoff=cutoff)
neighs_cent.append(struct.sites[n])
opvals = ops.get_order_parameters(
neighs_cent,
len(neighs_cent) - 1,
indices_neighs=list(range(len(neighs_cent) - 1)),
)
cn = int(opvals[0] + 0.5)
motif_type = "unrecognized"
nmotif = 0
if cn == 4 and opvals[1] > thresh["qtet"]:
motif_type = "tetrahedral"
nmotif += 1
if cn == 5 and opvals[5] > thresh["qsqpyr"]:
motif_type = "square pyramidal"
nmotif += 1
if cn == 5 and opvals[6] > thresh["qtribipyr"]:
motif_type = "trigonal bipyramidal"
nmotif += 1
if cn == 6 and opvals[2] > thresh["qoct"]:
motif_type = "octahedral"
nmotif += 1
if cn == 8 and (opvals[3] > thresh["qbcc"] and opvals[1] < thresh["qtet"]):
motif_type = "bcc"
nmotif += 1
if cn == 12 and (
opvals[4] > thresh["q6"] and opvals[1] < thresh["q6"] and opvals[2] < thresh["q6"] and opvals[3] < thresh["q6"]
):
motif_type = "cp"
nmotif += 1
if nmotif > 1:
motif_type = "multiple assignments"
return motif_type
def gramschmidt(vin, uin):
"""
Returns that part of the first input vector
that is orthogonal to the second input vector.
The output vector is not normalized.
Args:
vin (numpy array):
first input vector
uin (numpy array):
second input vector
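    Example (illustrative; removing the x-component of (1, 1, 0) along
    (1, 0, 0) leaves the unnormalized orthogonal part (0, 1, 0)):
        >>> import numpy as np
        >>> gramschmidt(np.array([1.0, 1.0, 0.0]), np.array([1.0, 0.0, 0.0]))
        array([0., 1., 0.])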
"""
vin_uin = np.inner(vin, uin)
uin_uin = np.inner(uin, uin)
if uin_uin <= 0.0:
raise ValueError("Zero or negative inner product!")
return vin - (vin_uin / uin_uin) * uin
class LocalStructOrderParams:
"""
This class permits the calculation of various types of local
structure order parameters.
"""
__supported_types = (
"cn",
"sgl_bd",
"bent",
"tri_plan",
"tri_plan_max",
"reg_tri",
"sq_plan",
"sq_plan_max",
"pent_plan",
"pent_plan_max",
"sq",
"tet",
"tet_max",
"tri_pyr",
"sq_pyr",
"sq_pyr_legacy",
"tri_bipyr",
"sq_bipyr",
"oct",
"oct_legacy",
"pent_pyr",
"hex_pyr",
"pent_bipyr",
"hex_bipyr",
"T",
"cuboct",
"cuboct_max",
"see_saw_rect",
"bcc",
"q2",
"q4",
"q6",
"oct_max",
"hex_plan_max",
"sq_face_cap_trig_pris",
)
def __init__(self, types, parameters=None, cutoff=-10.0):
"""
Args:
types ([string]): list of strings representing the types of
order parameters to be calculated. Note that multiple
mentions of the same type may occur. Currently available
types recognize following environments:
"cn": simple coordination number---normalized
if desired;
"sgl_bd": single bonds;
"bent": bent (angular) coordinations
(Zimmermann & Jain, in progress, 2017);
"T": T-shape coordinations;
"see_saw_rect": see saw-like coordinations;
"tet": tetrahedra
(Zimmermann et al., submitted, 2017);
"oct": octahedra
(Zimmermann et al., submitted, 2017);
"bcc": body-centered cubic environments (Peters,
J. Chem. Phys., 131, 244103, 2009);
"tri_plan": trigonal planar environments;
"sq_plan": square planar environments;
"pent_plan": pentagonal planar environments;
"tri_pyr": trigonal pyramids (coordinated atom is in
the center of the basal plane);
"sq_pyr": square pyramids;
"pent_pyr": pentagonal pyramids;
"hex_pyr": hexagonal pyramids;
"tri_bipyr": trigonal bipyramids;
"sq_bipyr": square bipyramids;
"pent_bipyr": pentagonal bipyramids;
"hex_bipyr": hexagonal bipyramids;
"cuboct": cuboctahedra;
"q2": motif-unspecific bond orientational order
parameter (BOOP) of weight l=2 (Steinhardt
et al., Phys. Rev. B, 28, 784-805, 1983);
"q4": BOOP of weight l=4;
"q6": BOOP of weight l=6.
"reg_tri": regular triangle with varying height
to basal plane;
"sq": square coordination (cf., "reg_tri");
"oct_legacy": original Peters-style OP recognizing
octahedral coordination environments
(Zimmermann et al., J. Am. Chem. Soc.,
137, 13352-13361, 2015) that can, however,
produce small negative values sometimes.
"sq_pyr_legacy": square pyramids (legacy);
parameters ([dict]): list of dictionaries
that store float-type parameters associated with the
definitions of the different order parameters
(length of list = number of OPs). If an entry
is None, default values are used that are read from
the op_params.yaml file. With few exceptions, 9 different
parameters are used across all OPs:
"norm": normalizing constant (used in "cn"
(default value: 1)).
"TA": target angle (TA) in fraction of 180 degrees
("bent" (1), "tet" (0.6081734479693927),
"tri_plan" (0.66666666667), "pent_plan" (0.6),
"sq_pyr_legacy" (0.5)).
"IGW_TA": inverse Gaussian width (IGW) for penalizing
angles away from the target angle in inverse
fractions of 180 degrees to ("bent" and "tet" (15),
"tri_plan" (13.5), "pent_plan" (18),
"sq_pyr_legacy" (30)).
"IGW_EP": IGW for penalizing angles away from the
equatorial plane (EP) at 90 degrees ("T", "see_saw_rect",
"oct", "sq_plan", "tri_pyr", "sq_pyr", "pent_pyr",
"hex_pyr", "tri_bipyr", "sq_bipyr", "pent_bipyr",
"hex_bipyr", and "oct_legacy" (18)).
"fac_AA": factor applied to azimuth angle (AA) in cosine
term ("T", "tri_plan", and "sq_plan" (1), "tet",
"tri_pyr", and "tri_bipyr" (1.5), "oct", "sq_pyr",
"sq_bipyr", and "oct_legacy" (2), "pent_pyr"
and "pent_bipyr" (2.5), "hex_pyr" and
"hex_bipyr" (3)).
"exp_cos_AA": exponent applied to cosine term of AA
("T", "tet", "oct", "tri_plan", "sq_plan",
"tri_pyr", "sq_pyr", "pent_pyr", "hex_pyr",
"tri_bipyr", "sq_bipyr", "pent_bipyr", "hex_bipyr",
and "oct_legacy" (2)).
"min_SPP": smallest angle (in radians) to consider
a neighbor to be
at South pole position ("see_saw_rect", "oct", "bcc",
"sq_plan", "tri_bipyr", "sq_bipyr", "pent_bipyr",
"hex_bipyr", "cuboct", and "oct_legacy"
(2.792526803190927)).
"IGW_SPP": IGW for penalizing angles away from South
pole position ("see_saw_rect", "oct", "bcc", "sq_plan",
"tri_bipyr", "sq_bipyr", "pent_bipyr", "hex_bipyr",
"cuboct", and "oct_legacy" (15)).
"w_SPP": weight for South pole position relative to
equatorial positions ("see_saw_rect" and "sq_plan" (1),
"cuboct" (1.8), "tri_bipyr" (2), "oct",
"sq_bipyr", and "oct_legacy" (3), "pent_bipyr" (4),
"hex_bipyr" (5), "bcc" (6)).
cutoff (float): Cutoff radius to determine which nearest
neighbors are supposed to contribute to the order
parameters. If the value is negative the neighboring
sites found by distance and cutoff radius are further
pruned using the get_nn method from the
VoronoiNN class.
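        Example (an illustrative sketch; just instantiation and the
        number of targeted order parameters):
            >>> ops = LocalStructOrderParams(["cn", "tet", "oct"])
            >>> ops.num_ops
            3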
"""
for t in types:
if t not in LocalStructOrderParams.__supported_types:
raise ValueError("Unknown order parameter type (" + t + ")!")
self._types = tuple(types)
self._comp_azi = False
self._params = []
for i, t in enumerate(self._types):
d = deepcopy(default_op_params[t]) if default_op_params[t] is not None else None
if parameters is None:
self._params.append(d)
elif parameters[i] is None:
self._params.append(d)
else:
self._params.append(deepcopy(parameters[i]))
self._computerijs = self._computerjks = self._geomops = False
self._geomops2 = self._boops = False
self._max_trig_order = -1
# Add here any additional flags to be used during calculation.
if "sgl_bd" in self._types:
self._computerijs = True
if not set(self._types).isdisjoint(
[
"tet",
"oct",
"bcc",
"sq_pyr",
"sq_pyr_legacy",
"tri_bipyr",
"sq_bipyr",
"oct_legacy",
"tri_plan",
"sq_plan",
"pent_plan",
"tri_pyr",
"pent_pyr",
"hex_pyr",
"pent_bipyr",
"hex_bipyr",
"T",
"cuboct",
"oct_max",
"tet_max",
"tri_plan_max",
"sq_plan_max",
"pent_plan_max",
"cuboct_max",
"bent",
"see_saw_rect",
"hex_plan_max",
"sq_face_cap_trig_pris",
]
):
self._computerijs = self._geomops = True
if "sq_face_cap_trig_pris" in self._types:
self._comp_azi = True
if not set(self._types).isdisjoint(["reg_tri", "sq"]):
self._computerijs = self._computerjks = self._geomops2 = True
if not set(self._types).isdisjoint(["q2", "q4", "q6"]):
self._computerijs = self._boops = True
if "q2" in self._types:
self._max_trig_order = 2
if "q4" in self._types:
self._max_trig_order = 4
if "q6" in self._types:
self._max_trig_order = 6
# Finish parameter treatment.
if cutoff < 0.0:
self._cutoff = -cutoff
self._voroneigh = True
elif cutoff > 0.0:
self._cutoff = cutoff
self._voroneigh = False
else:
raise ValueError("Cutoff radius is zero!")
# Further variable definitions.
self._last_nneigh = -1
self._pow_sin_t = {}
self._pow_cos_t = {}
self._sin_n_p = {}
self._cos_n_p = {}
@property
def num_ops(self):
"""
Returns:
int: the number of different order parameters that are targeted
to be calculated.
"""
return len(self._types)
@property
def last_nneigh(self):
"""
Returns:
int: the number of neighbors encountered during the most
recent order parameter calculation. A value of -1 indicates
that no such calculation has yet been performed for this
instance.
"""
        return self._last_nneigh
def compute_trigonometric_terms(self, thetas, phis):
"""
Computes trigonometric terms that are required to
calculate bond orientational order parameters using
internal variables.
Args:
thetas ([float]): polar angles of all neighbors in radians.
            phis ([float]): azimuth angles of all neighbors in radians.
                The list of azimuth angles is expected to have the same
                size as the list of polar angles; otherwise, a ValueError
                is raised. The two lists also have to be coherent in
                order: the i-th azimuth angle must belong to the same
                neighbor as the i-th polar angle.
"""
if len(thetas) != len(phis):
raise ValueError("List of polar and azimuthal angles have to be" " equal!")
self._pow_sin_t.clear()
self._pow_cos_t.clear()
self._sin_n_p.clear()
self._cos_n_p.clear()
self._pow_sin_t[1] = [sin(float(t)) for t in thetas]
self._pow_cos_t[1] = [cos(float(t)) for t in thetas]
self._sin_n_p[1] = [sin(float(p)) for p in phis]
self._cos_n_p[1] = [cos(float(p)) for p in phis]
for i in range(2, self._max_trig_order + 1):
self._pow_sin_t[i] = [e[0] * e[1] for e in zip(self._pow_sin_t[i - 1], self._pow_sin_t[1])]
self._pow_cos_t[i] = [e[0] * e[1] for e in zip(self._pow_cos_t[i - 1], self._pow_cos_t[1])]
self._sin_n_p[i] = [sin(float(i) * float(p)) for p in phis]
self._cos_n_p[i] = [cos(float(i) * float(p)) for p in phis]
def get_q2(self, thetas=None, phis=None):
"""
Calculates the value of the bond orientational order parameter of
weight l=2. If the function is called with non-empty lists of
polar and azimuthal angles the corresponding trigonometric terms
are computed afresh. Otherwise, it is expected that the
compute_trigonometric_terms function has been just called.
Args:
thetas ([float]): polar angles of all neighbors in radians.
phis ([float]): azimuth angles of all neighbors in radians.
Returns:
float: bond orientational order parameter of weight l=2
corresponding to the input angles thetas and phis.
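        Example (an illustrative sketch; two collinear neighbors at the
        North and South poles form a perfectly linear arrangement, for
        which q2 evaluates to 1):
            >>> from math import pi
            >>> lostop = LocalStructOrderParams(["q2"])
            >>> round(lostop.get_q2(thetas=[0.0, pi], phis=[0.0, 0.0]), 4)
            1.0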
"""
if thetas is not None and phis is not None:
self.compute_trigonometric_terms(thetas, phis)
nnn = len(self._pow_sin_t[1])
nnn_range = range(nnn)
sqrt_15_2pi = sqrt(15.0 / (2.0 * pi))
sqrt_5_pi = sqrt(5.0 / pi)
pre_y_2_2 = [0.25 * sqrt_15_2pi * val for val in self._pow_sin_t[2]]
pre_y_2_1 = [0.5 * sqrt_15_2pi * val[0] * val[1] for val in zip(self._pow_sin_t[1], self._pow_cos_t[1])]
acc = 0.0
# Y_2_-2
real = imag = 0.0
for i in nnn_range:
real += pre_y_2_2[i] * self._cos_n_p[2][i]
imag -= pre_y_2_2[i] * self._sin_n_p[2][i]
acc += real * real + imag * imag
# Y_2_-1
real = imag = 0.0
for i in nnn_range:
real += pre_y_2_1[i] * self._cos_n_p[1][i]
imag -= pre_y_2_1[i] * self._sin_n_p[1][i]
acc += real * real + imag * imag
# Y_2_0
real = imag = 0.0
for i in nnn_range:
real += 0.25 * sqrt_5_pi * (3.0 * self._pow_cos_t[2][i] - 1.0)
acc += real * real
# Y_2_1
real = imag = 0.0
for i in nnn_range:
real -= pre_y_2_1[i] * self._cos_n_p[1][i]
imag -= pre_y_2_1[i] * self._sin_n_p[1][i]
acc += real * real + imag * imag
# Y_2_2
real = imag = 0.0
for i in nnn_range:
real += pre_y_2_2[i] * self._cos_n_p[2][i]
imag += pre_y_2_2[i] * self._sin_n_p[2][i]
acc += real * real + imag * imag
q2 = sqrt(4.0 * pi * acc / (5.0 * float(nnn * nnn)))
return q2
def get_q4(self, thetas=None, phis=None):
"""
Calculates the value of the bond orientational order parameter of
weight l=4. If the function is called with non-empty lists of
polar and azimuthal angles the corresponding trigonometric terms
are computed afresh. Otherwise, it is expected that the
compute_trigonometric_terms function has been just called.
Args:
thetas ([float]): polar angles of all neighbors in radians.
phis ([float]): azimuth angles of all neighbors in radians.
Returns:
float: bond orientational order parameter of weight l=4
corresponding to the input angles thetas and phis.
"""
if thetas is not None and phis is not None:
self.compute_trigonometric_terms(thetas, phis)
nnn = len(self._pow_sin_t[1])
nnn_range = range(nnn)
i16_3 = 3.0 / 16.0
i8_3 = 3.0 / 8.0
sqrt_35_pi = sqrt(35.0 / pi)
sqrt_35_2pi = sqrt(35.0 / (2.0 * pi))
sqrt_5_pi = sqrt(5.0 / pi)
sqrt_5_2pi = sqrt(5.0 / (2.0 * pi))
sqrt_1_pi = sqrt(1.0 / pi)
pre_y_4_4 = [i16_3 * sqrt_35_2pi * val for val in self._pow_sin_t[4]]
pre_y_4_3 = [i8_3 * sqrt_35_pi * val[0] * val[1] for val in zip(self._pow_sin_t[3], self._pow_cos_t[1])]
pre_y_4_2 = [
i8_3 * sqrt_5_2pi * val[0] * (7.0 * val[1] - 1.0) for val in zip(self._pow_sin_t[2], self._pow_cos_t[2])
]
pre_y_4_1 = [
i8_3 * sqrt_5_pi * val[0] * (7.0 * val[1] - 3.0 * val[2])
for val in zip(self._pow_sin_t[1], self._pow_cos_t[3], self._pow_cos_t[1])
]
acc = 0.0
# Y_4_-4
real = imag = 0.0
for i in nnn_range:
real += pre_y_4_4[i] * self._cos_n_p[4][i]
imag -= pre_y_4_4[i] * self._sin_n_p[4][i]
acc += real * real + imag * imag
# Y_4_-3
real = imag = 0.0
for i in nnn_range:
real += pre_y_4_3[i] * self._cos_n_p[3][i]
imag -= pre_y_4_3[i] * self._sin_n_p[3][i]
acc += real * real + imag * imag
# Y_4_-2
real = imag = 0.0
for i in nnn_range:
real += pre_y_4_2[i] * self._cos_n_p[2][i]
imag -= pre_y_4_2[i] * self._sin_n_p[2][i]
acc += real * real + imag * imag
# Y_4_-1
real = imag = 0.0
for i in nnn_range:
real += pre_y_4_1[i] * self._cos_n_p[1][i]
imag -= pre_y_4_1[i] * self._sin_n_p[1][i]
acc += real * real + imag * imag
# Y_4_0
real = imag = 0.0
for i in nnn_range:
real += i16_3 * sqrt_1_pi * (35.0 * self._pow_cos_t[4][i] - 30.0 * self._pow_cos_t[2][i] + 3.0)
acc += real * real
# Y_4_1
real = imag = 0.0
for i in nnn_range:
real -= pre_y_4_1[i] * self._cos_n_p[1][i]
imag -= pre_y_4_1[i] * self._sin_n_p[1][i]
acc += real * real + imag * imag
# Y_4_2
real = imag = 0.0
for i in nnn_range:
real += pre_y_4_2[i] * self._cos_n_p[2][i]
imag += pre_y_4_2[i] * self._sin_n_p[2][i]
acc += real * real + imag * imag
# Y_4_3
real = imag = 0.0
for i in nnn_range:
real -= pre_y_4_3[i] * self._cos_n_p[3][i]
imag -= pre_y_4_3[i] * self._sin_n_p[3][i]
acc += real * real + imag * imag
# Y_4_4
real = imag = 0.0
for i in nnn_range:
real += pre_y_4_4[i] * self._cos_n_p[4][i]
imag += pre_y_4_4[i] * self._sin_n_p[4][i]
acc += real * real + imag * imag
q4 = sqrt(4.0 * pi * acc / (9.0 * float(nnn * nnn)))
return q4
def get_q6(self, thetas=None, phis=None):
"""
Calculates the value of the bond orientational order parameter of
weight l=6. If the function is called with non-empty lists of
polar and azimuthal angles the corresponding trigonometric terms
are computed afresh. Otherwise, it is expected that the
compute_trigonometric_terms function has been just called.
Args:
thetas ([float]): polar angles of all neighbors in radians.
phis ([float]): azimuth angles of all neighbors in radians.
Returns:
float: bond orientational order parameter of weight l=6
corresponding to the input angles thetas and phis.
"""
if thetas is not None and phis is not None:
self.compute_trigonometric_terms(thetas, phis)
nnn = len(self._pow_sin_t[1])
nnn_range = range(nnn)
i64 = 1.0 / 64.0
i32 = 1.0 / 32.0
i32_3 = 3.0 / 32.0
i16 = 1.0 / 16.0
sqrt_3003_pi = sqrt(3003.0 / pi)
sqrt_1001_pi = sqrt(1001.0 / pi)
sqrt_91_2pi = sqrt(91.0 / (2.0 * pi))
sqrt_1365_pi = sqrt(1365.0 / pi)
sqrt_273_2pi = sqrt(273.0 / (2.0 * pi))
sqrt_13_pi = sqrt(13.0 / pi)
pre_y_6_6 = [i64 * sqrt_3003_pi * val for val in self._pow_sin_t[6]]
pre_y_6_5 = [i32_3 * sqrt_1001_pi * val[0] * val[1] for val in zip(self._pow_sin_t[5], self._pow_cos_t[1])]
pre_y_6_4 = [
i32_3 * sqrt_91_2pi * val[0] * (11.0 * val[1] - 1.0) for val in zip(self._pow_sin_t[4], self._pow_cos_t[2])
]
pre_y_6_3 = [
i32 * sqrt_1365_pi * val[0] * (11.0 * val[1] - 3.0 * val[2])
for val in zip(self._pow_sin_t[3], self._pow_cos_t[3], self._pow_cos_t[1])
]
pre_y_6_2 = [
i64 * sqrt_1365_pi * val[0] * (33.0 * val[1] - 18.0 * val[2] + 1.0)
for val in zip(self._pow_sin_t[2], self._pow_cos_t[4], self._pow_cos_t[2])
]
pre_y_6_1 = [
i16 * sqrt_273_2pi * val[0] * (33.0 * val[1] - 30.0 * val[2] + 5.0 * val[3])
for val in zip(
self._pow_sin_t[1],
self._pow_cos_t[5],
self._pow_cos_t[3],
self._pow_cos_t[1],
)
]
acc = 0.0
# Y_6_-6
real = 0.0
imag = 0.0
for i in nnn_range:
real += pre_y_6_6[i] * self._cos_n_p[6][i] # cos(x) = cos(-x)
imag -= pre_y_6_6[i] * self._sin_n_p[6][i] # sin(x) = -sin(-x)
acc += real * real + imag * imag
# Y_6_-5
real = 0.0
imag = 0.0
for i in nnn_range:
real += pre_y_6_5[i] * self._cos_n_p[5][i]
imag -= pre_y_6_5[i] * self._sin_n_p[5][i]
acc += real * real + imag * imag
# Y_6_-4
real = 0.0
imag = 0.0
for i in nnn_range:
real += pre_y_6_4[i] * self._cos_n_p[4][i]
imag -= pre_y_6_4[i] * self._sin_n_p[4][i]
acc += real * real + imag * imag
# Y_6_-3
real = 0.0
imag = 0.0
for i in nnn_range:
real += pre_y_6_3[i] * self._cos_n_p[3][i]
imag -= pre_y_6_3[i] * self._sin_n_p[3][i]
acc += real * real + imag * imag
# Y_6_-2
real = 0.0
imag = 0.0
for i in nnn_range:
real += pre_y_6_2[i] * self._cos_n_p[2][i]
imag -= pre_y_6_2[i] * self._sin_n_p[2][i]
acc += real * real + imag * imag
# Y_6_-1
real = 0.0
imag = 0.0
for i in nnn_range:
real += pre_y_6_1[i] * self._cos_n_p[1][i]
imag -= pre_y_6_1[i] * self._sin_n_p[1][i]
acc += real * real + imag * imag
# Y_6_0
real = 0.0
imag = 0.0
for i in nnn_range:
real += (
i32
* sqrt_13_pi
* (231.0 * self._pow_cos_t[6][i] - 315.0 * self._pow_cos_t[4][i] + 105.0 * self._pow_cos_t[2][i] - 5.0)
)
acc += real * real
# Y_6_1
real = 0.0
imag = 0.0
for i in nnn_range:
real -= pre_y_6_1[i] * self._cos_n_p[1][i]
imag -= pre_y_6_1[i] * self._sin_n_p[1][i]
acc += real * real + imag * imag
# Y_6_2
real = 0.0
imag = 0.0
for i in nnn_range:
real += pre_y_6_2[i] * self._cos_n_p[2][i]
imag += pre_y_6_2[i] * self._sin_n_p[2][i]
acc += real * real + imag * imag
# Y_6_3
real = 0.0
imag = 0.0
for i in nnn_range:
real -= pre_y_6_3[i] * self._cos_n_p[3][i]
imag -= pre_y_6_3[i] * self._sin_n_p[3][i]
acc += real * real + imag * imag
# Y_6_4
real = 0.0
imag = 0.0
for i in nnn_range:
real += pre_y_6_4[i] * self._cos_n_p[4][i]
imag += pre_y_6_4[i] * self._sin_n_p[4][i]
acc += real * real + imag * imag
# Y_6_5
real = 0.0
imag = 0.0
for i in nnn_range:
real -= pre_y_6_5[i] * self._cos_n_p[5][i]
imag -= pre_y_6_5[i] * self._sin_n_p[5][i]
acc += real * real + imag * imag
# Y_6_6
real = 0.0
imag = 0.0
for i in nnn_range:
real += pre_y_6_6[i] * self._cos_n_p[6][i]
imag += pre_y_6_6[i] * self._sin_n_p[6][i]
acc += real * real + imag * imag
q6 = sqrt(4.0 * pi * acc / (13.0 * float(nnn * nnn)))
return q6
def get_type(self, index):
"""
Return type of order parameter at the index provided and
represented by a short string.
Args:
index (int): index of order parameter for which type is
to be returned.
Returns:
str: OP type.
"""
if index < 0 or index >= len(self._types):
raise ValueError("Index for getting order parameter type" " out-of-bounds!")
return self._types[index]
def get_parameters(self, index):
"""
        Returns the list of floats representing the parameters associated
        with the calculation of the order parameter defined at the index
        provided.
        Attention: the parameters may differ from those originally input
        because of preprocessing done for efficiency reasons.
Args:
index (int):
index of order parameter for which associated parameters
are to be returned.
Returns:
[float]: parameters of a given OP.
"""
if index < 0 or index >= len(self._types):
raise ValueError(
"Index for getting parameters associated with" " order parameter calculation out-of-bounds!"
)
return self._params[index]
def get_order_parameters(self, structure, n, indices_neighs=None, tol=0.0, target_spec=None):
"""
Compute all order parameters of site n.
Args:
structure (Structure): input structure.
n (int): index of site in input structure,
for which OPs are to be
calculated. Note that we do not use the sites iterator
here, but directly access sites via struct[index].
indices_neighs ([int]): list of indices of those neighbors
in Structure object
structure that are to be considered for OP computation.
This optional argument overwrites the way neighbors are
to be determined as defined in the constructor (i.e.,
Voronoi coordination finder via negative cutoff radius
vs constant cutoff radius if cutoff was positive).
We do not use information about the underlying
structure lattice if the neighbor indices are explicitly
provided. This has two important consequences. First,
the input Structure object can, in fact, be a
simple list of Site objects. Second, no nearest images
of neighbors are determined when providing an index list.
Note furthermore that this neighbor
determination type ignores the optional target_spec
argument.
tol (float): threshold of weight
(= solid angle / maximal solid angle)
to determine if a particular pair is
considered neighbors; this is relevant only in the case
when Voronoi polyhedra are used to determine coordination
target_spec (Species): target species to be considered
when calculating the order
parameters of site n; None includes all species of input
structure.
Returns:
[floats]: representing order parameters. Should it not be
possible to compute a given OP for a conceptual reason, the
corresponding entry is None instead of a float. For Steinhardt
et al.'s bond orientational OPs and the other geometric OPs
("tet", "oct", "bcc", etc.),
this can happen if there is a single
neighbor around site n in the structure because that
does not permit calculation of angles between multiple
neighbors.
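        Example (an illustrative sketch; a hypothetical bcc iron cell.
        With the default negative cutoff, neighbors are found via the
        Voronoi route, which yields 14 faces for bcc, hence cn = 14):
            >>> from pymatgen.core.lattice import Lattice
            >>> from pymatgen.core.structure import Structure
            >>> s = Structure(Lattice.cubic(2.87), ["Fe", "Fe"],
            ...               [[0, 0, 0], [0.5, 0.5, 0.5]])
            >>> ops = LocalStructOrderParams(["cn"])
            >>> ops.get_order_parameters(s, 0)[0]
            14.0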
"""
# Do error-checking and initialization.
if n < 0:
raise ValueError("Site index smaller zero!")
if n >= len(structure):
raise ValueError("Site index beyond maximum!")
if indices_neighs is not None:
for index in indices_neighs:
if index >= len(structure):
raise ValueError("Neighbor site index beyond maximum!")
if tol < 0.0:
raise ValueError("Negative tolerance for weighted solid angle!")
left_of_unity = 1.0 - 1.0e-12
# The following threshold has to be adapted to non-Angstrom units.
very_small = 1.0e-12
fac_bcc = 1.0 / exp(-0.5)
# Find central site and its neighbors.
# Note that we adopt the same way of accessing sites here as in
# VoronoiNN; that is, not via the sites iterator.
centsite = structure[n]
if indices_neighs is not None:
neighsites = [structure[index] for index in indices_neighs]
elif self._voroneigh:
vnn = VoronoiNN(tol=tol, targets=target_spec)
neighsites = vnn.get_nn(structure, n)
else:
# Structure.get_sites_in_sphere --> also other periodic images
neighsitestmp = [i[0] for i in structure.get_sites_in_sphere(centsite.coords, self._cutoff)]
neighsites = []
if centsite not in neighsitestmp:
raise ValueError("Could not find center site!")
neighsitestmp.remove(centsite)
if target_spec is None:
neighsites = list(neighsitestmp)
else:
neighsites[:] = [site for site in neighsitestmp if site.specie.symbol == target_spec]
nneigh = len(neighsites)
self._last_nneigh = nneigh
# Prepare angle calculations, if applicable.
rij = []
rjk = []
rijnorm = []
rjknorm = []
dist = []
distjk_unique = []
distjk = []
centvec = centsite.coords
if self._computerijs:
for j, neigh in enumerate(neighsites):
rij.append((neigh.coords - centvec))
dist.append(np.linalg.norm(rij[j]))
rijnorm.append((rij[j] / dist[j]))
if self._computerjks:
for j, neigh in enumerate(neighsites):
rjk.append([])
rjknorm.append([])
distjk.append([])
kk = 0
for k, neigh_2 in enumerate(neighsites):
if j != k:
rjk[j].append(neigh_2.coords - neigh.coords)
distjk[j].append(np.linalg.norm(rjk[j][kk]))
if k > j:
distjk_unique.append(distjk[j][kk])
rjknorm[j].append(rjk[j][kk] / distjk[j][kk])
kk = kk + 1
# Initialize OP list and, then, calculate OPs.
ops = [0.0 for t in self._types]
# norms = [[[] for j in range(nneigh)] for t in self._types]
# First, coordination number and distance-based OPs.
for i, t in enumerate(self._types):
if t == "cn":
ops[i] = nneigh / self._params[i]["norm"]
elif t == "sgl_bd":
dist_sorted = sorted(dist)
if len(dist_sorted) == 1:
ops[i] = 1.0
elif len(dist_sorted) > 1:
ops[i] = 1.0 - dist_sorted[0] / dist_sorted[1]
# Then, bond orientational OPs based on spherical harmonics
# according to Steinhardt et al., Phys. Rev. B, 28, 784-805, 1983.
if self._boops:
thetas = []
phis = []
for j, vec in enumerate(rijnorm):
# z is North pole --> theta between vec and (0, 0, 1)^T.
# Because vec is normalized, dot product is simply vec[2].
thetas.append(acos(max(-1.0, min(vec[2], 1.0))))
tmpphi = 0.0
# Compute phi only if it is not (almost) perfectly
# aligned with z-axis.
if -left_of_unity < vec[2] < left_of_unity:
# x is prime meridian --> phi between projection of vec
# into x-y plane and (1, 0, 0)^T
tmpphi = acos(
max(
-1.0,
min(vec[0] / (sqrt(vec[0] * vec[0] + vec[1] * vec[1])), 1.0),
)
)
if vec[1] < 0.0:
tmpphi = -tmpphi
phis.append(tmpphi)
# Note that None flags that we have too few neighbors
# for calculating BOOPS.
for i, t in enumerate(self._types):
if t == "q2":
ops[i] = self.get_q2(thetas, phis) if len(thetas) > 0 else None
elif t == "q4":
ops[i] = self.get_q4(thetas, phis) if len(thetas) > 0 else None
elif t == "q6":
ops[i] = self.get_q6(thetas, phis) if len(thetas) > 0 else None
# Then, deal with the Peters-style OPs that are tailor-made
# to recognize common structural motifs
# (Peters, J. Chem. Phys., 131, 244103, 2009;
# Zimmermann et al., J. Am. Chem. Soc., under revision, 2015).
if self._geomops:
gaussthetak = [0.0 for t in self._types] # not used by all OPs
qsptheta = [[[] for j in range(nneigh)] for t in self._types]
norms = [[[] for j in range(nneigh)] for t in self._types]
ipi = 1.0 / pi
piover2 = pi / 2.0
onethird = 1.0 / 3.0
twothird = 2.0 / 3.0
for j in range(nneigh): # Neighbor j is put to the North pole.
zaxis = rijnorm[j]
kc = 0
for k in range(nneigh): # From neighbor k, we construct
if j != k: # the prime meridian.
for i in range(len(self._types)):
qsptheta[i][j].append(0.0)
norms[i][j].append(0)
tmp = max(-1.0, min(np.inner(zaxis, rijnorm[k]), 1.0))
thetak = acos(tmp)
xaxis = gramschmidt(rijnorm[k], zaxis)
if np.linalg.norm(xaxis) < very_small:
flag_xaxis = True
else:
xaxis = xaxis / np.linalg.norm(xaxis)
flag_xaxis = False
if self._comp_azi:
flag_yaxis = True
yaxis = np.cross(zaxis, xaxis)
if np.linalg.norm(yaxis) > very_small:
yaxis = yaxis / np.linalg.norm(yaxis)
flag_yaxis = False
# Contributions of j-i-k angles, where i represents the
# central atom and j and k two of the neighbors.
for i, t in enumerate(self._types):
if t in ["bent", "sq_pyr_legacy"]:
tmp = self._params[i]["IGW_TA"] * (thetak * ipi - self._params[i]["TA"])
qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp)
norms[i][j][kc] += 1
elif t in ["tri_plan", "tri_plan_max", "tet", "tet_max"]:
tmp = self._params[i]["IGW_TA"] * (thetak * ipi - self._params[i]["TA"])
gaussthetak[i] = exp(-0.5 * tmp * tmp)
if t in ["tri_plan_max", "tet_max"]:
qsptheta[i][j][kc] += gaussthetak[i]
norms[i][j][kc] += 1
elif t in ["T", "tri_pyr", "sq_pyr", "pent_pyr", "hex_pyr"]:
tmp = self._params[i]["IGW_EP"] * (thetak * ipi - 0.5)
qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp)
norms[i][j][kc] += 1
elif t in [
"sq_plan",
"oct",
"oct_legacy",
"cuboct",
"cuboct_max",
]:
if thetak >= self._params[i]["min_SPP"]:
tmp = self._params[i]["IGW_SPP"] * (thetak * ipi - 1.0)
qsptheta[i][j][kc] += self._params[i]["w_SPP"] * exp(-0.5 * tmp * tmp)
norms[i][j][kc] += self._params[i]["w_SPP"]
elif t in [
"see_saw_rect",
"tri_bipyr",
"sq_bipyr",
"pent_bipyr",
"hex_bipyr",
"oct_max",
"sq_plan_max",
"hex_plan_max",
]:
if thetak < self._params[i]["min_SPP"]:
tmp = (
self._params[i]["IGW_EP"] * (thetak * ipi - 0.5)
if t != "hex_plan_max"
else self._params[i]["IGW_TA"]
* (fabs(thetak * ipi - 0.5) - self._params[i]["TA"])
)
qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp)
norms[i][j][kc] += 1
elif t in ["pent_plan", "pent_plan_max"]:
tmp = 0.4 if thetak <= self._params[i]["TA"] * pi else 0.8
tmp2 = self._params[i]["IGW_TA"] * (thetak * ipi - tmp)
gaussthetak[i] = exp(-0.5 * tmp2 * tmp2)
if t == "pent_plan_max":
qsptheta[i][j][kc] += gaussthetak[i]
norms[i][j][kc] += 1
elif t == "bcc" and j < k:
if thetak >= self._params[i]["min_SPP"]:
tmp = self._params[i]["IGW_SPP"] * (thetak * ipi - 1.0)
qsptheta[i][j][kc] += self._params[i]["w_SPP"] * exp(-0.5 * tmp * tmp)
norms[i][j][kc] += self._params[i]["w_SPP"]
elif t == "sq_face_cap_trig_pris":
if thetak < self._params[i]["TA3"]:
tmp = self._params[i]["IGW_TA1"] * (thetak * ipi - self._params[i]["TA1"])
qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp)
norms[i][j][kc] += 1
for m in range(nneigh):
if (m != j) and (m != k) and (not flag_xaxis):
tmp = max(-1.0, min(np.inner(zaxis, rijnorm[m]), 1.0))
thetam = acos(tmp)
xtwoaxistmp = gramschmidt(rijnorm[m], zaxis)
l = np.linalg.norm(xtwoaxistmp)
if l < very_small:
flag_xtwoaxis = True
else:
xtwoaxis = xtwoaxistmp / l
phi = acos(max(-1.0, min(np.inner(xtwoaxis, xaxis), 1.0)))
flag_xtwoaxis = False
if self._comp_azi:
phi2 = atan2(
np.dot(xtwoaxis, yaxis),
np.dot(xtwoaxis, xaxis),
)
# South pole contributions of m.
if t in [
"tri_bipyr",
"sq_bipyr",
"pent_bipyr",
"hex_bipyr",
"oct_max",
"sq_plan_max",
"hex_plan_max",
"see_saw_rect",
]:
if thetam >= self._params[i]["min_SPP"]:
tmp = self._params[i]["IGW_SPP"] * (thetam * ipi - 1.0)
qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp)
norms[i][j][kc] += 1
# Contributions of j-i-m angle and
# angles between plane j-i-k and i-m vector.
if not flag_xaxis and not flag_xtwoaxis:
for i, t in enumerate(self._types):
if t in [
"tri_plan",
"tri_plan_max",
"tet",
"tet_max",
]:
tmp = self._params[i]["IGW_TA"] * (thetam * ipi - self._params[i]["TA"])
tmp2 = cos(self._params[i]["fac_AA"] * phi) ** self._params[i]["exp_cos_AA"]
tmp3 = 1 if t in ["tri_plan_max", "tet_max"] else gaussthetak[i]
qsptheta[i][j][kc] += tmp3 * exp(-0.5 * tmp * tmp) * tmp2
norms[i][j][kc] += 1
elif t in ["pent_plan", "pent_plan_max"]:
tmp = 0.4 if thetam <= self._params[i]["TA"] * pi else 0.8
tmp2 = self._params[i]["IGW_TA"] * (thetam * ipi - tmp)
tmp3 = cos(phi)
tmp4 = 1 if t == "pent_plan_max" else gaussthetak[i]
qsptheta[i][j][kc] += tmp4 * exp(-0.5 * tmp2 * tmp2) * tmp3 * tmp3
norms[i][j][kc] += 1
elif t in [
"T",
"tri_pyr",
"sq_pyr",
"pent_pyr",
"hex_pyr",
]:
tmp = cos(self._params[i]["fac_AA"] * phi) ** self._params[i]["exp_cos_AA"]
tmp3 = self._params[i]["IGW_EP"] * (thetam * ipi - 0.5)
qsptheta[i][j][kc] += tmp * exp(-0.5 * tmp3 * tmp3)
norms[i][j][kc] += 1
elif t in ["sq_plan", "oct", "oct_legacy"]:
if (
thetak < self._params[i]["min_SPP"]
and thetam < self._params[i]["min_SPP"]
):
tmp = (
cos(self._params[i]["fac_AA"] * phi)
** self._params[i]["exp_cos_AA"]
)
tmp2 = self._params[i]["IGW_EP"] * (thetam * ipi - 0.5)
qsptheta[i][j][kc] += tmp * exp(-0.5 * tmp2 * tmp2)
if t == "oct_legacy":
qsptheta[i][j][kc] -= tmp * self._params[i][6] * self._params[i][7]
norms[i][j][kc] += 1
elif t in [
"tri_bipyr",
"sq_bipyr",
"pent_bipyr",
"hex_bipyr",
"oct_max",
"sq_plan_max",
"hex_plan_max",
]:
if thetam < self._params[i]["min_SPP"]:
if thetak < self._params[i]["min_SPP"]:
tmp = (
cos(self._params[i]["fac_AA"] * phi)
** self._params[i]["exp_cos_AA"]
)
tmp2 = (
self._params[i]["IGW_EP"] * (thetam * ipi - 0.5)
if t != "hex_plan_max"
else self._params[i]["IGW_TA"]
* (fabs(thetam * ipi - 0.5) - self._params[i]["TA"])
)
qsptheta[i][j][kc] += tmp * exp(-0.5 * tmp2 * tmp2)
norms[i][j][kc] += 1
elif t == "bcc" and j < k:
if thetak < self._params[i]["min_SPP"]:
if thetak > piover2:
fac = 1.0
else:
fac = -1.0
tmp = (thetam - piover2) / asin(1 / 3)
qsptheta[i][j][kc] += (
fac * cos(3.0 * phi) * fac_bcc * tmp * exp(-0.5 * tmp * tmp)
)
norms[i][j][kc] += 1
elif t == "see_saw_rect":
if thetam < self._params[i]["min_SPP"]:
if thetak < self._params[i]["min_SPP"] and phi < 0.75 * pi:
tmp = (
cos(self._params[i]["fac_AA"] * phi)
** self._params[i]["exp_cos_AA"]
)
tmp2 = self._params[i]["IGW_EP"] * (thetam * ipi - 0.5)
qsptheta[i][j][kc] += tmp * exp(-0.5 * tmp2 * tmp2)
norms[i][j][kc] += 1.0
elif t in ["cuboct", "cuboct_max"]:
if (
thetam < self._params[i]["min_SPP"]
and self._params[i][4] < thetak < self._params[i][2]
):
if self._params[i][4] < thetam < self._params[i][2]:
tmp = cos(phi)
tmp2 = self._params[i][5] * (thetam * ipi - 0.5)
qsptheta[i][j][kc] += tmp * tmp * exp(-0.5 * tmp2 * tmp2)
norms[i][j][kc] += 1.0
elif thetam < self._params[i][4]:
tmp = 0.0556 * (cos(phi - 0.5 * pi) - 0.81649658)
tmp2 = self._params[i][6] * (thetam * ipi - onethird)
qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp) * exp(
-0.5 * tmp2 * tmp2
)
norms[i][j][kc] += 1.0
elif thetam > self._params[i][2]:
tmp = 0.0556 * (cos(phi - 0.5 * pi) - 0.81649658)
tmp2 = self._params[i][6] * (thetam * ipi - twothird)
qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp) * exp(
-0.5 * tmp2 * tmp2
)
norms[i][j][kc] += 1.0
elif t == "sq_face_cap_trig_pris" and not flag_yaxis:
if thetak < self._params[i]["TA3"]:
if thetam < self._params[i]["TA3"]:
tmp = (
cos(self._params[i]["fac_AA1"] * phi2)
** self._params[i]["exp_cos_AA1"]
)
tmp2 = self._params[i]["IGW_TA1"] * (
thetam * ipi - self._params[i]["TA1"]
)
else:
tmp = (
cos(
self._params[i]["fac_AA2"]
* (phi2 + self._params[i]["shift_AA2"])
)
** self._params[i]["exp_cos_AA2"]
)
tmp2 = self._params[i]["IGW_TA2"] * (
thetam * ipi - self._params[i]["TA2"]
)
qsptheta[i][j][kc] += tmp * exp(-0.5 * tmp2 * tmp2)
norms[i][j][kc] += 1
kc += 1
# Normalize Peters-style OPs.
for i, t in enumerate(self._types):
if t in [
"tri_plan",
"tet",
"bent",
"sq_plan",
"oct",
"oct_legacy",
"cuboct",
"pent_plan",
]:
ops[i] = tmp_norm = 0.0
for j in range(nneigh):
ops[i] += sum(qsptheta[i][j])
tmp_norm += float(sum(norms[i][j]))
ops[i] = ops[i] / tmp_norm if tmp_norm > 1.0e-12 else None
elif t in [
"T",
"tri_pyr",
"see_saw_rect",
"sq_pyr",
"tri_bipyr",
"sq_bipyr",
"pent_pyr",
"hex_pyr",
"pent_bipyr",
"hex_bipyr",
"oct_max",
"tri_plan_max",
"tet_max",
"sq_plan_max",
"pent_plan_max",
"cuboct_max",
"hex_plan_max",
"sq_face_cap_trig_pris",
]:
ops[i] = None
if nneigh > 1:
for j in range(nneigh):
for k in range(len(qsptheta[i][j])):
qsptheta[i][j][k] = (
qsptheta[i][j][k] / norms[i][j][k] if norms[i][j][k] > 1.0e-12 else 0.0
)
ops[i] = max(qsptheta[i][j]) if j == 0 else max(ops[i], max(qsptheta[i][j]))
elif t == "bcc":
ops[i] = 0.0
for j in range(nneigh):
ops[i] += sum(qsptheta[i][j])
ops[i] = (
ops[i] / float(0.5 * float(nneigh * (6 + (nneigh - 2) * (nneigh - 3)))) if nneigh > 3 else None
)
elif t == "sq_pyr_legacy":
if nneigh > 1:
dmean = np.mean(dist)
acc = 0.0
for d in dist:
tmp = self._params[i][2] * (d - dmean)
acc = acc + exp(-0.5 * tmp * tmp)
for j in range(nneigh):
ops[i] = max(qsptheta[i][j]) if j == 0 else max(ops[i], max(qsptheta[i][j]))
ops[i] = acc * ops[i] / float(nneigh)
# nneigh * (nneigh - 1))
else:
ops[i] = None
# Then, deal with the new-style OPs that require vectors between
# neighbors.
if self._geomops2:
# Compute all (unique) angles and sort the resulting list.
aij = []
for ir, r in enumerate(rijnorm):
for j in range(ir + 1, len(rijnorm)):
aij.append(acos(max(-1.0, min(np.inner(r, rijnorm[j]), 1.0))))
aijs = sorted(aij)
# Compute height, side and diagonal length estimates.
neighscent = np.array([0.0, 0.0, 0.0])
for j, neigh in enumerate(neighsites):
neighscent = neighscent + neigh.coords
if nneigh > 0:
neighscent = neighscent / float(nneigh)
h = np.linalg.norm(neighscent - centvec)
b = min(distjk_unique) if len(distjk_unique) > 0 else 0
dhalf = max(distjk_unique) / 2.0 if len(distjk_unique) > 0 else 0
for i, t in enumerate(self._types):
if t in ("reg_tri", "sq"):
if nneigh < 3:
ops[i] = None
else:
ops[i] = 1.0
if t == "reg_tri":
a = 2.0 * asin(b / (2.0 * sqrt(h * h + (b / (2.0 * cos(3.0 * pi / 18.0))) ** 2.0)))
nmax = 3
elif t == "sq":
a = 2.0 * asin(b / (2.0 * sqrt(h * h + dhalf * dhalf)))
nmax = 4
for j in range(min([nneigh, nmax])):
ops[i] = ops[i] * exp(-0.5 * ((aijs[j] - a) * self._params[i][0]) ** 2)
return ops
class BrunnerNN_reciprocal(NearNeighbors):
"""
Determine coordination number using Brunner's algorithm which counts the
atoms that are within the largest gap in differences in real space
interatomic distances. This algorithm uses Brunner's method of
largest reciprocal gap in interatomic distances.
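    Example (an illustrative sketch; a hypothetical simple-cubic Po cell,
    for which the largest reciprocal gap sits right after the first
    coordination shell):
        >>> from pymatgen.core.lattice import Lattice
        >>> from pymatgen.core.structure import Structure
        >>> s = Structure(Lattice.cubic(3.35), ["Po"], [[0, 0, 0]])
        >>> BrunnerNN_reciprocal().get_cn(s, 0)
        6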
"""
def __init__(self, tol=1.0e-4, cutoff=8.0):
"""
Args:
tol (float): tolerance parameter for bond determination
(default: 1E-4).
cutoff (float): cutoff radius in Angstrom to look for near-neighbor
atoms. Defaults to 8.0.
"""
self.tol = tol
self.cutoff = cutoff
@property
def structures_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Structure
objects?
"""
return True
@property
def molecules_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Molecule
objects?
"""
return False
def get_nn_info(self, structure, n):
"""
Get all near-neighbor sites as well as the associated image locations
and weights of the site with index n in structure.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near-neighbor
sites.
Returns:
            siw (list of dicts): each dictionary represents a coordinated
                site, its image location, and its weight.
"""
site = structure[n]
neighs_dists = structure.get_neighbors(site, self.cutoff)
ds = sorted([i.nn_distance for i in neighs_dists])
ns = [1.0 / ds[i] - 1.0 / ds[i + 1] for i in range(len(ds) - 1)]
d_max = ds[ns.index(max(ns))]
siw = []
for nn in neighs_dists:
s, dist = nn, nn.nn_distance
if dist < d_max + self.tol:
w = ds[0] / dist
siw.append(
{
"site": s,
"image": self._get_image(structure, s),
"weight": w,
"site_index": self._get_original_site(structure, s),
}
)
return siw
class BrunnerNN_relative(NearNeighbors):
"""
Determine coordination number using Brunner's algorithm which counts the
atoms that are within the largest gap in differences in real space
    interatomic distances. This algorithm uses Brunner's method of
    largest relative gap in interatomic distances.
"""
def __init__(self, tol=1.0e-4, cutoff=8.0):
"""
Args:
tol (float): tolerance parameter for bond determination
(default: 1E-4).
cutoff (float): cutoff radius in Angstrom to look for near-neighbor
atoms. Defaults to 8.0.
"""
self.tol = tol
self.cutoff = cutoff
@property
def structures_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Structure
objects?
"""
return True
@property
def molecules_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Molecule
objects?
"""
return False
def get_nn_info(self, structure, n):
"""
Get all near-neighbor sites as well as the associated image locations
and weights of the site with index n in structure.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near-neighbor
sites.
Returns:
            siw (list of dicts): each dictionary represents a coordinated
                site, its image location, and its weight.
"""
site = structure[n]
neighs_dists = structure.get_neighbors(site, self.cutoff)
ds = sorted([i.nn_distance for i in neighs_dists])
ns = [ds[i + 1] / ds[i] for i in range(len(ds) - 1)]
d_max = ds[ns.index(max(ns))]
siw = []
for nn in neighs_dists:
s, dist = nn, nn.nn_distance
if dist < d_max + self.tol:
w = ds[0] / dist
siw.append(
{
"site": s,
"image": self._get_image(structure, s),
"weight": w,
"site_index": self._get_original_site(structure, s),
}
)
return siw
class BrunnerNN_real(NearNeighbors):
"""
Determine coordination number using Brunner's algorithm which counts the
atoms that are within the largest gap in differences in real space
interatomic distances. This algorithm uses Brunner's method of
largest gap in interatomic distances.
"""
def __init__(self, tol=1.0e-4, cutoff=8.0):
"""
Args:
tol (float): tolerance parameter for bond determination
(default: 1E-4).
cutoff (float): cutoff radius in Angstrom to look for near-neighbor
atoms. Defaults to 8.0.
"""
self.tol = tol
self.cutoff = cutoff
@property
def structures_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Structure
objects?
"""
return True
@property
def molecules_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Molecule
objects?
"""
return False
def get_nn_info(self, structure, n):
"""
Get all near-neighbor sites as well as the associated image locations
and weights of the site with index n in structure.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near-neighbor
sites.
Returns:
            siw (list of dicts): each dictionary represents a coordinated
                site, its image location, and its weight.
"""
site = structure[n]
neighs_dists = structure.get_neighbors(site, self.cutoff)
ds = sorted([i.nn_distance for i in neighs_dists])
ns = [ds[i + 1] - ds[i] for i in range(len(ds) - 1)]
d_max = ds[ns.index(max(ns))]
siw = []
for nn in neighs_dists:
s, dist = nn, nn.nn_distance
if dist < d_max + self.tol:
w = ds[0] / dist
siw.append(
{
"site": s,
"image": self._get_image(structure, s),
"weight": w,
"site_index": self._get_original_site(structure, s),
}
)
return siw
class EconNN(NearNeighbors):
"""
Determines the average effective coordination number for each cation in a
given structure using Hoppe's algorithm.
This method follows the procedure outlined in:
Hoppe, Rudolf. "Effective coordination numbers (ECoN) and mean fictive ionic
radii (MEFIR)." Zeitschrift für Kristallographie-Crystalline Materials
150.1-4 (1979): 23-52.
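    Example (an illustrative sketch; a hypothetical simple-cubic Po cell,
    in which only the six first-shell atoms carry significant ECoN
    weight):
        >>> from pymatgen.core.lattice import Lattice
        >>> from pymatgen.core.structure import Structure
        >>> s = Structure(Lattice.cubic(3.35), ["Po"], [[0, 0, 0]])
        >>> EconNN().get_cn(s, 0)
        6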
"""
def __init__(
self,
tol: float = 0.2,
cutoff: float = 10.0,
cation_anion: bool = False,
use_fictive_radius: bool = False,
):
"""
Args:
tol: Tolerance parameter for bond determination.
cutoff: Cutoff radius in Angstrom to look for near-neighbor atoms.
cation_anion: If set to True, will restrict bonding targets to
                sites with opposite or zero charge. Requires oxidation states
on all sites in the structure.
use_fictive_radius: Whether to use the fictive radius in the
EcoN calculation. If False, the bond distance will be used.
"""
self.tol = tol
self.cutoff = cutoff
self.cation_anion = cation_anion
self.use_fictive_radius = use_fictive_radius
@property
def structures_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Structure
objects?
"""
return True
@property
def molecules_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Molecule
objects?
"""
return True
@property
def extend_structure_molecules(self):
"""
Boolean property: Do Molecules need to be converted to Structures to use
this NearNeighbors class? Note: this property is not defined for classes
for which molecules_allowed == False.
"""
return True
def get_nn_info(self, structure, n):
"""
Get all near-neighbor sites as well as the associated image locations
and weights of the site with index n in structure.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near-neighbor
sites.
Returns:
            siw (list of dicts): each dictionary represents a coordinated
                site, its image location, and its weight.
"""
site = structure[n]
neighbors = structure.get_neighbors(site, self.cutoff)
if self.cation_anion and hasattr(site.specie, "oxi_state"):
            # filter out neighbors of like charge (except for neutral sites)
if site.specie.oxi_state >= 0:
neighbors = [n for n in neighbors if n.oxi_state <= 0]
elif site.specie.oxi_state <= 0:
neighbors = [n for n in neighbors if n.oxi_state >= 0]
if self.use_fictive_radius:
# calculate fictive ionic radii
firs = [_get_fictive_ionic_radius(site, neighbor) for neighbor in neighbors]
else:
# just use the bond distance
firs = [neighbor.nn_distance for neighbor in neighbors]
# calculate mean fictive ionic radius
mefir = _get_mean_fictive_ionic_radius(firs)
        # iteratively solve MEFIR; follows equation 4 in Hoppe's ECoN paper
prev_mefir = float("inf")
while abs(prev_mefir - mefir) > 1e-4:
# this is guaranteed to converge
prev_mefir = mefir
mefir = _get_mean_fictive_ionic_radius(firs, minimum_fir=mefir)
siw = []
for nn, fir in zip(neighbors, firs):
if nn.nn_distance < self.cutoff:
w = exp(1 - (fir / mefir) ** 6)
if w > self.tol:
bonded_site = {
"site": nn,
"image": self._get_image(structure, nn),
"weight": w,
"site_index": self._get_original_site(structure, nn),
}
siw.append(bonded_site)
return siw
def _get_fictive_ionic_radius(site: Site, neighbor: PeriodicNeighbor) -> float:
"""
Get fictive ionic radius.
Follows equation 1 of:
Hoppe, Rudolf. "Effective coordination numbers (ECoN) and mean fictive ionic
radii (MEFIR)." Zeitschrift für Kristallographie-Crystalline Materials
150.1-4 (1979): 23-52.
Args:
site: The central site.
        neighbor: The neighboring site.
Returns:
Hoppe's fictive ionic radius.
"""
r_h = _get_radius(site)
if r_h == 0:
r_h = _get_default_radius(site)
r_i = _get_radius(neighbor)
if r_i == 0:
r_i = _get_default_radius(neighbor)
return neighbor.nn_distance * (r_h / (r_h + r_i))
def _get_mean_fictive_ionic_radius(
fictive_ionic_radii: List[float],
minimum_fir: Optional[float] = None,
) -> float:
"""
Returns the mean fictive ionic radius.
    Follows equation 2 of:
Hoppe, Rudolf. "Effective coordination numbers (ECoN) and mean fictive ionic
radii (MEFIR)." Zeitschrift für Kristallographie-Crystalline Materials
150.1-4 (1979): 23-52.
Args:
fictive_ionic_radii: List of fictive ionic radii for a center site
and its neighbors.
minimum_fir: Minimum fictive ionic radius to use.
Returns:
Hoppe's mean fictive ionic radius.
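    Example (illustrative; two hypothetical fictive radii in Angstrom.
    The larger radius is strongly down-weighted by the exp(1 - x**6)
    factor, so the mean stays close to the smaller one):
        >>> round(_get_mean_fictive_ionic_radius([1.0, 1.1]), 3)
        1.032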
"""
if not minimum_fir:
minimum_fir = min(fictive_ionic_radii)
weighted_sum = 0.0
total_sum = 0.0
for fir in fictive_ionic_radii:
weighted_sum += fir * exp(1 - (fir / minimum_fir) ** 6)
total_sum += exp(1 - (fir / minimum_fir) ** 6)
return weighted_sum / total_sum
class CrystalNN(NearNeighbors):
"""
    This is a custom near-neighbor method intended for use in all kinds of
periodic structures (metals, minerals, porous structures, etc). It is based
on a Voronoi algorithm and uses the solid angle weights to determine the
probability of various coordination environments. The algorithm can also
modify probability using smooth distance cutoffs as well as Pauling
electronegativity differences. The output can either be the most probable
coordination environment or a weighted list of coordination environments.
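    Example (an illustrative sketch; a hypothetical CsCl-type cell
    without oxidation states, so default radii are used internally):
        >>> from pymatgen.core.lattice import Lattice
        >>> from pymatgen.core.structure import Structure
        >>> s = Structure(Lattice.cubic(4.11), ["Cs", "Cl"],
        ...               [[0, 0, 0], [0.5, 0.5, 0.5]])
        >>> CrystalNN().get_cn(s, 0)
        8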
"""
NNData = namedtuple("NNData", ["all_nninfo", "cn_weights", "cn_nninfo"])
def __init__(
self,
weighted_cn=False,
cation_anion=False,
distance_cutoffs=(0.5, 1),
x_diff_weight=3.0,
porous_adjustment=True,
search_cutoff=7,
fingerprint_length=None,
):
"""
Initialize CrystalNN with desired parameters. Default parameters assume
"chemical bond" type behavior is desired. For geometric neighbor
finding (e.g., structural framework), set (i) distance_cutoffs=None,
(ii) x_diff_weight=0.0 and (optionally) (iii) porous_adjustment=False
which will disregard the atomic identities and perform best for a purely
geometric match.
Args:
weighted_cn: (bool) if set to True, will return fractional weights
for each potential near neighbor.
cation_anion: (bool) if set True, will restrict bonding targets to
                sites with opposite or zero charge. Requires oxidation states
on all sites in the structure.
distance_cutoffs: ([float, float]) - if not None, penalizes neighbor
distances greater than sum of covalent radii plus
distance_cutoffs[0]. Distances greater than covalent radii sum
plus distance_cutoffs[1] are enforced to have zero weight.
x_diff_weight: (float) - if multiple types of neighbor elements are
possible, this sets preferences for targets with higher
electronegativity difference.
porous_adjustment: (bool) - if True, readjusts Voronoi weights to
better describe layered / porous structures
search_cutoff: (float) cutoff in Angstroms for initial neighbor
search; this will be adjusted if needed internally
fingerprint_length: (int) if a fixed_length CN "fingerprint" is
desired from get_nn_data(), set this parameter
"""
self.weighted_cn = weighted_cn
self.cation_anion = cation_anion
self.distance_cutoffs = distance_cutoffs
self.x_diff_weight = x_diff_weight if x_diff_weight is not None else 0
self.search_cutoff = search_cutoff
self.porous_adjustment = porous_adjustment
self.fingerprint_length = fingerprint_length
@property
def structures_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Structure
objects?
"""
return True
@property
def molecules_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Molecule
objects?
"""
return False
def get_nn_info(self, structure, n):
"""
Get all near-neighbor information.
Args:
structure: (Structure) pymatgen Structure
n: (int) index of target site
Returns:
siw (list of dicts): each dictionary provides information
about a single near neighbor, where key 'site' gives
access to the corresponding Site object, 'image' gives
the image location, and 'weight' provides the weight
that a given near-neighbor site contributes
to the coordination number (1 or smaller), 'site_index'
gives index of the corresponding site in
the original structure.
"""
nndata = self.get_nn_data(structure, n)
if not self.weighted_cn:
max_key = max(nndata.cn_weights, key=lambda k: nndata.cn_weights[k])
nn = nndata.cn_nninfo[max_key]
for entry in nn:
entry["weight"] = 1
return nn
for entry in nndata.all_nninfo:
weight = 0
for cn in nndata.cn_nninfo:
for cn_entry in nndata.cn_nninfo[cn]:
if entry["site"] == cn_entry["site"]:
weight += nndata.cn_weights[cn]
entry["weight"] = weight
return nndata.all_nninfo
def get_nn_data(self, structure, n, length=None):
"""
The main logic of the method to compute near neighbor.
Args:
structure: (Structure) enclosing structure object
n: (int) index of target site to get NN info for
length: (int) if set, will return a fixed range of CN numbers
Returns:
a namedtuple (NNData) object that contains:
- all near neighbor sites with weights
- a dict of CN -> weight
- a dict of CN -> associated near neighbor sites
"""
length = length or self.fingerprint_length
# determine possible bond targets
target = None
if self.cation_anion:
target = []
m_oxi = structure[n].specie.oxi_state
for site in structure:
if site.specie.oxi_state * m_oxi <= 0: # opposite charge
target.append(site.specie)
if not target:
raise ValueError("No valid targets for site within cation_anion constraint!")
# get base VoronoiNN targets
cutoff = self.search_cutoff
vnn = VoronoiNN(weight="solid_angle", targets=target, cutoff=cutoff)
nn = vnn.get_nn_info(structure, n)
# solid angle weights can be misleading in open / porous structures
# adjust weights to correct for this behavior
if self.porous_adjustment:
for x in nn:
x["weight"] *= x["poly_info"]["solid_angle"] / x["poly_info"]["area"]
# adjust solid angle weight based on electronegativity difference
if self.x_diff_weight > 0:
for entry in nn:
X1 = structure[n].specie.X
X2 = entry["site"].specie.X
if math.isnan(X1) or math.isnan(X2):
chemical_weight = 1
else:
# note: 3.3 is max deltaX between 2 elements
chemical_weight = 1 + self.x_diff_weight * math.sqrt(abs(X1 - X2) / 3.3)
entry["weight"] = entry["weight"] * chemical_weight
# sort nearest neighbors from highest to lowest weight
nn = sorted(nn, key=lambda x: x["weight"], reverse=True)
if nn[0]["weight"] == 0:
return self.transform_to_length(self.NNData([], {0: 1.0}, {0: []}), length)
# renormalize weights so the highest weight is 1.0
highest_weight = nn[0]["weight"]
for entry in nn:
entry["weight"] = entry["weight"] / highest_weight
# adjust solid angle weights based on distance
if self.distance_cutoffs:
r1 = _get_radius(structure[n])
for entry in nn:
r2 = _get_radius(entry["site"])
if r1 > 0 and r2 > 0:
d = r1 + r2
else:
warnings.warn(
"CrystalNN: cannot locate an appropriate radius, "
"covalent or atomic radii will be used, this can lead "
"to non-optimal results."
)
d = _get_default_radius(structure[n]) + _get_default_radius(entry["site"])
dist = np.linalg.norm(structure[n].coords - entry["site"].coords)
dist_weight = 0
cutoff_low = d + self.distance_cutoffs[0]
cutoff_high = d + self.distance_cutoffs[1]
if dist <= cutoff_low:
dist_weight = 1
elif dist < cutoff_high:
dist_weight = (math.cos((dist - cutoff_low) / (cutoff_high - cutoff_low) * math.pi) + 1) * 0.5
entry["weight"] = entry["weight"] * dist_weight
# sort nearest neighbors from highest to lowest weight
nn = sorted(nn, key=lambda x: x["weight"], reverse=True)
if nn[0]["weight"] == 0:
return self.transform_to_length(self.NNData([], {0: 1.0}, {0: []}), length)
for entry in nn:
entry["weight"] = round(entry["weight"], 3)
del entry["poly_info"] # trim
# remove entries with no weight
nn = [x for x in nn if x["weight"] > 0]
# get the transition distances, i.e. all distinct weights
dist_bins = []
for entry in nn:
if not dist_bins or dist_bins[-1] != entry["weight"]:
dist_bins.append(entry["weight"])
dist_bins.append(0)
# main algorithm to determine fingerprint from bond weights
cn_weights = {} # CN -> score for that CN
cn_nninfo = {} # CN -> list of nearneighbor info for that CN
for idx, val in enumerate(dist_bins):
if val != 0:
nn_info = []
for entry in nn:
if entry["weight"] >= val:
nn_info.append(entry)
cn = len(nn_info)
cn_nninfo[cn] = nn_info
cn_weights[cn] = self._semicircle_integral(dist_bins, idx)
# add zero coord
cn0_weight = 1.0 - sum(cn_weights.values())
if cn0_weight > 0:
cn_nninfo[0] = []
cn_weights[0] = cn0_weight
return self.transform_to_length(self.NNData(nn, cn_weights, cn_nninfo), length)
def get_cn(self, structure, n, use_weights=False):
"""
Get coordination number, CN, of site with index n in structure.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine CN.
use_weights (boolean): flag indicating whether (True)
to use weights for computing the coordination number
or not (False, default: each coordinated site has equal
weight).
Returns:
cn (integer or float): coordination number.
"""
if self.weighted_cn != use_weights:
raise ValueError("The weighted_cn parameter and use_weights " "parameter should match!")
return super().get_cn(structure, n, use_weights)
def get_cn_dict(self, structure, n, use_weights=False):
"""
Get coordination number, CN, of each element bonded to site with index n in structure
Args:
structure (Structure): input structure
n (integer): index of site for which to determine CN.
use_weights (boolean): flag indicating whether (True)
to use weights for computing the coordination number
or not (False, default: each coordinated site has equal
weight).
Returns:
cn (dict): dictionary of CN of each element bonded to site
"""
if self.weighted_cn != use_weights:
raise ValueError("The weighted_cn parameter and use_weights " "parameter should match!")
return super().get_cn_dict(structure, n, use_weights)
@staticmethod
def _semicircle_integral(dist_bins, idx):
"""
An internal method to get an integral between two bounds of a unit
semicircle. Used in algorithm to determine bond probabilities.
Args:
dist_bins: (list of float) list of all possible bond weights
idx: (int) index of starting bond weight
Returns:
(float) integral of portion of unit semicircle
"""
r = 1
x1 = dist_bins[idx]
x2 = dist_bins[idx + 1]
if dist_bins[idx] == 1:
area1 = 0.25 * math.pi * r ** 2
else:
area1 = 0.5 * ((x1 * math.sqrt(r ** 2 - x1 ** 2)) + (r ** 2 * math.atan(x1 / math.sqrt(r ** 2 - x1 ** 2))))
area2 = 0.5 * ((x2 * math.sqrt(r ** 2 - x2 ** 2)) + (r ** 2 * math.atan(x2 / math.sqrt(r ** 2 - x2 ** 2))))
return (area1 - area2) / (0.25 * math.pi * r ** 2)
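# Illustrative sketch (not part of the original module): the areas returned by
# _semicircle_integral telescope between a quarter circle at weight 1 and zero
# at weight 0, so the CN weights over any descending bin list sum to ~1.
def _demo_semicircle_partition():
    dist_bins = [1, 0.8, 0.5, 0.2, 0]
    total = sum(CrystalNN._semicircle_integral(dist_bins, i) for i in range(len(dist_bins) - 1))
    assert abs(total - 1.0) < 1e-9
    return total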
@staticmethod
def transform_to_length(nndata, length):
"""
Given NNData, transforms data to the specified fingerprint length
Args:
nndata: (NNData)
length: (int) desired length of NNData
Returns:
NNData: the input, padded in place with zero-weight entries up to the desired length
"""
if length is None:
return nndata
if length:
for cn in range(length):
if cn not in nndata.cn_weights:
nndata.cn_weights[cn] = 0
nndata.cn_nninfo[cn] = []
return nndata
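# Illustrative sketch (not part of the original module): padding an NNData
# fingerprint fills missing coordination numbers with zero weight and empty
# neighbor lists; assumes NNData is the namedtuple attached to CrystalNN.
def _demo_transform_to_length():
    nndata = CrystalNN.NNData([], {4: 1.0}, {4: []})
    padded = CrystalNN.transform_to_length(nndata, 6)
    return padded.cn_weights  # {4: 1.0, 0: 0, 1: 0, 2: 0, 3: 0, 5: 0}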
def _get_default_radius(site):
"""
An internal method to get a "default" covalent/element radius
Args:
site: (Site)
Returns:
Covalent radius of element on site, or Atomic radius if unavailable
"""
try:
return CovalentRadius.radius[site.specie.symbol]
except Exception:
return site.specie.atomic_radius
def _get_radius(site):
"""
An internal method to get the expected radius for a site with
oxidation state.
Args:
site: (Site)
Returns:
Oxidation-state dependent radius: ionic, covalent, or atomic.
Returns 0 if no oxidation state or appropriate radius is found.
"""
if hasattr(site.specie, "oxi_state"):
el = site.specie.element
oxi = site.specie.oxi_state
if oxi == 0:
return _get_default_radius(site)
if oxi in el.ionic_radii:
return el.ionic_radii[oxi]
# e.g., oxi = 2.667, average together 2+ and 3+ radii
if int(math.floor(oxi)) in el.ionic_radii and int(math.ceil(oxi)) in el.ionic_radii:
oxi_low = el.ionic_radii[int(math.floor(oxi))]
oxi_high = el.ionic_radii[int(math.ceil(oxi))]
x = oxi - int(math.floor(oxi))
return (1 - x) * oxi_low + x * oxi_high
if oxi > 0 and el.average_cationic_radius > 0:
return el.average_cationic_radius
if el.average_anionic_radius > 0 > oxi:
return el.average_anionic_radius
else:
warnings.warn(
"No oxidation states specified on sites! For better results, set "
"the site oxidation states in the structure."
)
return 0
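# Illustrative sketch (not part of the original module): the fractional
# oxidation-state branch above mixes the flanking integer-state ionic radii
# linearly; the radii below are rough six-coordinate values for illustration.
def _demo_fractional_radius():
    r_fe2, r_fe3 = 0.78, 0.645  # approximate Fe2+ / Fe3+ ionic radii (angstrom)
    oxi = 2.667  # e.g. the average Fe oxidation state in Fe3O4
    x = oxi - math.floor(oxi)
    return (1 - x) * r_fe2 + x * r_fe3  # ~0.69, two thirds of the way to Fe3+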
class CutOffDictNN(NearNeighbors):
"""
A very basic NN class using a dictionary of fixed
cut-off distances. Can also be used with no dictionary
defined for a Null/Empty NN class.
"""
def __init__(self, cut_off_dict=None):
"""
Args:
cut_off_dict (Dict[str, float]): a dictionary
of cut-off distances, e.g. {('Fe','O'): 2.0} for
a maximum Fe-O bond length of 2.0 Angstroms.
Note that if your structure is oxidation state
decorated, the cut-off distances will have to
explicitly include the oxidation state, e.g.
{('Fe2+', 'O2-'): 2.0}
"""
self.cut_off_dict = cut_off_dict or {}
# for convenience
self._max_dist = 0.0
lookup_dict = defaultdict(dict)
for (sp1, sp2), dist in self.cut_off_dict.items():
lookup_dict[sp1][sp2] = dist
lookup_dict[sp2][sp1] = dist
if dist > self._max_dist:
self._max_dist = dist
self._lookup_dict = lookup_dict
@property
def structures_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Structure
objects?
"""
return True
@property
def molecules_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Molecule
objects?
"""
return True
@property
def extend_structure_molecules(self):
"""
Boolean property: Do Molecules need to be converted to Structures to use
this NearNeighbors class? Note: this property is not defined for classes
for which molecules_allowed == False.
"""
return True
@staticmethod
def from_preset(preset):
"""
Initialise a CutOffDictNN according to a preset set of cut-offs.
Args:
preset (str): A preset name. The list of supported presets are:
- "vesta_2019": The distance cut-offs used by the VESTA
visualisation program.
Returns:
A CutOffDictNN using the preset cut-off dictionary.
"""
if preset == "vesta_2019":
cut_offs = loadfn(os.path.join(_directory, "vesta_cutoffs.yaml"))
return CutOffDictNN(cut_off_dict=cut_offs)
raise ValueError("Unrecognised preset: {}".format(preset))
def get_nn_info(self, structure, n):
"""
Get all near-neighbor sites as well as the associated image locations
and weights of the site with index n in structure.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near-neighbor
sites.
Returns:
siw (list of tuples (Site, array, float)): tuples, each one
of which represents a coordinated site, its image location,
and its weight.
"""
site = structure[n]
neighs_dists = structure.get_neighbors(site, self._max_dist)
nn_info = []
for nn in neighs_dists:
n_site = nn
dist = nn.nn_distance
neigh_cut_off_dist = self._lookup_dict.get(site.species_string, {}).get(n_site.species_string, 0.0)
if dist < neigh_cut_off_dist:
nn_info.append(
{
"site": n_site,
"image": self._get_image(structure, n_site),
"weight": dist,
"site_index": self._get_original_site(structure, n_site),
}
)
return nn_info
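# Illustrative usage sketch (not part of the original module): query neighbors
# with explicit cut-offs or with the VESTA preset; `structure` is assumed to
# be a pymatgen Structure built elsewhere. Note that each returned entry
# carries the bond distance in its "weight" field, per get_nn_info above.
def _demo_cutoff_dict_nn(structure):
    nn = CutOffDictNN(cut_off_dict={("Fe", "O"): 2.0, ("Fe", "Fe"): 2.6})
    vesta_nn = CutOffDictNN.from_preset("vesta_2019")
    return nn.get_nn_info(structure, 0), vesta_nn.get_nn_info(structure, 0)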
class Critic2NN(NearNeighbors):
"""
Performs a topological analysis using critic2 to obtain
neighbor information, using a sum of atomic charge
densities. If an actual charge density is available
(e.g. from a VASP CHGCAR), see Critic2Caller directly
instead.
"""
def __init__(self):
"""
Init for Critic2NN.
"""
# we cache the last-used structure, in case user
# calls get_nn_info() repeatedly for different
# sites in the same structure to save redundant
# computations
self.__last_structure = None
self.__last_bonded_structure = None
@property
def structures_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Structure
objects?
"""
return True
@property
def molecules_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Molecule
objects?
"""
return True
@property
def extend_structure_molecules(self):
"""
Boolean property: Do Molecules need to be converted to Structures to use
this NearNeighbors class? Note: this property is not defined for classes
for which molecules_allowed == False.
"""
return True
def get_bonded_structure(self, structure, decorate=False):
"""
:param structure: Input structure
:param decorate: Whether to decorate the structure
:return: Bonded structure
"""
# not a top-level import because critic2 is an optional
# dependency, only want to raise an import error if
# Critic2NN() is used
from pymatgen.command_line.critic2_caller import Critic2Caller
if structure == self.__last_structure:
sg = self.__last_bonded_structure
else:
c2_output = Critic2Caller(structure).output
sg = c2_output.structure_graph()
self.__last_structure = structure
self.__last_bonded_structure = sg
if decorate:
order_parameters = [self.get_local_order_parameters(structure, n) for n in range(len(structure))]
sg.structure.add_site_property("order_parameters", order_parameters)
return sg
def get_nn_info(self, structure, n):
"""
Get all near-neighbor sites as well as the associated image locations
and weights of the site with index n in structure.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near-neighbor
sites.
Returns:
siw (list of tuples (Site, array, float)): tuples, each one
of which represents a coordinated site, its image location,
and its weight.
"""
sg = self.get_bonded_structure(structure)
return [
{
"site": connected_site.site,
"image": connected_site.jimage,
"weight": connected_site.weight,
"site_index": connected_site.index,
}
for connected_site in sg.get_connected_sites(n)
]
def metal_edge_extender(mol_graph):
"""
Function to identify and add missed coordinate bond edges for metals
Args:
mol_graph: pymatgen.analysis.graphs.MoleculeGraph object
Returns:
mol_graph: pymatgen.analysis.graphs.MoleculeGraph object with additional
metal bonds (if any found) added
"""
metal_sites = {"Li": {}, "Mg": {}, "Ca": {}, "Zn": {}, "B": {}, "Al": {}}
coordinators = ["O", "N", "F", "S", "Cl"]
num_new_edges = 0
for idx in mol_graph.graph.nodes():
if mol_graph.graph.nodes()[idx]["specie"] in metal_sites:
metal_sites[mol_graph.graph.nodes()[idx]["specie"]][idx] = [
site[2] for site in mol_graph.get_connected_sites(idx)
]
for metal in metal_sites:
for idx in metal_sites[metal]:
for ii, site in enumerate(mol_graph.molecule):
if ii != idx and ii not in metal_sites[metal][idx]:
if str(site.specie) in coordinators:
if site.distance(mol_graph.molecule[idx]) < 2.5:
mol_graph.add_edge(idx, ii)
num_new_edges += 1
metal_sites[metal][idx].append(ii)
total_metal_edges = 0
for metal in metal_sites:
for idx in metal_sites[metal]:
total_metal_edges += len(metal_sites[metal][idx])
if total_metal_edges == 0:
for metal in metal_sites:
for idx in metal_sites[metal]:
for ii, site in enumerate(mol_graph.molecule):
if ii != idx and ii not in metal_sites[metal][idx]:
if str(site.specie) in coordinators:
if site.distance(mol_graph.molecule[idx]) < 3.5:
mol_graph.add_edge(idx, ii)
num_new_edges += 1
metal_sites[metal][idx].append(ii)
total_metal_edges = 0
for metal in metal_sites:
for idx in metal_sites[metal]:
total_metal_edges += len(metal_sites[metal][idx])
return mol_graph
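# Illustrative usage sketch (not part of the original module): build a
# MoleculeGraph with a local_env strategy and let metal_edge_extender add any
# missed metal-coordinator edges; `mol` is assumed to be a pymatgen Molecule
# containing one of the metals handled above (e.g. Li), and OpenBabelNN is
# the strategy defined earlier in this module.
def _demo_metal_edge_extender(mol):
    from pymatgen.analysis.graphs import MoleculeGraph
    mg = MoleculeGraph.with_local_env_strategy(mol, OpenBabelNN())
    return metal_edge_extender(mg)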
|
richardtran415/pymatgen
|
pymatgen/analysis/local_env.py
|
Python
|
mit
| 171,647
|
[
"Gaussian",
"Jmol",
"VASP",
"pymatgen"
] |
d212a21e20d699b0e91f5799e5cdfb5967f22a5868f0e5c4d4b9390f8edc0f7e
|
"""
Building candidate models
"""
import itertools
from pycalphad.core.cache import cacheit
import symengine
from espei.sublattice_tools import interaction_test
def make_successive(xs):
"""
Return a list of successive combinations
Parameters
----------
xs : list
List of elements, e.g. [X, Y, Z]
Returns
-------
list
List of combinations where each combination includes all the preceding elements
Examples
--------
>>> make_successive(['W', 'X', 'Y', 'Z'])
[['W'], ['W', 'X'], ['W', 'X', 'Y'], ['W', 'X', 'Y', 'Z']]
"""
return [xs[:(i+1)] for i in range(len(xs))]
@cacheit # This can be expensive if run from an inner loop, so it is cached
def build_feature_sets(temperature_features, interaction_features):
"""
Return a list of broadcasted features
Parameters
----------
temperature_features : list
List of temperature features that will become a successive_list, such as [TlogT, T-1, T2]
interaction_features : list
List of interaction features that will become a successive_list, such as [YS, YS*Z, YS*Z**2]
Returns
-------
list
Notes
-----
This allows two sets of features, e.g. [TlogT, T-1, T2] and [YS, YS*Z, YS*Z**2]
and generates a list of feature sets where the temperatures and interactions
are broadcasted successively.
Generates candidate feature sets like:
L0: A + BT, L1: A
L0: A , L1: A + BT
but **not** lists that are not successive:
L0: A + BT, L1: Nothing, L2: A
L0: Nothing, L1: A + BT
There's still some debate about whether it makes sense from an information theory
perspective to add an L1 B term without an L0 B term. However, this might be
more representative of how people usually model thermodynamics.
Does not distribute multiplication/sums or make assumptions about the elements
of the feature lists. They can be strings, ints, objects, tuples, etc..
The number of features (related to the complexity) is a geometric series.
For :math:`N` temperature features and :math:`M` interaction features, the total
number of feature sets should be :math:`N(1-N^M)/(1-N)`. If :math:`N=1`, then there
are :math:`M` total feature sets.
"""
# [[A], [A, B], [A, B, C], ...]
temps = make_successive(temperature_features)
# [ [temps for L0], [temps for L1], [temps for L2], ...]
feats = [list(itertools.product(temps, [inter])) for inter in interaction_features]
# [ [temps for L0], [temps for L0 and L1], [temps for L0, L1 and L2], ...
model_sets = make_successive(feats)
# models that are not distributed or summed
candidate_feature_sets = list(itertools.chain(*[list(itertools.product(*model_set)) for model_set in model_sets]))
candidate_models = []
for feat_set in candidate_feature_sets:
# multiply the interactions through and flatten the feature list
candidate_models.append(list(itertools.chain(*[[param_order[1]*temp_feat for temp_feat in param_order[0]] for param_order in feat_set])))
return candidate_models
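# Illustrative sketch (not part of the original module): for N temperature
# features and M interaction features, the number of candidates follows the
# geometric series N + N**2 + ... + N**M quoted in the docstring above.
def _demo_feature_set_count():
    T = symengine.Symbol('T')
    YS, Z = symengine.Symbol('YS'), symengine.Symbol('Z')
    temps = (symengine.S.One, T)  # N = 2
    inters = (YS, YS*Z, YS*(Z**2))  # M = 3
    assert len(build_feature_sets(temps, inters)) == 2 + 4 + 8  # N(N**M - 1)/(N - 1) = 14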
def build_candidate_models(configuration, features):
"""
Return a dictionary of features and candidate models
Parameters
----------
configuration : tuple
Configuration tuple, e.g. (('A', 'B', 'C'), 'A')
features : dict
Dictionary of {str: list} of generic features for a model, not
considering the configuration. For example:
{'CPM_FORM': [symengine.S.One, v.T, v.T**2, v.T**3]}
Returns
-------
dict
Dictionary of {feature: [candidate_models]}
Notes
-----
Currently only works for binary and ternary interactions.
Candidate models match the following spec:
1. Candidates with multiple features specified will have those features broadcasted successively.
2. Orders of parameters (L0, L0 and L1, ...) are enumerated with temperature features drawn from the same list.
Note that high orders of parameters with multiple temperatures are not
required to contain all the temperatures of the low order parameters. For
example, the following parameters can be generated
L0: A
L1: A + BT
"""
feature_candidate_models = {}
if not interaction_test(configuration): # endmembers only
for feature_name, temperature_features in features.items():
interaction_features = (symengine.S.One,)
feature_candidate_models[feature_name] = build_feature_sets(temperature_features, interaction_features)
elif interaction_test(configuration, 2): # has a binary interaction
YS = symengine.Symbol('YS') # Product of all nonzero site fractions in all sublattices
Z = symengine.Symbol('Z')
for feature_name, temperature_features in features.items():
# generate increasingly complex interactions (power of Z is Redlich-Kister order)
interaction_features = (YS, YS*Z, YS*(Z**2), YS*(Z**3)) # L0, L1, L2, L3
feature_candidate_models[feature_name] = build_feature_sets(temperature_features, interaction_features)
elif interaction_test(configuration, 3): # has a ternary interaction
# Ternary interactions should have exactly two interaction sets:
# 1. a single symmetric ternary parameter (YS)
YS = symengine.Symbol('YS') # Product of all nonzero site fractions in all sublattices
# 2. L0, L1, and L2 parameters
V_I, V_J, V_K = symengine.Symbol('V_I'), symengine.Symbol('V_J'), symengine.Symbol('V_K')
symmetric_interactions = (YS,) # symmetric L0
for feature_name, temperature_features in features.items():
# We are ignoring cases where we have L0 == L1 != L2 (and similar
# permutations) because these cases (where two elements have exactly the
# same behavior) don't exist in reality. The symmetric case is
# mainly for small corrections and dimensionality reduction.
# Because we don't want our parameter interactions to be successive
# (i.e. products of symmetric and asymmetric terms), we build the candidates in two steps
tern_ix_cands = []
tern_ix_cands += build_feature_sets(temperature_features, symmetric_interactions)
# special handling for asymmetric features, we don't want a successive V_I, V_J, V_K, but all three should be present
asym_feats = (
build_feature_sets(temperature_features, (YS * V_I,)), # asymmetric L0
build_feature_sets(temperature_features, (YS * V_J,)), # asymmetric L1
build_feature_sets(temperature_features, (YS * V_K,)), # asymmetric L2
)
for v_i_feats, v_j_feats, v_k_feats in zip(*asym_feats):
tern_ix_cands.append(v_i_feats + v_j_feats + v_k_feats)
feature_candidate_models[feature_name] = tern_ix_cands
else:
raise ValueError(f"Interaction order not known for configuration {configuration}")
return feature_candidate_models
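# Illustrative usage sketch (not part of the original module): candidates for
# a binary interaction with a two-feature temperature basis; `v.T` is
# pycalphad's temperature state variable.
def _demo_build_candidate_models():
    from pycalphad import variables as v
    features = {'CPM_FORM': [symengine.S.One, v.T]}
    candidates = build_candidate_models((('A', 'B'), 'A'), features)
    return candidates['CPM_FORM']  # 14 candidate feature lists, per the series above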
|
PhasesResearchLab/ESPEI
|
espei/parameter_selection/model_building.py
|
Python
|
mit
| 7,017
|
[
"pycalphad"
] |
d28866d42cf72ab372c89da1c645169b712a27820ce4e03be8416fa01e28d44d
|
#! /usr/bin/env python
########################################################################
# File : dirac-admin-set-site-protocols
# Author : Stuart Paterson
########################################################################
"""
Defined protocols for each SE for a given site.
Example:
$ dirac-admin-set-site-protocols --Site=LCG.IN2P3.fr SRM2
"""
import DIRAC
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
@Script()
def main():
Script.registerSwitch("", "Site=", "Site for which protocols are to be set (mandatory)")
# Registering arguments will automatically add their description to the help menu
Script.registerArgument(["Protocol: SE access protocol"], mandatory=False)
switches, args = Script.parseCommandLine(ignoreErrors=True)
site = None
for switch in switches:
if switch[0].lower() == "site":
site = switch[1]
if not site or not args:
Script.showHelp(exitCode=1)
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
exitCode = 0
result = diracAdmin.setSiteProtocols(site, args, printOutput=True)
if not result["OK"]:
print("ERROR: %s" % result["Message"])
exitCode = 2
DIRAC.exit(exitCode)
if __name__ == "__main__":
main()
|
ic-hep/DIRAC
|
src/DIRAC/Interfaces/scripts/dirac_admin_set_site_protocols.py
|
Python
|
gpl-3.0
| 1,310
|
[
"DIRAC"
] |
6eacb145bab78334ef9e77221576ce579c947cc1b218576b99034a5633580ee8
|
import os, sys, math
import numpy as np
import scipy
import pylab
import scipy.optimize
import lib.LammpsIO as lmp_io
import lib.DataAnalysis as da
import lib.DataMorphing as dm
def generate_interaction_list(num_of_types):
int_set = []
for ii in xrange(1, num_of_types+1):
for iii in xrange(ii, num_of_types+1):
int_set.append([ii, iii])
return int_set
def interaction_list_from_file(interaction_filename):
int_set = []
LIST_IN = open(interaction_filename, 'r')
for line in LIST_IN:
if line[0:1] != "#":
NewRow = (line.strip()).split()
if len(NewRow) == 2:
int_set.append([int(NewRow[0]), int(NewRow[1])])
LIST_IN.close()
return int_set
#-----------------------------------------------------------------------------------------------------
def read_in_rdf_file(filename, number_of_types, interaction_list):
""" subroutine to read in the RDF file - note, the format must be distance is first column, and RDF is the third column
ASSUMPTION: ALL RDF FILES HAVE THE SAME NUMBER OF BINS AND CUTOFF. """
index = 0
print "reading RDF %s" % (filename)
numofbins, cutoff, o = lmp_io.get_number_of_bins_and_cutoff("%s.1.1" % (filename), 0)
rdf_array = np.zeros((number_of_types+1, number_of_types+1, numofbins+1))
for i in interaction_list:
LIST_IN = open("%s.%d.%d" % (filename, i[0], i[1]), 'r')
index = 0
for line in LIST_IN:
NewRow = (line.strip()).split()
mystring = NewRow[0][0:1]
if mystring != "#":
if len(NewRow)>2:
index += 1
rdf_array[i[0]][i[1]][index] = float(NewRow[2])
LIST_IN.close()
return rdf_array, int(numofbins), float(cutoff)
def read_CG_log_file(CG_file, label="Press"):
""" reads a lammps CG thermodynamic log file, and calculates the average pressure """
print "CG LOG FILE LABEL:", label
index = 0
pressure_total = 0
pressure_number = 0
LIST_IN = open(CG_file,'r')
for line in LIST_IN:
if line[0] != '#':
NewRow = (line.strip()).split()
number_of_cols = len(NewRow)
for n in xrange(0, number_of_cols):
mystring = NewRow[n][0:len(label)]
# if (mystring == "Pre"):
# print "Pressure =", float(NewRow[n+2])
# if (mystring == "Pzz"):
# print "Pzz =", float(NewRow[n+2])
if (mystring == label):
#print label, len(label)
index += 1
if (index > 100):
# ignore the first 100 values
pressure_total += float(NewRow[n+2])
pressure_number += 1
LIST_IN.close()
final_pressure = pressure_total / pressure_number
print "For %d pressure calculations of the CG system, the average pressure (%s) is %f bar" % (pressure_number, label, final_pressure)
return final_pressure
def modify_lammps_in_file(in_file, out_file, number, interaction_list, num_of_types):
""" Create a new lammps file that will read the next iteration of potentials """
print "modify_lammps_in_file", in_file, number, interaction_list
count = 0
NEW_FILE = open("%s.%s" % (out_file, number+1),'w+')
# LIST_IN = open("%s.%s" % (in_file, number_old), 'r')
LIST_IN = open("%s" % (in_file), 'r')
for line in LIST_IN:
NewRow = (line.strip()).split()
if (len(NewRow) > 0):
if NewRow[0].lower() == "pair_coeff":
if count < 1:
count += 1
for ii in xrange(1, num_of_types+1):
for iii in xrange(ii, num_of_types+1):
if [ii, iii] in interaction_list:
print "pair_coeff %d %d pot.%d.new.%d.%d TABLE_%d.%d \n" % (ii, iii, number+1, ii, iii, ii, iii)
NEW_FILE.write("pair_coeff %d %d pot.%d.new.%d.%d TABLE_%d.%d \n" % (ii, iii, number+1, ii, iii, ii, iii))
else:
print "pair_coeff %d %d pot.converged.%d.%d TABLE_%d.%d \n" % (ii,iii,ii,iii,ii,iii)
NEW_FILE.write("pair_coeff %d %d pot.converged.%d.%d TABLE_%d.%d \n" % (ii,iii,ii,iii,ii,iii))
else:
NEW_FILE.write("%s\n" % (line.strip()))
LIST_IN.close()
NEW_FILE.close() # ensure files are close before using 'sed' so that buffers are written to disk.
os.system("sed -i s/.%d.rdf/.%d.rdf/g %s.%d" % (number, number+1, out_file, number+1))
os.system("sed -i s/prod%d/prod%d/g %s.%d" % (number, number+1, out_file, number+1))
def calc_pressure_correction(new_g_r, numofbins, DeltaR, number_of_types_ii, number_of_types_iii, scale_factor, p_now, p_target, volume, temperature):
print "P+) Applying pressure function."
pressure_pot = np.zeros((numofbins+1))
# apply pressure correction if requested
rcut = float(numofbins) * float(DeltaR)
integral = 0.0
x = 0.0
nktv2p = 68568.415 #LAMMPS unit conversion [no. atoms per volume] -> [Bar]
#bar_to_SI = 0.06022/4.1868 # 1bar=0.06022 kJ /(nm mol) - then converted to Kcal mol / (nm mol)
for i in xrange(1, int(numofbins+1)):
x = i * DeltaR
if len(new_g_r) > i: #RDF == G(r)
integral += x * x * x * DeltaR * new_g_r[i] #Eq. 6 Fu et al., 164106, 2012
partDens_ii = number_of_types_ii / volume
partDens_iii = number_of_types_iii / volume
# integral += (delta_r / 2 * rdf_cur[max/delta_r]*max*max*max)
# print "pref values:"
# print math.pi, partDens_ii, partDens_iii, integral
pref = -3 * rcut * (p_now - p_target) * 1 / nktv2p
pref /= 2 * math.pi * partDens_ii * partDens_iii * integral
# use max(pref, +-0.1kt) as prefactor
temp = pref
kB = 0.0019858775
kBT = kB * temperature #0.0019872067
print "Pressure correction factor1: A =", pref
if temp < 0:
temp = -1 * temp
if temp > (0.1 * kBT):
if (pref > 0):
pref = 0.1 * kBT
else:
pref = -0.1 * kBT
pref = pref * scale_factor
print "Pressure correction factor: A =", pref, scale_factor
for i in xrange(0, numofbins+1):
x = i * DeltaR
pressure_pot[i] = pref * (1 - x / rcut)
return pressure_pot
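# Illustrative sketch (not part of the original script): the correction built
# above is a linear ramp dU(r) = A * (1 - r / rcut) that vanishes at the
# cutoff, with the prefactor A computed from the pressure mismatch (Eq. 6 of
# Fu et al. as cited above) and capped at +/- 0.1 kBT before scaling.
def _demo_pressure_ramp(A, rcut, numofbins, DeltaR):
    return [A * (1.0 - (i * DeltaR) / rcut) for i in xrange(0, numofbins + 1)]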
def update_one_file(out_path, target_g_r, new_g_r, old_distance, old_potential, num_of_types, number, DeltaR, numofbins, number_of_types1, number_of_types2,
lattice, LJ_file_flag, p_flag, p_now, p_target, temperature, atom1, atom2):
number = int(number)
potential = np.zeros((numofbins+1))
derivative = np.zeros((numofbins+1))
new_number = number + 1
volume = float(lattice[0]) * float(lattice[1]) * float(lattice[2])
print "Lengths: ", len(target_g_r), len(new_g_r), len(old_distance), len(old_potential), numofbins
index = length = 0
x_data = np.zeros((numofbins+1))
y_data = np.zeros((numofbins+1))
success = 0
# smooth the new CG radial distribution function and calculate where the old CG rdf starts (it will be zero at low distance values).
filtered_rdf = dm.smooth_data(new_g_r)
filtered_rdf = np.append(filtered_rdf, 1)
conversion_extrapolate_tmp = {}
pressure_pot = np.zeros((numofbins+1))
if abs(float(p_flag)) > 0.00001:
print "FabMD: P+) Applying pressure function."
pressure_pot = calc_pressure_correction(new_g_r, numofbins, DeltaR, number_of_types1, number_of_types2, abs(p_flag), p_now, p_target, volume, temperature)
else:
print "FabMD: P-) Not applying any pressure correction."
# use_data = 0
if float(p_flag) < -0.00001:
print "FabMD: I-) IBI is disabled!"
pot_write_threshold = -1 # slot where we start the pot functions
kB = 0.0019858775
for i in xrange(0, numofbins+1):
# print old_distance[1], i, i*DeltaR
if old_distance[1] <= ((i+0.1) * DeltaR): #0.1 is here to prevent tiny bugs in float comparisons, causing the list to get shorter and shorter...
if pot_write_threshold == -1:
pot_write_threshold = i
length += 1
# the IBI update to the potential
target_g_r_i = 1.0
if len(target_g_r) > i:
target_g_r_i = target_g_r[i]
fri = 1.0 #filtered rdf shorthand for beyond the cutoff.
if len(filtered_rdf) > i:
fri = filtered_rdf[i]
if float(p_flag) < -0.00001: # Disable IBI part.
print "old potential:", old_potential[length]
print "pressure modification:", pressure_pot[i-1]
potential[i] = old_potential[length] + pressure_pot[i-1]
#print i, (abs(target_g_r_i) > 0), (fri > 0.15), i*DeltaR, old_potential[length], pressure_pot[i]
if (abs(target_g_r_i) > 0) and (fri > 0.15):
if float(p_flag) > -0.00001: # Enable IBI part.
# print "FabMD: I+) IBI is enabled!"
potential[i] = old_potential[length] + (kB * temperature) * math.log(fri / target_g_r_i) + pressure_pot[i-1]
# Debug check
# if abs(old_distance[length] - i*DeltaR)>0.00001:
# print "Error: old_distance seems to be wrongly mapped!"
# exit()
x_data[index] = i * DeltaR
y_data[index] = potential[i]
index += 1
#print i, potential[i]
else:
# this array indicates which values need an extrapolation for the potential and for the forces (negative of the potential derivative) - defined as where
# the RDF is less than 0.15 yet the old potential file still has values.
conversion_extrapolate_tmp[i] = 1
#exit()
x_data.resize((index))
y_data.resize((index))
dy = da.derivatives(x_data, y_data)
# print y_data, dy, len(y_data), len(dy)
# exit()
square_residual = 0
# soft Lennard-Jones 6-3 force function, used both for fitting and for the low-distance extrapolation below
fitfunc = lambda p, x: - 6 * (( ( 4 * p[0] * p[1]**6) / x**7) - ( (4 * p[0] * p[1]**3) / (2*x**4)) )
if LJ_file_flag == 1:
# read in Lennard-Jones parameters from file if requested (format as written by the LJ_OUT block below).
parameters = {0: {}, 1: {}}
LJ_IN = open("LJ_parameters", 'r')
for line in LJ_IN:
NewRow = (line.strip()).split()
if (int(NewRow[2]) == atom1) and (int(NewRow[3]) == atom2):
parameters[0][1] = NewRow[6].rstrip(',')
parameters[1][1] = NewRow[9]
LIST_closed = LJ_IN.close()
p1 = np.array([float(parameters[0][1]), float(parameters[1][1])])
else:
# fitting the potential derivative (i.e. negative forces) to a soft Lennard-Jones 6-3 potential (i.e. 7-4 when differentiated)
errfunc = lambda p, x, y: fitfunc(p, x) - y
#print "X_DATA = ", x_data, dy, y_data
p0 = np.array([0.5, 4.5]) #was 0.5,4.5
p1, success = scipy.optimize.leastsq(errfunc, p0[:], maxfev=5000, args=(x_data, dy)) #use [:int(4.0/DeltaR)] to optimize up to a cutoff of 4.
if success == 0:
print "Scipy.optimize did not manage to converge the fit on dataset", atom1, atom2, "! Exiting now."
exit()
LJ_OUT = open("%s/LJ_parameters" % (out_path),'w')
LJ_OUT.write("LJ PARAMETERS %d %d p0 = %f, p1 = %f\n" % (atom1, atom2, p1[0], p1[1]))
LJ_OUT.close()
for i in xrange(numofbins+1, 1, -1):
if i in conversion_extrapolate_tmp.keys(): #77-31
#print i
if conversion_extrapolate_tmp[i] > 0:
new_distance = i * DeltaR
# These Lennard-Jones forces are then numerically integrated to get the potential
derivative[i] = -np.abs(fitfunc(p1, new_distance))
diff = x_data[0] - new_distance
ave = 0.5 * fitfunc(p1, new_distance) - 0.5 * dy[0]
r_y = np.abs(y_data[0] - diff * ave)
potential[i] = r_y
# print i, derivative[i], potential[i], "!"
index = 0
for i in xrange(pot_write_threshold, numofbins+1):
if i not in conversion_extrapolate_tmp.keys():
derivative[i] = dy[index]
index += 1
index = 0
for i in xrange(0, numofbins+1):
if len(derivative) > i:
if abs(derivative[i]) > 0:
index += 1
# determining the number of potential values
lmp_io.write_pot_file("%s/pot.%d.new.%d.%d" % (out_path, new_number, atom1, atom2), derivative, potential, numofbins, DeltaR, atom1, atom2, index)
#first index was numofbins
#-----------------------------------------------------------------------------------------------------
def compute_update(out_path, target_g_r, new_g_r, old_distance, old_potential, num_of_types, number, DeltaR, numofbins, number_of_types, lattice, LJ_file_flag,
p_flag, p_now, p_target, temperature, interaction_list):
""" This subroutines performs the IBI. """
print "PFlag = ", p_flag
#go up to numofbins iters at all times!
for i in interaction_list:
update_one_file(out_path, target_g_r[i[0]][i[1]], new_g_r[i[0]][i[1]], old_distance[i[0]][i[1]], old_potential[i[0]][i[1]], num_of_types, number, DeltaR,
numofbins, number_of_types[i[0]], number_of_types[i[1]], lattice, LJ_file_flag, p_flag, p_now, p_target, temperature, i[0], i[1])
def apply_pressure_correction(old_potential, pressure_pot, length, threshold, DeltaR, mode="rigid"):
""" Applies a pressure correction in a gradual way (or not!)
Supported modes:
rigid = rigid smoothing
gradual = slowly increasing smoothing
halfway = slowly increasing smoothing, starting at 50% threshold and ending at 150% threshold
"""
threshold_num = threshold/DeltaR
potential = np.zeros((length))
# note: the three mode branches below currently apply the same rigid correction;
# the gradual and halfway smoothing schemes are not yet implemented differently.
if mode == "rigid":
for i in xrange (0, length):
potential[i] = old_potential[i]
if threshold <= ((i+0.1) * DeltaR): #0.1 is here to prevent tiny bugs in float comparisons, causing the list to get shorter and shorter...
potential[i] += pressure_pot[i]
if mode == "gradual":
for i in xrange (0, length):
potential[i] = old_potential[i]
#potential[threshold_num:length] += pressure_pot[threshold_num:length]
if threshold <= ((i+0.1) * DeltaR): #0.1 is here to prevent tiny bugs in float comparisons, causing the list to get shorter and shorter...
potential[i] += pressure_pot[i]
if mode == "halfway":
for i in xrange (0, length):
potential[i] = old_potential[i]
if threshold <= ((i+0.1) * DeltaR): #0.1 is here to prevent tiny bugs in float comparisons, causing the list to get shorter and shorter...
potential[i] += pressure_pot[i]
print "Sum of pressure correction: ", np.sum(np.abs(pressure_pot))
return potential
def production():
""" This script will create the next interation of coarse-grained potentials using the Iterative Boltzmann Inversion to
match to a user-supplied radial distribution function (normally from atomistic simulation). It will also attempt a correction
for the pressure. The script will also extrapolate the potentials at low distance values by fitting to a soft Lennard-Jones
potential. Note, this fitting is somewhat unstable (CurveFit.pm) and can cause the IBI to fail. """
print "ARGUMENTS TO THE IBI ARE: ", sys.argv
# user-supplied arguments to the IBI. Note, not all of these arguments are required depending on what analysis is need and files are provided.
lammps_input_file = "" # LAMMPS input file for the current CG iteration.
correct_rdf_base = "" # User-supplied Radial Distribution Function to match to (normally derived from atomistic simulation) - distance is column 1 and the RDF is column 3.
potential_base = "" # the file base-name for the potential energy files. The format is such: pot.<iteration_number>.new.<type1><type2>. In this case the base-name is "pot".
number = 0 # the current IBI iteration number
lammps_data_file = "" # LAMMPS CG data file
lammps_rdf_file = "" # the CG RDF file if calculated by LAMMPS - this is a series of snapshot values, which need to be averaged.
p_target = 1.0 # pressure target for the CG simulation.
p_flag = 0.0 # flag to indicate whether to apply pressure correction - set to one if a pressure target is set by the user.
CG_output_file = "" # LAMMPS thermodynamic log file for the current CG simulation. Used to calculate the current average CG pressure.
p_now = 0 # current CG pressure read from (and averaged) the CG lammps thermodynamic log file;
temperature = 300 # temperature the simulations are run at; default is 300K
LJ_file_flag = 0 # if this flag is set to one, the parameters used in the extrapolation by fitting to a Lennard-Jones potential are read from a file (called LJ_parameters) rather than computed from fitting to the potential / forces.
num_of_bins = 0
DeltaR = 0.0
number_of_arguments = len(sys.argv)
mode = "default"
num_of_types = 0
for i in xrange(0, number_of_arguments):
if sys.argv[i].lower() == "lammps_input_file":
lammps_input_file = sys.argv[i+1]
print "THE LAMMPS INPUT FILE IS ", lammps_input_file
elif sys.argv[i].lower() == "lammps_output_file":
lammps_output_file = sys.argv[i+1]
print "THE LAMMPS OUTPUT FILE IS ", lammps_input_file
elif sys.argv[i].lower() == "lammps_data_file":
lammps_data_file = sys.argv[i+1]
print "THE LAMMPS DATA FILE IS ", lammps_data_file
elif ((sys.argv[i] == "potential_base") or (sys.argv[i] == "potential")):
potential_base = sys.argv[i+1]
elif sys.argv[i].lower() == "lammps_rdf_file":
lammps_rdf_file = sys.argv[i+1]
print "THE RDFS WILL BE READ FROM LAMMPS OUTPUT", lammps_rdf_file
elif (sys.argv[i] == "correct_rdf_base"):
correct_rdf_base = sys.argv[i+1]
print "THE RDFS TO MATCH TO HAVE THE FILE BASE ", correct_rdf_base
elif ((sys.argv[i] == "number") or (sys.argv[i] == "current_number") or (sys.argv[i] == "iteration_number")):
number = int(sys.argv[i+1])
print "THE CURRENT ITERATION NUMBER IS ", number
elif ((sys.argv[i] == "pressure_flag") or (sys.argv[i] == "p_flag")):
p_flag = float(sys.argv[i+1])
print "THE PRESSURE FLAG is ", p_flag
elif ((sys.argv[i] == "pressure_target") or (sys.argv[i] == "p_target")):
p_target = float(sys.argv[i+1])
if abs(p_flag) < 0.00001:
p_flag = 1
print "THE PRESSURE TARGET is ", p_target
elif ((sys.argv[i] == "CG_log_file") or (sys.argv[i] == "CG_logfile")):
CG_output_file = sys.argv[i+1]
p_now = read_CG_log_file(CG_output_file, label="Press")
#TODO: this is only a temp hack!
print "THE CURRENT PRESSURE WILL BE CALCULATED FROM THE LOG FILE ", CG_output_file , p_now
elif (sys.argv[i] == "temperature"):
temperature = float(sys.argv[i+1])
elif (sys.argv[i] == "LJ_param_file"):
LJ_file_flag = 1
elif sys.argv[i].lower() == "numofbins":
num_of_bins = int(sys.argv[i+1])
print "THE NUMBER OF BINS IS ", num_of_bins
elif sys.argv[i].lower() == "deltar":
DeltaR = float(sys.argv[i+1])
print "DeltaR IS ", DeltaR
elif sys.argv[i] == "mode":
mode = sys.argv[i+1]
elif sys.argv[i].lower() == "numoftypes":
num_of_types = int(sys.argv[i+1])
# read in the lammps data file to identify the number of CG types and lattice parameters.
lattice, type_list = lmp_io.read_lammps_data_file(lammps_data_file)
num_of_types = len(type_list)
print "Num of types = ", num_of_types
#num_of_types = 4
number_of_types_array = np.zeros((num_of_types+1))
for n in xrange(1, num_of_types+1):
number_of_types_array[n] = len(type_list["%s" % n])
if mode=="pressure_correct":
num_of_bins, cutoff, offset = lmp_io.get_number_of_bins_and_cutoff(potential_base, 1)
print "Potential numofbins and cutoff:", num_of_bins, cutoff
pots = (potential_base.strip()).split('.')
atom1 = int(pots[-2])
atom2 = int(pots[-1])
print "ATOMS are:", atom1, atom2
potential = np.zeros((num_of_bins+1))
volume = float(lattice[0]) * float(lattice[1]) * float(lattice[2])
hist_rdf = lmp_io.read_lammps_rdf(lammps_rdf_file, num_of_types, number)
pressure_pot = calc_pressure_correction(hist_rdf[atom1][atom2], num_of_bins, DeltaR, number_of_types_array[atom1], number_of_types_array[atom2], abs(p_flag), p_now, p_target, volume, temperature)
old_distance, old_potential, old_derivative = lmp_io.read_in_interaction_file(potential_base, num_of_bins)
potential = apply_pressure_correction(old_potential, pressure_pot, num_of_bins+1, old_distance[1], DeltaR)
potential[0]=potential[1] # TODO: change this temporary workaround into something more systematic. The workaround reduces anomalies in the derivative at the start of the potential.
new_derivatives = da.derivatives(np.arange(offset, cutoff, DeltaR), potential)
print "dy lens:", num_of_bins, len(new_derivatives), len(np.arange(offset-DeltaR, cutoff, DeltaR)), len(potential)
write_pot_file("%s/pot.%d.new.%d.%d" % (os.path.dirname(lammps_output_file), number+1, atom1, atom2), new_derivatives[1:] , potential[1:], num_of_bins, DeltaR, atom1, atom2, num_of_bins, offset, smoothing="no", selection="no") #note: we use an offset here!
elif mode=="default":
# Either read an interaction list from a file in the atom_dir (useful if you want to parametrize only a subset of interactions), or generate one on the fly.
interaction_filename = os.path.dirname(correct_rdf_base) + "/interaction_list"
if os.path.exists(interaction_filename):
interaction_list = interaction_list_from_file(interaction_filename)
else:
interaction_list = generate_interaction_list(num_of_types)
first_array, num_of_bins, cutoff2 = read_in_rdf_file(correct_rdf_base, num_of_types, interaction_list) # read in the rdfs to match to.
print "THE CUTOFF in the RDF files is", cutoff2, ", with", len(first_array[1][1])-1, "number of bins ";
print "THIS IS ITERATION NUMBER", number
deltaR = cutoff2 / num_of_bins # bin spacing from the RDF
previous_position, previous_potential, previous_derivative, old_pot_files, cutoff = lmp_io.read_in_interaction_files(potential_base, num_of_types,
interaction_list, number)
num_of_bins = int(cutoff / deltaR)
print deltaR, cutoff2, num_of_bins, correct_rdf_base
print "THE CUTOFF in the POS FILES is", cutoff, "and number of bins are", num_of_bins
# read in the RDFs of the CG calculated by LAMMPS.
hist_rdf = lmp_io.read_lammps_rdf(lammps_rdf_file, num_of_types, number)
# print lammps_rdf_file, len(hist_rdf[1][1])
DeltaR = cutoff / num_of_bins
# calculate the IBI
compute_update(os.path.dirname(lammps_output_file), first_array, hist_rdf, previous_position, previous_potential, num_of_types, number, DeltaR, num_of_bins, number_of_types_array, lattice, LJ_file_flag, p_flag, p_now, p_target, temperature, interaction_list)
# modify the lammps input file, ready for the next iteration
modify_lammps_in_file(lammps_input_file, lammps_output_file, number, interaction_list, num_of_types)
else:
print "ERROR: mode is incorrectly set in IBI.py. Should be e.g., default or pressure_correct"
sys.exit()
def basic_test_suite():
""" Simple testing of various functions in the script."""
print "read_lammps_data_file"
lattice, type_list = lmp_io.read_lammps_data_file("CG_first_interaction.lammps05")
print "read_lammps_rdf"
rdf_average = lmp_io.read_lammps_rdf("tmp.1.rdf", 3, 1)
print "read_CG_log_file"
final_pressure = read_CG_log_file("new_CG.prod1.log")
print "smooth_data"
smoothed = dm.smooth_data(rdf_average[1][1])
print "read_in_rdf_file"
rdf_array, numofbins, cutoff = read_in_rdf_file("rdf", 3, [[1,1],[1,2],[1,3],[2,2],[2,3],[3,3]])
print "read_in_interaction_files"
distance, potential, derivative, pot_file_list, cutoff = lmp_io.read_in_interaction_files("./pot", 3, [[1,1],[1,2],[1,3],[2,2],[2,3],[3,3]], 1)
#print "lattice: ", lattice
#print "type_list: ", type_list
#print "final_pressure: ", final_pressure
#print "rdf_average: ", rdf_average[1][1]
#print "smoothed rdf_average: ", smoothed
#print len(rdf_average[1][1]), len(smoothed)
#print "rdf_array: ", rdf_array, "\n numofbins: ", numofbins, "\n cutoff: ", cutoff
#print "distance: ", distance, "\n potential: ", potential, "\n derivative: ", derivative, "\n pot_file_list: ", pot_file_list, "\n cutoff: ", cutoff
#print potential[1][1], len(potential[1][1])
if __name__ == "__main__":
production()
#basic_test_suite()
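# Example invocation (an assumption for illustration, not from the original
# script; argument names follow the parser in production() above):
# python IBI.py lammps_input_file in.cg lammps_output_file out.cg \
# lammps_data_file CG_first_interaction.lammps05 potential_base pot \
# correct_rdf_base rdf lammps_rdf_file tmp.1.rdf number 1 deltar 0.02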
|
uschille/FabSim
|
python/IBI.py
|
Python
|
lgpl-3.0
| 24,218
|
[
"LAMMPS"
] |
b08465ae35718c13eef4489ff2d9dad69e0b08388dede45abbc05002968cef1a
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
__version__ = "0.2.0-dev"
import os
from setuptools import find_packages, setup
from setuptools.extension import Extension
import numpy as np
classes = """
Development Status :: 1 - Planning
License :: OSI Approved :: BSD License
Topic :: Software Development :: Libraries
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Bio-Informatics
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Operating System :: Unix
Operating System :: POSIX
Operating System :: MacOS :: MacOS X
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
description = ('Data structures, algorithms and educational '
'resources for bioinformatics.')
with open('README.rst') as f:
long_description = f.read()
# Dealing with Cython
USE_CYTHON = os.environ.get('USE_CYTHON', False)
ext = '.pyx' if USE_CYTHON else '.c'
extensions = [Extension("skbio.stats._subsample._subsample",
["skbio/stats/_subsample/_subsample" + ext]),
Extension("skbio.alignment._ssw._ssw_wrapper",
["skbio/alignment/_ssw/_ssw_wrapper" + ext,
"skbio/alignment/_ssw/ssw.c"])]
if USE_CYTHON:
from Cython.Build import cythonize
extensions = cythonize(extensions)
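# Example invocation (an assumption, not from the original file): regenerate
# the C sources from the .pyx files before building with
#   USE_CYTHON=1 python setup.py build_ext --inplace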
setup(name='scikit-bio',
version=__version__,
license='BSD',
description=description,
long_description=long_description,
author="scikit-bio development team",
author_email="gregcaporaso@gmail.com",
maintainer="scikit-bio development team",
maintainer_email="gregcaporaso@gmail.com",
url='http://scikit-bio.org',
test_suite='nose.collector',
packages=find_packages(),
ext_modules=extensions,
include_dirs=[np.get_include()],
install_requires=['numpy >= 1.7', 'matplotlib >= 1.1.0',
'scipy >= 0.13.0', 'pandas', 'future', 'natsort'],
extras_require={'test': ["nose >= 0.10.1", "pep8", "flake8",
"python-dateutil"],
'doc': ["Sphinx >= 1.2.2", "sphinx-bootstrap-theme"]},
classifiers=classifiers,
package_data={
'skbio.stats.tests': ['data/*'],
'skbio.stats.distance.tests': ['data/*'],
'skbio.stats.ordination.tests': ['data/*'],
'skbio.parse.sequences.tests': ['data/*'],
}
)
|
JWDebelius/scikit-bio
|
setup.py
|
Python
|
bsd-3-clause
| 2,965
|
[
"scikit-bio"
] |
53247b553149d82b99f3eccc321bf6e536969a5fe0c71f3797b33cdeb5e653df
|
# -*- coding: utf-8 -*-
#
# recording_demo.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Recording examples
------------------
This script demonstrates how to select different recording backends
and read the result data back in. The simulated network itself is
rather boring with only a single Poisson generator stimulating a
single neuron, so we get some data.
"""
import nest
import numpy as np
def setup(record_to, time_in_steps):
"""Set up the network with the given parameters."""
nest.ResetKernel()
nest.overwrite_files = True
pg_params = {'rate': 1000000.}
sr_params = {'record_to': record_to, 'time_in_steps': time_in_steps}
n = nest.Create('iaf_psc_exp')
pg = nest.Create('poisson_generator', 1, pg_params)
sr = nest.Create('spike_recorder', 1, sr_params)
nest.Connect(pg, n, syn_spec={'weight': 10.})
nest.Connect(n, sr)
return sr
def get_data(sr):
"""Get recorded data from the spike_recorder."""
if sr.record_to == 'ascii':
return np.loadtxt(f'{sr.filenames[0]}', dtype=object)
if sr.record_to == 'memory':
return sr.get('events')
# Just loop through some recording backends and settings
for time_in_steps in (True, False):
for record_to in ('ascii', 'memory'):
sr = setup(record_to, time_in_steps)
nest.Simulate(30.0)
data = get_data(sr)
print(f"simulation resolution in ms: {nest.resolution}")
print(f"data recorded by recording backend {record_to} (time_in_steps={time_in_steps})")
print(data)
|
sanjayankur31/nest-simulator
|
pynest/examples/recording_demo.py
|
Python
|
gpl-2.0
| 2,196
|
[
"NEURON"
] |
70683a197ce2dd4bc31086f3dc50102b30fd3cac6bc0954bf73ed665c9adf625
|
#!/usr/bin/python3
# encoding: utf-8
# Berreman4x4 example
# Authors: O. Castany, C. Molinaro
# Example of a cholesteric liquid crystal
import numpy, Berreman4x4
from numpy import sin, sqrt, abs
from Berreman4x4 import c, pi, e_y
import matplotlib.pyplot as pyplot
# Materials
glass = Berreman4x4.IsotropicNonDispersiveMaterial(1.55)
front = back = Berreman4x4.IsotropicHalfSpace(glass)
# Liquid crystal oriented along the x direction
(no, ne) = (1.5, 1.7)
Dn = ne-no
n_med = (ne + no)/2
LC = Berreman4x4.UniaxialNonDispersiveMaterial(no, ne) # ne along z
R = Berreman4x4.rotation_v_theta(e_y, pi/2) # rotation of pi/2 along y
LC = LC.rotated(R) # apply rotation from z to x
# Cholesteric pitch (m):
p = 0.65e-6
# One half turn of a right-handed helix:
TN = Berreman4x4.TwistedMaterial(LC, p/2, angle=+pi, div=35)
# Inhomogeneous layer, repeated layer, and structure
IL = Berreman4x4.InhomogeneousLayer(TN)
N = 15 # number of half-pitch repetitions
h = N * p/2
L = Berreman4x4.RepeatedLayers([IL], N)
s = Berreman4x4.Structure(front, [L], back)
# Normal incidence:
Kx = 0.0
# Calculation parameters
lbda_min, lbda_max = 0.8e-6, 1.2e-6 # (m)
lbda_B = p * n_med
lbda_list = numpy.linspace(lbda_min, lbda_max, 100)
k0_list = 2*pi/lbda_list
############################################################################
# Analytical calculation for the maximal reflection
R_th = numpy.tanh(Dn/n_med*pi*h/p)**2
lbda_B1, lbda_B2 = p*no, p*ne
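# (Illustrative note, not in the original script: for the right-circular wave
# the stop-band spans p*no <= lambda <= p*ne, centred on lbda_B = p*n_med,
# and R_th above is the peak reflectance of a helix of finite thickness h.)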
############################################################################
# Calculation with Berreman4x4
data = Berreman4x4.DataList([s.evaluate(Kx,k0) for k0 in k0_list])
T_pp = data.get('T_pp')
T_ps = data.get('T_ps')
T_ss = data.get('T_ss')
T_sp = data.get('T_sp')
# Transmission coefficients for incident unpolarized light:
T_pn = 0.5 * (T_pp + T_ps)
T_sn = 0.5 * (T_sp + T_ss)
T_nn = T_sn + T_pn
# Transmission coefficients for 's' and 'p' polarized light, with
# unpolarized measurement.
T_ns = T_ps + T_ss
T_np = T_pp + T_sp
###########################################################################
# Text output: eigenvalues and eigenvectors of the transmission matrix for
# a wavelength in the middle of the stop-band.
i = numpy.argmin(abs(lbda_list-lbda_B)) # index for stop-band center
T = data[i].T_ti # transmission matrix
eigenvalues, eigenvectors = numpy.linalg.eig(T)
numpy.set_printoptions(precision=3)
print("\nTransmission in the middle of the stop-band...\n")
print("Eigenvalues of the Jones transmission matrix:")
print(eigenvalues)
print("Corresponding power transmission:")
print(abs(eigenvalues)**2)
print("Corresponding eigenvectors:")
print(eigenvectors)
# Note: the transformation matrix to the eigenvector basis is
# B = numpy.matrix(eigenvectors), and the matrix B⁻¹ T B is diagonal.
print("Normalization to the 'p' componant:")
print(eigenvectors/eigenvectors[0,:])
print("Ratio 's'/'p':")
print(abs(eigenvectors[1,:]/eigenvectors[0,:]))
print("Complex angle (°) (+90°: L, -90°: R)")
print(180/pi*numpy.angle(eigenvectors[1,:]/eigenvectors[0,:]))
# We observe that the eigenvectors are nearly perfectly polarized circular waves
###########################################################################
# Jones matrices for the circular wave basis
# Right-circular wave is reflected in the stop-band.
# R_LR, T_LR close to zero.
R_RR = data.get('R_RR')
R_LR = data.get('R_LR')
T_RR = data.get('T_RR')
T_LR = data.get('T_LR')
# Left-circular wave is transmitted in the full spectrum.
# T_RL, R_RL, R_LL close to zero, T_LL close to 1.
T_LL = data.get('T_LL')
R_LL = data.get('R_LL')
############################################################################
# Plotting
fig = pyplot.figure()
ax = fig.add_subplot(
title="Right-handed Cholesteric Liquid Crystal, aligned along \n" +
"the $x$ direction, with {:.1f} helix pitches.".format(N/2.),
xlabel=r"Wavelength $\lambda_0$ (m)",
ylabel=r"Power transmission $T$ and reflexion $R$")
# Draw rectangle for λ ∈ [p·no, p·ne], and T ∈ [0, R_th]
rectangle = pyplot.Rectangle((lbda_B1,0), lbda_B2-lbda_B1, R_th, color='cyan')
ax.add_patch(rectangle)
ax.plot(lbda_list, R_RR, '--', label='R_RR')
ax.plot(lbda_list, T_RR, label='T_RR')
ax.plot(lbda_list, T_nn, label='T_nn')
ax.plot(lbda_list, T_ns, label='T_ns')
ax.plot(lbda_list, T_np, label='T_np')
ax.legend(loc='center right', bbox_to_anchor=(1.00, 0.50))
ax.set_ylim(0,1)
fmt = ax.xaxis.get_major_formatter()
fmt.set_powerlimits((-3,3))
s.drawStructure()
pyplot.show()
|
Berreman4x4/Berreman4x4
|
examples/cholesteric-example.py
|
Python
|
gpl-3.0
| 4,607
|
[
"CRYSTAL"
] |
cfb8a12536ad029966b98b5bb16906d01b61b6ab672d183c17ade64aba95aaff
|
# $HeadURL: $
''' Command
Base class for all commands.
'''
from DIRAC import gLogger, S_OK
__RCSID__ = '$Id: $'
class Command( object ):
'''
The Command class is a simple base class for all the commands
for interacting with the clients
'''
def __init__( self, args = None, clients = None ):
self.apis = clients or {}
self.masterMode = False
self.onlyCache = False
self.metrics = { 'failed' : [] }
self.args = { 'onlyCache' : False }
_args = args or {}
self.args.update( _args )
def doNew( self, masterParams = None ):
''' To be extended by real commands
'''
return S_OK( ( self.args, masterParams ) )
def doCache( self ):
''' To be extended by real commands
'''
return S_OK( self.args )
def doMaster( self ):
''' To be extended by real commands
'''
return S_OK( self.metrics )
def doCommand( self ):
''' To be extended by real commands
'''
if self.masterMode:
gLogger.verbose( 'doMaster')
return self.returnSObj( self.doMaster() )
gLogger.verbose( 'doCache' )
result = self.doCache()
if not result[ 'OK' ]:
return self.returnERROR( result )
# We may be interested on running the commands only from the cache,
# without requesting new values.
if result[ 'Value' ] or self.args[ 'onlyCache' ]:
return result
gLogger.verbose( 'doNew' )
return self.returnSObj( self.doNew() )
def returnERROR( self, s_obj ):
'''
Prepends the command name to the S_ERROR message, much easier to debug
'''
s_obj[ 'Message' ] = '%s %s' % ( self.__class__.__name__, s_obj[ 'Message' ] )
return s_obj
def returnSObj( self, s_obj ):
'''
Returns S_OK objects unchanged; S_ERROR messages get the command name prepended, much easier to debug
'''
if s_obj[ 'OK' ]:
return s_obj
return self.returnERROR( s_obj )
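# Illustrative sketch (not part of the original module): a minimal concrete
# command; doCache returns an empty value, so doCommand falls through to doNew.
class PingCommand( Command ):
  '''
  Toy command exercising the doCommand dispatch above
  '''
  def doNew( self, masterParams = None ):
    return S_OK( { 'alive' : True } )
  def doCache( self ):
    return S_OK( None )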
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
Sbalbp/DIRAC
|
ResourceStatusSystem/Command/Command.py
|
Python
|
gpl-3.0
| 2,153
|
[
"DIRAC"
] |
f88ddbc7fee5c99e22d6848b25b99bb1886c7f7626c9ec79459cf8bc3ce6002e
|
#!/usr/bin/env python
#-------------------------------------------------------------
# Author: Thomas Schwarzl <schwarzl@embl.de>
# With the help of: Christian Hauer <chauer@embl.de>
# Licenced under MIT Creative Licence
# Last change: 21 October 2014
#-------------------------------------------------------------
#-------------------------------------------------------------
# pip install numpy --user
import sys, os, re, csv, random, math, logging, shutil, gzip, traceback
import datetime, time
try:
import HTSeq
except Exception:
print "Please install the HTSeq framework e.g. like this"
print "pip install HTSeq"
print "pip install HTSeq --user"
os._exit(1)
from pybedtools import BedTool
from sortedcontainers import SortedSet
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from random import randint # can be removed in the real script
from itertools import islice
#-------------------------------------------------------------
class iCLIP:
data = {}
datadf = {}
features = {}
reads = {} # probably to delete
stats = {}
sites_unique = {}
sites = {}
stranded = True
bamfile = ""
gtfname = ""
verbose = False
deletionRatio = 0
minDeletions = 1
halfwindow = 100
sortOutput = True
primary = False
filterExons = True
maxReadIntervalLength = 0
minAlignmentQuality = 0
maxReadLength = 0
minReadLength = 0
files = {}
readLength = {}
coverage = {}
# Constructor
# Reads all the options and creates the data structures needed
def __init__(self, options):
if hasattr(options, 'bamfile'):
self.bamfile = options.bamfile
self.outputDir = options.outputDir
self.bedOutputDir = os.path.join(options.outputDir, 'bed')
if hasattr(options, 'maxReadIntervalLength'):
self.maxReadIntervalLength = options.maxReadIntervalLength
if hasattr(options, 'minAlignmentQuality'):
self.minAlignmentQuality = options.minAlignmentQuality
self.stranded = options.stranded
self.verbose = options.verbose
if hasattr(options, 'deletionRatio'):
self.deletionRatio = options.deletionRatio
if hasattr(options, 'minDeletions'):
self.minDeletions = options.minDeletions
if hasattr(options, 'gtfname'):
self.gtfname = options.gtfname
if hasattr(options, 'halfwindow'):
self.halfwindow = options.halfwindow
if hasattr(options, 'sortOutput'):
self.sortOutput = options.sortOutput
if hasattr(options, 'primary'):
self.primary = options.primary
if hasattr(options, 'filterExons'):
self.filterExons = options.filterExons
self.features = { 'exons': HTSeq.GenomicArrayOfSets( "auto", stranded=self.stranded ) ,
'introns': HTSeq.GenomicArrayOfSets( "auto", stranded=self.stranded ) }
#self.reads = HTSeq.GenomicArrayOfSets("auto", stranded=self.stranded)
self.stats = {'total_reads': 0,
'above quality criteria': 0,
'reads with deletions': 0,
'reads with mismatches': 0,
'reads with insertions': 0,
'total deletions': 0,
'total insertions': 0,
'total mismatches': 0,
'average deletion length': 0,
'average insertion length': 0,
'average mismatch length': 0,
'maxReadInterval': self.maxReadIntervalLength,
'minAlignmentQuality': self.minAlignmentQuality}
# create the output dirs
self.createDir(self.outputDir)
self.createDir(self.bedOutputDir)
# =====================================================================================
# Context-manager exit closes the file connections
def __exit__(self, type, value, traceback):
for key, f in self.files.items():
f.close()
# =====================================================================================
def run(self):
# calculateDistancesFromReads
self.calculateDistancesFromReads()
self.calculateDistancesFromSites()
# =====================================================================================
# Determines the read lengths, the starting positions of all reads,
# and the positions of deletions
def readBamFile(self):
# initiate file handlers for the deletion and insertion bed
self.files['deletion'] = gzip.open(os.path.join(self.bedOutputDir, 'cims-deletions.bed.gz'), 'w')
self.files['insertion'] = gzip.open(os.path.join(self.bedOutputDir, 'cims-insertions.bed.gz'), 'w')
self.files['reads'] = gzip.open(os.path.join(self.bedOutputDir, 'cims-reads.bed.gz'), 'w')
# get a BAM Reader from HTSeq
almnt_file = HTSeq.BAM_Reader( self.bamfile )
print "Reading the Bam File"
# Read through the BAM file and retrieve aligned reads
for almnt in almnt_file:
# Counts the read
self.stats['total_reads'] += 1
if self.stats['total_reads'] % 10000 == 0:
print "Read in " + str(self.stats['total_reads']) + " reads"
if self.readFullfillsQualityCriteria(almnt):
# Counts the read if it is above a certain quality criteria
self.stats['above quality criteria'] += 1
# Calculate read length and also the minimum and maximum read length
readLength = self.calcMinMax(almnt)
# Calculate the adapter length and add it to a table: adapter length versus read length
self.increaseDataCount('adapters', self.getAdapterSequence(almnt.read.name), readLength)
# Calculate the starting position of the read and store it in self.sites
self.increaseSiteCount('start', self.determineStartSite(almnt.iv), almnt.read.name)
# Calculate the middle position of the read and store it in self.sites
self.increaseSiteCount('middle', self.determineMiddleSite(almnt.iv), almnt.read.name)
# Calculate the end position of the read and store it in self.sites
self.increaseSiteCount('end', self.determineEndSite(almnt.iv), almnt.read.name)
# Calculate the position of insertions, deletion and mismatches and store them in self.sites
self.addCigarInformation(almnt, readLength)
# Calculate the cross link site - this will be in future either the start site
# or for example a deletion site if available
# self.increaseSiteCount('crosslink', self.determineCrosslinkSite(almnt.iv), almnt.read.name)
# Create a coverage plot and store it in self.sites
# self.increaseSiteCount('coverage', almnt.iv, almnt.read.name)
# Store the individual reads in self.reads
#self.addRead(almnt, readLength)
# Stores the sequence Frequency per base
self.countSequenceFrequencies(almnt.read.seq, readLength)
# Write the Read as CIMS bed file
self.writeReadToCIMSBed(almnt)
# store the read length
self.readLength[almnt.read.name] = readLength
# stores the read counts for read length
self.increaseDataCount('readcount', readLength, "counts")
# Do some stats calculations after all reads have been read.
self.calculateReadStats()
#-------------------------------------------------------------
    # This function calculates the distances of a read
    # to certain features. To do so it runs through all the
    # reads of the BAM file a second time. This could also be
    # kept in memory if that were faster and memory not an issue.
def calculateDistancesFromReads(self):
# Looping through the reads again and calculating the distances
print "Calculating Distances From Reads"
# get a HTSeq bam file reader
almnt_file = HTSeq.BAM_Reader( self.bamfile )
        i = 0
# run through the aligned reads
for almnt in almnt_file:
i += 1
if i % 10000 == 0:
print "%s reads processed" % i
            # if the quality criteria are fulfilled
if self.readFullfillsQualityCriteria(almnt):
# calculate the features of the read
readLength = self.getSequenceLength(almnt)
startSite = self.determineStartSite(almnt.iv)
middleSite = self.determineMiddleSite(almnt.iv)
endSite = self.determineEndSite(almnt.iv)
                # calculate the distances of the read sites to certain features
self.doCalculateDistance(startSite, 'start', readLength, 'start-to-start')
self.doCalculateDistance(startSite, 'deletions', readLength, 'start-to-deletion')
self.doCalculateDistance(middleSite, 'deletions', readLength, 'middle-to-deletion')
self.doCalculateDistance(endSite, 'deletions', readLength, 'end-to-deletion')
self.doCalculateDistance(startSite, 'insertions', readLength, 'start-to-insertion')
self.doCalculateDistance(middleSite, 'insertions', readLength, 'middle-to-insertion')
self.doCalculateDistance(endSite, 'insertions', readLength, 'end-to-insertion')
#-------------------------------------------------------------
# This function calculates the distance from features to other
# features. So e.g. from one deletion site to another deletion
# site.
def calculateDistancesFromSites(self):
print "Calculating Distances From Sites"
#for (start, value) in self.sites['start'].steps():
# if len(value) > 0:
# self.doCalculateDistance(start, 'start', value, 'start-to-start')
for (exonIntron, value) in self.sites['exon-intron'].steps():
if len(value) > 0:
self.doCalculateDistance(exonIntron, 'deletions', value, 'exon-intron-to-deletion')
self.doCalculateDistance(exonIntron, 'insertions', value, 'exon-intron-to-insertion')
self.doCalculateDistance(exonIntron, 'start', value, 'exon-intron-to-start')
self.doCalculateDistance(exonIntron, 'end', value, 'exon-intron-to-end')
self.doCalculateDistance(exonIntron, 'middle', value, 'exon-intron-to-middle')
self.doCalculateCoverage(exonIntron, value, 'exon-intron-coverage')
for (intronExon, value) in self.sites['intron-exon'].steps():
if len(value) > 0:
self.doCalculateDistance(intronExon, 'deletions', value, 'intron-exon-to-deletion')
self.doCalculateDistance(intronExon, 'insertions', value, 'intron-exon-to-insertion')
self.doCalculateDistance(intronExon, 'start', value, 'intron-exon-to-start')
self.doCalculateDistance(intronExon, 'end', value, 'intron-exon-to-end')
self.doCalculateDistance(intronExon, 'middle', value, 'intron-exon-to-middle')
self.doCalculateCoverage(intronExon, value, 'intron-exon-coverage')
for (geneStart, value) in self.sites['gene-start'].steps():
if len(value) > 0:
self.doCalculateDistance(geneStart, 'deletions', value, 'gene-start-to-deletion')
self.doCalculateDistance(geneStart, 'insertions', value, 'gene-start-to-insertion')
for (geneEnd, value) in self.sites['gene-end'].steps():
if len(value) > 0:
self.doCalculateDistance(geneEnd, 'deletions', value, 'gene-end-to-deletion')
self.doCalculateDistance(geneEnd, 'insertions', value, 'gene-end-to-insertion')
for (deletion, value) in self.sites['deletions'].steps():
if len(value) > 0:
self.doCalculateDistance(deletion, 'deletions', value, 'deletion-to-deletions')
self.doCalculateDistance(deletion, 'insertions', value, 'deletion-to-insertions')
self.doCalculateDistance(deletion, 'start', value, 'deletion-to-start')
self.doCalculateDistance(deletion, 'middle', value, 'deletion-to-middle')
self.doCalculateDistance(deletion, 'end', value, 'deletion-to-end')
for (insertion, value) in self.sites['insertions'].steps():
if len(value) > 0:
self.doCalculateDistance(insertion, 'insertions', value, 'insertion-to-insertions')
# calculate the ratios
#self.doCalculateDeletionRatio('exon-intron-to-deletion', 'exon-intron-coverage', offset=1)
#self.doCalculateDeletionRatio('exon-intron-to-deletion', 'exon-intron-coverage', offset=1)
# #-------------------------------------------------------------
# # calculate the deletion rate per base
# def doCalculateDeletionRatio(self, mutationKey, coverageKey, offset=0):
# for dist, readLengthDict in self.data[mutationKey]:
# for readLength, counts in readLengthDict:
# val = offset
# try:
# val = self.data[coverageKey][dist][readLength]
# except IndexError:
# val = offset
#
# if val <= 0:
# logging.warning("Offset ")
# int(dist)
#
# # TODO 1) finish the offset calculation
# 2) check if the distances are calculated correctly
#
#
# # deletions are not counted in the coverage!
#
#
# for i in self.data['exon-intron-to-deletion']:
# pass
#-------------------------------------------------------------
# This function calculates Distances from a site
# site: HTSeq.GenomicPosition of the site
# sitekey: look for sites for distance calculation in that specific container
    # value: identifier(s) of the triggering event (e.g. the read name), used to skip self-comparisons
# datakey: name of the data storage for this distance
def doCalculateDistance(self, site, sitekey, value, datakey, min=False):
# print "doCalculateDistance %s %s %s %s " % (str(site), sitekey, marker, datakey)
# if min == True it will only return one item
# returns list of tuples: distance to readLength
distList = self.calcDistance(sitekey, site, value, min)
if len(distList) == 0:
self.increaseDataCount(datakey, "unknown", "unknown")
else:
for dist, readLength in distList:
#print "Storing %s and with read length %s" % (dist, readLength)
self.increaseDataCount(datakey, str(dist), readLength)
#-------------------------------------------------------------
# get the background coverage for the window
def doCalculateCoverage(self, eventSite, eventValue, datakey):
window = HTSeq.GenomicInterval(eventSite.chrom,
max(0, eventSite.start - self.halfwindow),
eventSite.end + self.halfwindow,
eventSite.strand)
# TODO it does not work
# File "/g/hentze/projects/iCLIP/BindingSites/src/iclipper/lib/iCLIP.py", line 346, in doCalculateCoverage
# for interval, value in cov[window].steps():
# TypeError: string indices must be integers, not HTSeq._HTSeq.GenomicInterval
# # returns genomic interval (interval) and a set (values)
# for readLength, cov in self.coverage:
# for interval, value in cov[window].steps():
# # get the distance from the start to the middle of the event
# dist = math.floor(interval.start - (eventSite.start + eventSite.end) / 2)
#
# # add the interval to the data table
# for i in range(0, interval.length - 1):
# self.increaseDataCount(datakey, dist + i, readLength, value)
#-------------------------------------------------------------
# Stores the nucleotide frequencies per read length
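    # Illustrative example: for a read 'ACG' (length 3) this increments
    # data['nucleotides-3']['A']['1'], ['C']['2'] and ['G']['3'], plus the
    # same positions under 'nucleotides-all'.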
def countSequenceFrequencies(self, sequence, readLength):
pos = 1
for nucleotide in sequence:
self.increaseDataCount("nucleotides-" + str(readLength), nucleotide, str(pos))
self.increaseDataCount("nucleotides-all", nucleotide, str(pos))
pos += 1
#-------------------------------------------------------------
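    # Illustrative example: for an event at genomic position 100 and a stored
    # site of another read at position 120 (same strand, within the window),
    # calcDistance returns [(20, readLength)]; upstream sites yield negative
    # distances.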
def calcDistance(self, sitekey, eventSite, eventValue, min=False):
retList = []
#print "[[[ %s, %s, %s ]]]" % (sitekey, eventSite, eventValue)
# Get the window for the specific site and get all features in there as GenomicArray
# Then we iterate over this Genomic array which gives back an interval and a value
# The value corresponds to how many of these events occur at the site
#print "Strand: %s eventsite %s eventvalue %s" % (eventSite., eventSite.end, eventSite, eventValue)
window = HTSeq.GenomicInterval(eventSite.chrom,
max(0, eventSite.start_d - self.halfwindow),
eventSite.end_d + self.halfwindow,
eventSite.strand)
defaultValue = self.halfwindow + 1
        # best distance seen so far; starts just outside the window
        i = defaultValue
# returns genomic interval (interval) and a set (values)
for interval, values in self.sites[sitekey][window].steps():
            # the sets can be empty, therefore check and
            # go through every occupied site
            if len(values) > 0:
# strings are returned when there is only one value, avoid this
# by creating an iterable element (list) manually
if type(values) is str:
values = [ values ]
#print "VALUES: %s " % values
# now iterate through all the sites
for val in values:
# calculate the distance for each value which is not on the same read.
if val != eventValue:
# get the read length
readLength = self.readLength[val]
# calculate distance
dist = math.floor((interval.start_d + interval.end_d) / 2 - (eventSite.start_d + eventSite.end_d) / 2)
#print " === distance %s" % dist
                        # if only the minimum is sought, return the first distance
                        # that beats the stored one, since steps are ordered anyway
                        if min:
                            # if the current distance is closer than the stored
                            # value, jump out of the loop and return it
                            if abs(i) > abs(dist):
                                return [(dist, readLength)]
                            # else store this distance as the new best value
                            else:
                                i = dist
else:
retList.append((dist, readLength))
        # if nothing closer was found downstream, fall back to the best value seen
if min and abs(i) < defaultValue:
retList.append((i, readLength))
return retList
#-------------------------------------------------------------
# Gets the steps for an interval in a site GenomicArray
# which can be indexed by sitekey
    # the start is clamped so it cannot be a negative number
def getSitesInWindow(self, sitekey, startSite, halfwindow):
#print "%s Start %s, Site: %s, Stop %s" % (startSite.strand, max(0, startSite.start - halfwindow), startSite.start, startSite.end + halfwindow)
return self.sites[sitekey][ HTSeq.GenomicInterval(startSite.chrom, max(0, startSite.start - halfwindow), startSite.end + halfwindow, startSite.strand) ]
#-------------------------------------------------------------
# Filter Deletions:
# Adds a GenomicArray to sites under key 'selected-deletions'
# with a minimum of 'options.minDeletions' reads and
# a ratio of 'options.deletionRatio' deletions to
# background
def filterDeletions(self):
deletionRatio = self.deletionRatio
        for iv, value in self.sites['deletions'].steps():
            if len(value) >= self.minDeletions:
#print "----"
#print sites['coverage'][iv]
# TODO: Select for ratio
self.sites['selected-deletions'][iv] += value
#-------------------------------------------------------------
# Parse Cigar string and add to the statistics
def addCigarInformation(self, almnt, readLength):
variations = self.parseCigar(almnt)
        if len(variations['deletions']) > 0:
            self.increaseStats('reads with deletions')
        if len(variations['insertions']) > 0:
            self.increaseStats('reads with insertions')
for variation in variations['deletions']:
self.increaseStats('total deletions')
self.increaseStats('average deletion length', variation.ref_iv.length)
self.increaseSiteCount('deletions', variation.ref_iv, almnt.read.name)
self.increaseDataCount('deletions', variation.ref_iv.length, readLength)
self.addPositionsOnRead('deletion', variation.query_from, variation.query_to + 1, readLength, almnt)
for variation in variations['insertions']:
self.increaseStats('total insertions')
self.increaseStats('average insertion length', variation.ref_iv.length)
self.increaseSiteCount('insertions', self.getGenomicIntervalWithEndOffset(variation.ref_iv, 1), almnt.read.name)
self.increaseDataCount('insertions', variation.ref_iv.length, readLength)
self.addPositionsOnRead('insertion', variation.query_from, variation.query_to, readLength, almnt)
for variation in variations['hits']:
# add to coverage plot
self.addCoverage(variation.ref_iv, readLength)
#-------------------------------------------------------------
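    # Widens an interval by `offset` bases on the read's 5' (upstream) side,
    # strand-aware; used to give zero-length insertion events a genomic footprint.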
    def getGenomicIntervalWithEndOffset(self, interval, offset):
        if interval.strand == "-":
            return(HTSeq.GenomicInterval(interval.chrom, interval.start, interval.end + offset, interval.strand))
        else:
            return(HTSeq.GenomicInterval(interval.chrom, interval.start - offset, interval.end, interval.strand))
#-------------------------------------------------------------
def addPositionsOnRead(self, key, query_from, query_to, readLength, almnt):
self.writeVariationToCIMSBed(key, query_from, query_to, almnt)
for i in range(query_from, query_to):
self.increaseDataCount(str(key) + "-sites-perbase", almnt.read.seq[i-1], readLength)
self.increaseDataCount(str(key) + "-sites", i, readLength)
#-------------------------------------------------------------
    # This function writes a BED file needed for the CIMS/CITS analysis
def writeVariationToCIMSBed(self, key, query_from, query_to, almnt):
        self.files[key].write("\t".join([str(almnt.iv.chrom), str(almnt.iv.start), str(almnt.iv.end), str(almnt.read.name), str(query_from), str(almnt.iv.strand)]) + "\n")
#-------------------------------------------------------------
    # This function writes a BED file needed for the CIMS/CITS analysis
def writeReadToCIMSBed(self, almnt):
        self.files['reads'].write("\t".join([str(almnt.iv.chrom), str(almnt.iv.start), str(almnt.iv.end), str(almnt.read.name), "1", str(almnt.iv.strand)]) + "\n")
#-------------------------------------------------------------
    # This method determines whether a read fulfills the criteria to be included in the analysis
def readFullfillsQualityCriteria(self, almnt):
if almnt.paired_end and almnt.pe_which == "second":
return False
else:
return(almnt.aligned and
almnt.iv.length <= self.maxReadIntervalLength and
almnt.aQual >= self.minAlignmentQuality and
not almnt.failed_platform_qc and # SAM flag 0x0200
self.primaryFilter(almnt))
#-------------------------------------------------------------
    # If the primary filter is activated (option 'primary' is set),
    # the best location of multimapping reads is filtered out
    # using the SAM flag 0x0100
def primaryFilter(self, almnt):
if self.primary:
return(almnt.not_primary_alignment)
else:
return(True)
# ------------------------------------------------------------
    # Verbose printing:
    # prints the message only if verbose mode is enabled
    def printv(self, string):
        if self.verbose:
            print "%s" % string
#-------------------------------------------------------------
def increaseStats(self, key, value=1):
self.stats[key] += value
#-------------------------------------------------------------
# increases the data count by value
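    # Illustrative example: increaseDataCount('deletions', 3, 25) creates
    # self.data['deletions'] == { 3: { 25: 1 } }; repeated calls increment
    # the innermost counter.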
def increaseDataCount(self, key, x, y, value=1):
if key not in self.data:
self.data[key] = { x: { y: value }}
else:
try:
self.data[key][x][y] += value
except KeyError:
try:
self.data[key][x][y] = value
except KeyError:
self.data[key][x] = { y: value }
#-------------------------------------------------------------
    # records an event (typically a read name) at a site of the given type
def increaseSiteCount(self, key, pos, value=1):
s = self.sites.get(key, HTSeq.GenomicArrayOfSets("auto", stranded=self.stranded)) #, storage="memmap")
if (pos.end - pos.start) <= 0:
logging.warning("Wanted to add site count " + key + " for " + pos.chrom + ":" + str(pos.end) + " - " + str(pos.start) + " with value " + str(value))
else:
try:
s[pos] = value
self.sites[key] = s
except IndexError:
logging.warning("IndexError when adding " + str(pos) + " value " + str(value) + " to key " + str(key))
#-------------------------------------------------------------
    # increases the coverage for an interval and read length by a given value
def addCoverage(self, interval, readLength, value=1):
readLength = str(readLength)
s = self.coverage.get(readLength, HTSeq.GenomicArray("auto", stranded=self.stranded, typecode='i')) #, storage="memmap")
try:
#print "Adding %s with value %s " % (interval, value)
s[interval] += value
self.coverage[readLength] = s
        except (IndexError, KeyError):
logging.warning("IndexError when adding coverage " + str(interval) + " value " + str(value) + " to key " + readLength)
# #-------------------------------------------------------------
# # adds a read
# def addCoverage(self, almnt):
# if (almnt.iv.end - almnt.iv.start) > 0:
# self.reads[almnt.iv] += almnt.read.name
#-------------------------------------------------------------
    # parses the CIGAR of an alignment and returns a dict of
    # CigarOperations for deletions ('D'), insertions ('I') and matches ('M')
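    # Illustrative example: a read with CIGAR '20M2D10M' yields two 'M'
    # operations in 'hits' and one 'D' operation in 'deletions'.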
def parseCigar(self, almnt):
variations = { 'deletions': list(),
'insertions': list(),
'hits': list() }
for i in almnt.cigar:
if i.type == 'D':
variations['deletions'].append(i)
elif i.type == 'I':
variations['insertions'].append(i)
elif i.type == 'M':
variations['hits'].append(i)
return(variations)
#-------------------------------------------------------------
# returns GenomicPosition for start site
def determineStartSite(self, iv):
return(HTSeq.GenomicPosition(iv.chrom, iv.start_d, iv.strand))
#-------------------------------------------------------------
# returns GenomicPosition for end site
def determineEndSite(self, iv):
return(HTSeq.GenomicPosition(iv.chrom, iv.end_d, iv.strand))
#-------------------------------------------------------------
# returns GenomicPosition for middle site
    # TODO: Check how this performs with gapped alignments
def determineMiddleSite(self, iv):
        return(HTSeq.GenomicPosition(iv.chrom, int(round((iv.start_d + iv.end_d) / 2)), iv.strand))
#-------------------------------------------------------------
# TODO: determine the crosslink site, means a basepair down of the start site
def determineCrosslinkSite(self, iv):
pos = 0
if(self.stranded):
if(iv.strand == "+"):
pos = iv.start_d - 1
elif(iv.strand == "-"):
pos = iv.start + 1
else:
raise("Strand not known %s" % iv.strand)
else:
pos = iv.start_d - 2
return(HTSeq.GenomicPosition(iv.chrom, pos, iv.strand))
#-------------------------------------------------------------
# this will convert the data structure to an output data
# structure
def convertDataToDataFrame(self):
for i in self.data:
self.datadf[i] = pd.DataFrame(self.data[i]).T.fillna(0)
#-------------------------------------------------------------
# sorts the data frame so the output will look nice
def sortData(self):
for key in self.datadf:
self.datadf[key] = self.datadf[key].sort_index(axis=0, ascending=True)
self.datadf[key] = self.datadf[key].sort_index(axis=1, ascending=True)
#-------------------------------------------------------------
    # Writes the results to the output directory and copies the
    # R script there
def writeOutput(self):
print "Writing Output:"
print "# Converting data to matrices and sorting"
self.convertDataToDataFrame()
if self.sortOutput:
self.sortData()
print "# Writing data tables"
for keys in self.datadf:
self.writeDataToFile(keys)
print "# Writing the stats"
        self.writeStatsToFile(os.path.join(self.outputDir, 'stats.csv'))
print "# Writing Bed Graphs"
for keys in self.sites:
self.writeBedGraph(keys)
print "# Copying the R file into the folder"
# TODO
# rfile = os.path.join('lib', 'iclipper.R')
rfile = '/g/hentze/projects/iCLIP/BindingSites/src/iclipper/iclipper.R'
if os.path.exists(rfile):
#assert not os.path.isabs(rfile)
dstdir = os.path.join(self.outputDir, 'iclipper.R')
shutil.copy(rfile, dstdir)
#-------------------------------------------------------------
    def writeStatsToFile(self, statsname):
        with open(statsname, "w") as statsfile:
            w = csv.writer(statsfile)
            for key, val in self.stats.items():
                w.writerow([key, str(val)])
#-------------------------------------------------------------
def getSequenceLength(self, almnt):
return(len(almnt.read.seq))
#-------------------------------------------------------------
# calculates the read length and stores the min and max length
    def calcMinMax(self, almnt):
        length = self.getSequenceLength(almnt)
        if self.maxReadLength == 0 and self.minReadLength == 0:
            # first read: initialise both boundaries
            self.maxReadLength = length
            self.minReadLength = length
        else:
            if length > self.maxReadLength:
                self.maxReadLength = length
            if length < self.minReadLength:
                self.minReadLength = length
        return(length)
#-------------------------------------------------------------
# Extract the adapter sequence of a read name.
# Here, we presume that it is the character string at the end of the name
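    # Illustrative example: a (hypothetical) read name ending in '#ACGT'
    # yields the adapter sequence 'ACGT'.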
    def getAdapterSequence(self, readname):
        m = re.search('[A-Z]+$', readname)
        if m:
            return(m.group(0))
        else:
            return("unknown")
#-------------------------------------------------------------
# Calculate read statistics
def calculateReadStats(self):
if self.stats['total deletions'] > 0:
self.stats['average deletion length'] = self.stats['average deletion length'] / self.stats['total deletions']
if self.stats['total insertions'] > 0:
self.stats['average insertion length'] = self.stats['average insertion length'] / self.stats['total insertions']
#-------------------------------------------------------------
# Write the table from data to the output directory
    def writeDataToFile(self, slot):
        with open(self.outputDir + "/data_" + slot + ".txt", "w") as outfile:
            outfile.write(self.datadf[slot].to_string())
#-------------------------------------------------------------
# Write Bed Graph
def writeBedGraph(self, index):
if self.stranded:
self.getGenomicArrayFromSet(self.sites[index]).write_bedgraph_file(os.path.join(self.outputDir, "bed", "sites_" + index + "_plus.bed"), strand="+", track_options="")
self.getGenomicArrayFromSet(self.sites[index]).write_bedgraph_file(os.path.join(self.outputDir, "bed", "sites_" + index + "_minus.bed"), strand="-", track_options="")
else:
self.getGenomicArrayFromSet(self.sites[index]).write_bedgraph_file(os.path.join(self.outputDir, "bed", "sites_" + index + ".bed"), strand=".", track_options="")
#-------------------------------------------------------------
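    # Converts a GenomicArrayOfSets into a plain GenomicArray of counts,
    # e.g. an interval covered by the set {'read1', 'read2'} is written
    # out with the value 2.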
def getGenomicArrayFromSet(self, gas):
ga = HTSeq.GenomicArray("auto", typecode='i', stranded=self.stranded)
for interval, values in gas.steps():
if len(values) > 0:
ga[interval] = len(values)
return ga
#-------------------------------------------------------------
def plotOutput(self):
outputDir = self.outputDir
data = self.data
print "# Plotting table for Exon Distance"
# Plot for Exon Distances
key = 'exondist'
df = data[key]
plt.plot(df.sum(axis=1))
#plt.axis([0, 6, 0, 20])
plt.grid(True)
plt.xlabel('Read Length')
plt.savefig(outputDir + '/' + key + '.png', bbox_inches='tight')
# Plot for Intron Distances
key = 'introndist'
df = data[key]
plt.plot(df.sum(axis=1))
#ax.legend(handles, labels)
plt.grid(True)
plt.xlabel('Read Length')
plt.savefig(outputDir + '/' + key + '.png', bbox_inches='tight')
# Plot for Adapters
#key = 'adapters'
#df = data[key]
#plt.plot(df.sum(axis=0))
#plt.grid(True)
#plt.xlabel('Adapters')
#plt.savefig(outputDir + '/' + key + '.png', bbox_inches='tight')
#print "# Plotting table for Intron Distance"
#plt.plot( np.arange( -halfwinwidth, halfwinwidth ), profile )
#plt.savefig(outputDir + '/foo.png', bbox_inches='tight')
#-------------------------------------------------------------
def createDir(self, directory):
if not os.path.exists(directory):
os.makedirs(directory)
#-------------------------------------------------------------
    # load a gtf or gff genome file of the reference organism (e.g. Homo
    # sapiens, Mus musculus) and search for exons
    # NOTE: for introns the gtf file has to be modified first
def processGTF(self, gtfname):
print "Processing the GTF file %s" % gtfname
gtffile = HTSeq.GFF_Reader( gtfname )
for feature in gtffile:
if feature.type == "exon" or feature.type == "exonic":
self.features['exons'][ feature.iv ] += feature
if feature.type == "intron" or feature.type == "intronic":
self.features['introns'][ feature.iv ] += feature
#-------------------------------------------------------------
def calcBedToFeature(self, bedname, featureUpstream='exons', featureDownstream='exons', window=399):
print "Bed"
bedreader = HTSeq.BED_Reader(bedname)
for bed in bedreader:
exon3ps = set( [ f.iv.end_d for f in self.features[featureUpstream][ bed.iv.start_d_as_pos ] ] )
exon5ps = set( [ f.iv.start_d for f in self.features[featureDownstream][ bed.iv.start_d_as_pos ] ] )
for e3p in exon3ps:
dist = abs( e3p - bed.iv.end_d ) + abs(bed.iv.end_d - bed.iv.start_d)/2
if len( exon3ps ) == 1:
self.increaseDataCount('bed-' + featureUpstream + '-' + featureDownstream, str( -min( dist, window ) ), "bed")
for e5p in exon5ps:
dist = abs( e5p - bed.iv.start_d ) + abs(bed.iv.end - bed.iv.start)/2
if len( exon5ps ) == 1:
self.increaseDataCount('bed-' + featureUpstream + '-' + featureDownstream, str( min( dist, window ) ), "bed")
#-------------------------------------------------------------
def readExonGTF(self, gtfname):
# store the file name
print "Reading GTF file %s " % gtfname
# open a gff reader connection with htseq
gff = HTSeq.GFF_Reader(gtfname)
        # iterate through the features of the gff file and extract the exons
        # corresponding to one single gene
for geneID, exonList in self.bundleByGene( self.getExonsOnly( gff ) ):
geneGenomicArray = HTSeq.GenomicArray( "auto", typecode="b", stranded=self.stranded )
geneStart = 1e30
geneEnd = 0
#print "-------------------------"
# dict stores all exon ends to the start sites to filter out exons with
# multiple start and end sites
exonDict = {}
# for each exon of a gene
for exon in exonList:
geneGenomicArray[exon.iv] = True
geneStart = min( geneStart, exon.iv.start )
geneEnd = max( geneEnd, exon.iv.end )
#exonDict[exon.iv.chrom exon.iv.start] = exon.iv.end
#print "GeneStart %s" % geneStart
#print "GeneEnd %s " % geneEnd
geneInterval = HTSeq.GenomicInterval( exon.iv.chrom, geneStart, geneEnd, exon.iv.strand)
# save the gene start and the gene end
self.increaseSiteCount('gene-start', geneInterval.start_d_as_pos, geneID)
self.increaseSiteCount('gene-end', geneInterval.end_d_as_pos, geneID)
#print "[ %s ] ------- " % geneID
mem = True
#print geneGenomicArray
i = 1
e = 1
for interval, isExon in geneGenomicArray[ geneInterval ].steps():
# print '%s -- %s ' % (interval, isExon)
if isExon:
#add = True
#if self.filterExons:
#if
#TODO Filter Exons
# print "exon %s " % str(interval)
self.features['exons'][interval] = str(geneID) + "-e" + str(e)
e += 1
## add intron exon site
#if(geneStart == interval.start):
# print "first Exon"
#if(geneEnd == interval.end):
# print "last Exon"
                    if mem:
                        mem = False
                    else:
                        # exon and intron steps must alternate; bail out otherwise
                        sys.exit(1)
else:
                    # Stores the correct exon-intron site (site on the intron)
                    self.increaseSiteCount('exon-intron', interval.start_d_as_pos, geneID)
                    # Stores the correct specific intron-exon site
                    self.increaseSiteCount('intron-exon', interval.end_d_as_pos, geneID)
#print "exon-intron"
#print interval.start_d_as_pos
#print "intron-exon"
#print interval.end_d_as_pos
#print "intron %s " % str(interval)
#print "exon-intron %s " % str(interval.start_d_as_pos)
#print "intron-exon %s " % str(interval.end_d_as_pos)
self.features['introns'][interval] = str(geneID) + "-i" + str(i)
i += 1
                    if not mem:
                        mem = True
                    else:
                        # exon and intron steps must alternate; bail out otherwise
                        sys.exit(1)
#-------------------------------------------------------------
def getTimeStamp(self):
return time.strftime("%c")
#-------------------------------------------------------------
# returns exons Only
def getExonsOnly( self, features ):
for feature in features:
if feature.type == "exon":
yield feature
#-------------------------------------------------------------
# Bundles the features by gene
def bundleByGene( self, features ):
currentID = None
genelist = []
for feature in features:
if feature.attr["gene_id"] == currentID:
genelist.append( feature )
else:
if currentID is not None:
yield ( currentID, genelist )
genelist = [ feature ]
currentID = feature.attr["gene_id"]
yield(currentID, genelist)
|
Distue/iclipper
|
lib/iCLIP.py
|
Python
|
mit
| 41,846
|
[
"HTSeq"
] |
f7d25259f783d2e4f013c5939d868c7993e88eaaf6adfb46399f64116ebaf1fe
|
from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os
datatypes_repository_name = 'emboss_datatypes_0020'
datatypes_repository_description = "Galaxy applicable data formats used by Emboss tools."
datatypes_repository_long_description = "Galaxy applicable data formats used by Emboss tools. This repository contains no tools."
emboss_repository_description = 'Galaxy wrappers for Emboss version 5.0.0 tools'
emboss_repository_long_description = 'Galaxy wrappers for Emboss version 5.0.0 tools'
workflow_filename = 'Workflow_for_0060_filter_workflow_repository.ga'
workflow_name = 'Workflow for 0060_filter_workflow_repository'
filtering_repository_description = "Galaxy's filtering tool for test 0040"
filtering_repository_long_description = "Long description of Galaxy's filtering tool for test 0040"
category_0000_name='Test 0000 Basic Repository Features 1'
category_0001_name='Test 0000 Basic Repository Features 2'
category_0010_name='Test 0010 Repository With Tool Dependencies'
category_0020_name='Test 0020 Basic Repository Dependencies'
category_0030_name='Test 0030 Repository Dependency Revisions'
category_0040_name='test_0040_repository_circular_dependencies'
category_0050_name='test_0050_repository_n_level_circular_dependencies'
category_0060_name='Test 0060 Workflow Features'
'''
This script will run in one of two possible ways:
1. Directly, by calling sh run_functional_tests.sh -toolshed test/tool_shed/functional/test_1300_reset_all_metadata.py.
-OR-
2. After the previous test scripts have completed.
In the first case, it is desirable to have the Galaxy database in a state that is as close as possible to the state it would
be in following the second case. This means explicitly installing whatever repositories would be in an installed state following
the previous test scripts.
'''
base_datatypes_count = 0
repository_datatypes_count = 0
running_standalone = False
class TestResetInstalledRepositoryMetadata( ShedTwillTestCase ):
'''Verify that the "Reset selected metadata" feature works.'''
def test_0000_initiate_users( self ):
"""Create necessary user accounts."""
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
test_user_1 = self.test_db_util.get_user( common.test_user_1_email )
        assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
test_user_1_private_role = self.test_db_util.get_private_role( test_user_1 )
self.logout()
self.login( email=common.admin_email, username=common.admin_username )
admin_user = self.test_db_util.get_user( common.admin_email )
        assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
admin_user_private_role = self.test_db_util.get_private_role( admin_user )
def test_0005_create_categories( self ):
'''Create the categories for the repositories in this test script.'''
self.logout()
self.login( email=common.admin_email, username=common.admin_username )
category_0000 = self.create_category( name=category_0000_name, description='Test 0000 Basic Repository Features 1' )
category_0001 = self.create_category( name=category_0001_name, description='Test 0000 Basic Repository Features 2' )
category_0010 = self.create_category( name=category_0010_name, description='Tests for a repository with tool dependencies.' )
category_0020 = self.create_category( name=category_0020_name, description='Testing basic repository dependency features.' )
category_0030 = self.create_category( name=category_0030_name, description='Testing repository dependencies by revision.' )
category_0040 = self.create_category( name=category_0040_name, description='Testing handling of circular repository dependencies.' )
category_0050 = self.create_category( name=category_0050_name, description='Testing handling of circular repository dependencies to n levels.' )
category_0060 = self.create_category( name=category_0060_name, description='Test 0060 - Workflow Features' )
def test_0010_create_repositories_from_0000_series( self ):
'''Create repository filtering_0000 if necessary.'''
global repository_datatypes_count
global running_standalone
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
category = self.create_category( name=category_0000_name, description='' )
repository = self.get_or_create_repository( name='filtering_0000',
description="Galaxy's filtering tool",
long_description="Long description of Galaxy's filtering tool",
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ) )
if self.repository_is_new( repository ):
running_standalone = True
self.upload_file( repository,
filename='filtering/filtering_1.1.0.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded filtering 1.1.0 tarball.',
strings_displayed=[],
strings_not_displayed=[] )
self.upload_file( repository,
filename='filtering/filtering_2.2.0.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded filtering 2.2.0 tarball.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0015_create_repositories_from_0010_series( self ):
'''Create repository freebayes_0010.'''
category = self.create_category( name=category_0010_name, description='' )
repository = self.get_or_create_repository( name='freebayes_0010',
description="Galaxy's freebayes tool",
long_description="Long description of Galaxy's freebayes tool",
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
if self.repository_is_new( repository ):
self.upload_file( repository,
filename='freebayes/freebayes.xml',
filepath=None,
valid_tools_only=False,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded freebayes.xml.',
strings_displayed=[],
strings_not_displayed=[] )
self.upload_file( repository,
filename='freebayes/tool_data_table_conf.xml.sample',
filepath=None,
valid_tools_only=False,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded tool_data_table_conf.xml.sample',
strings_displayed=[],
strings_not_displayed=[] )
self.upload_file( repository,
filename='freebayes/sam_fa_indices.loc.sample',
filepath=None,
valid_tools_only=False,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded sam_fa_indices.loc.sample',
strings_displayed=[],
strings_not_displayed=[] )
self.upload_file( repository,
filename='freebayes/tool_dependencies.xml',
filepath=None,
valid_tools_only=False,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded tool_dependencies.xml',
strings_displayed=[],
strings_not_displayed=[] )
def test_0020_create_repositories_from_0020_series( self ):
'''Create repositories emboss_0020 and emboss_datatypes_0020 if necessary.'''
category = self.create_category( name=category_0020_name, description='' )
repository = self.get_or_create_repository( name='emboss_datatypes_0020',
description="Galaxy applicable data formats used by Emboss tools.",
long_description="Galaxy applicable data formats used by Emboss tools. This repository contains no tools.",
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
if self.repository_is_new( repository ):
self.upload_file( repository,
filename='emboss/datatypes/datatypes_conf.xml',
filepath=None,
valid_tools_only=False,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded datatypes_conf.xml.',
strings_displayed=[],
strings_not_displayed=[] )
repository = self.get_or_create_repository( name='emboss_0020',
description='Galaxy wrappers for Emboss version 5.0.0 tools',
long_description='Galaxy wrappers for Emboss version 5.0.0 tools',
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
self.upload_file( repository,
filename='emboss/emboss.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded emboss.tar',
strings_displayed=[],
strings_not_displayed=[] )
def test_0025_create_repositories_from_0030_series( self ):
        '''Create repositories emboss_0030, emboss_5_0030, emboss_6_0030, and emboss_datatypes_0030.'''
        global repository_datatypes_count
category = self.create_category( name=category_0030_name, description='' )
datatypes_repository = self.get_or_create_repository( name='emboss_datatypes_0030',
description=datatypes_repository_description,
long_description=datatypes_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
if self.repository_is_new( datatypes_repository ):
self.upload_file( datatypes_repository,
filename='emboss/datatypes/datatypes_conf.xml',
filepath=None,
valid_tools_only=False,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded datatypes_conf.xml.',
strings_displayed=[],
strings_not_displayed=[] )
repository_datatypes_count = int( self.get_repository_datatypes_count( datatypes_repository ) )
emboss_5_repository = self.get_or_create_repository( name='emboss_5_0030',
description=emboss_repository_description,
long_description=emboss_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
self.upload_file( emboss_5_repository,
filename='emboss/emboss.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded emboss.tar',
strings_displayed=[],
strings_not_displayed=[] )
repository_dependencies_path = self.generate_temp_path( 'test_0330', additional_paths=[ 'emboss', '5' ] )
dependency_tuple = ( self.url, datatypes_repository.name, datatypes_repository.user.username, self.get_repository_tip( datatypes_repository ) )
self.create_repository_dependency( repository=emboss_5_repository, repository_tuples=[ dependency_tuple ], filepath=repository_dependencies_path )
emboss_6_repository = self.get_or_create_repository( name='emboss_6_0030',
description=emboss_repository_description,
long_description=emboss_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
self.upload_file( emboss_6_repository,
filename='emboss/emboss.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded emboss.tar',
strings_displayed=[],
strings_not_displayed=[] )
repository_dependencies_path = self.generate_temp_path( 'test_0330', additional_paths=[ 'emboss', '6' ] )
dependency_tuple = ( self.url, datatypes_repository.name, datatypes_repository.user.username, self.get_repository_tip( datatypes_repository ) )
self.create_repository_dependency( repository=emboss_6_repository, repository_tuples=[ dependency_tuple ], filepath=repository_dependencies_path )
emboss_repository = self.get_or_create_repository( name='emboss_0030',
description=emboss_repository_description,
long_description=emboss_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
self.upload_file( emboss_repository,
filename='emboss/emboss.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded emboss.tar',
strings_displayed=[],
strings_not_displayed=[] )
repository_dependencies_path = self.generate_temp_path( 'test_0330', additional_paths=[ 'emboss', '5' ] )
dependency_tuple = ( self.url, emboss_5_repository.name, emboss_5_repository.user.username, self.get_repository_tip( emboss_5_repository ) )
self.create_repository_dependency( repository=emboss_repository, repository_tuples=[ dependency_tuple ], filepath=repository_dependencies_path )
dependency_tuple = ( self.url, emboss_6_repository.name, emboss_6_repository.user.username, self.get_repository_tip( emboss_6_repository ) )
self.create_repository_dependency( repository=emboss_repository, repository_tuples=[ dependency_tuple ], filepath=repository_dependencies_path )
def test_0030_create_repositories_from_0040_series( self ):
'''Create repositories freebayes_0040 and filtering_0040.'''
category = self.create_category( name=category_0040_name, description='' )
repository = self.get_or_create_repository( name='freebayes_0040',
description="Galaxy's freebayes tool",
long_description="Long description of Galaxy's freebayes tool",
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
if self.repository_is_new( repository ):
self.upload_file( repository,
filename='freebayes/freebayes.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded the tool tarball.',
strings_displayed=[],
strings_not_displayed=[] )
repository = self.get_or_create_repository( name='filtering_0040',
description=filtering_repository_description,
long_description=filtering_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
self.upload_file( repository,
filename='filtering/filtering_1.1.0.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded the tool tarball for filtering 1.1.0.',
strings_displayed=[],
strings_not_displayed=[] )
repository = self.test_db_util.get_repository_by_name_and_owner( 'freebayes_0040', common.test_user_1_name )
filtering_repository = self.test_db_util.get_repository_by_name_and_owner( 'filtering_0040', common.test_user_1_name )
repository_dependencies_path = self.generate_temp_path( 'test_1340', additional_paths=[ 'filtering' ] )
repository_tuple = ( self.url, repository.name, repository.user.username, self.get_repository_tip( repository ) )
self.create_repository_dependency( repository=filtering_repository, repository_tuples=[ repository_tuple ], filepath=repository_dependencies_path )
repository = self.test_db_util.get_repository_by_name_and_owner( 'filtering_0040', common.test_user_1_name )
freebayes_repository = self.test_db_util.get_repository_by_name_and_owner( 'freebayes_0040', common.test_user_1_name )
repository_dependencies_path = self.generate_temp_path( 'test_1340', additional_paths=[ 'freebayes' ] )
repository_tuple = ( self.url, repository.name, repository.user.username, self.get_repository_tip( repository ) )
self.create_repository_dependency( repository=freebayes_repository, repository_tuples=[ repository_tuple ], filepath=repository_dependencies_path )
def test_0035_create_repositories_from_0050_series( self ):
'''Create repositories emboss_0050, emboss_datatypes_0050, filtering_0050, freebayes_0050.'''
category = self.create_category( name=category_0050_name, description='' )
datatypes_repository = self.get_or_create_repository( name='emboss_datatypes_0050',
description="Datatypes for emboss",
long_description="Long description of Emboss' datatypes",
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
if self.repository_is_new( datatypes_repository ):
emboss_repository = self.get_or_create_repository( name='emboss_0050',
description="Galaxy's emboss tool",
long_description="Long description of Galaxy's emboss tool",
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
filtering_repository = self.get_or_create_repository( name='filtering_0050',
description="Galaxy's filtering tool",
long_description="Long description of Galaxy's filtering tool",
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
freebayes_repository = self.get_or_create_repository( name='freebayes_0050',
description="Galaxy's freebayes tool",
long_description="Long description of Galaxy's freebayes tool",
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
self.upload_file( datatypes_repository,
filename='emboss/datatypes/datatypes_conf.xml',
filepath=None,
valid_tools_only=False,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded datatypes_conf.xml.',
strings_displayed=[],
strings_not_displayed=[] )
self.upload_file( emboss_repository,
filename='emboss/emboss.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded emboss.tar',
strings_displayed=[],
strings_not_displayed=[] )
self.upload_file( freebayes_repository,
filename='freebayes/freebayes.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded freebayes tarball.',
strings_displayed=[],
strings_not_displayed=[] )
self.upload_file( filtering_repository,
filename='filtering/filtering_1.1.0.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded filtering 1.1.0 tarball.',
strings_displayed=[],
strings_not_displayed=[] )
repository_dependencies_path = self.generate_temp_path( 'test_0350', additional_paths=[ 'emboss' ] )
dependency_tuple = ( self.url, datatypes_repository.name, datatypes_repository.user.username, self.get_repository_tip( datatypes_repository ) )
self.create_repository_dependency( repository=emboss_repository, repository_tuples=[ dependency_tuple ], filepath=repository_dependencies_path )
repository_dependencies_path = self.generate_temp_path( 'test_0350', additional_paths=[ 'filtering' ] )
dependency_tuple = ( self.url, emboss_repository.name, emboss_repository.user.username, self.get_repository_tip( emboss_repository ) )
self.create_repository_dependency( repository=filtering_repository, repository_tuples=[ dependency_tuple ], filepath=repository_dependencies_path )
repository_dependencies_path = self.generate_temp_path( 'test_0350', additional_paths=[ 'freebayes' ] )
dependency_tuple = ( self.url, filtering_repository.name, filtering_repository.user.username, self.get_repository_tip( filtering_repository ) )
self.create_repository_dependency( repository=emboss_repository, repository_tuples=[ dependency_tuple ], filepath=repository_dependencies_path )
dependency_tuples = [ ( self.url, datatypes_repository.name, datatypes_repository.user.username, self.get_repository_tip( datatypes_repository ) ),
( self.url, emboss_repository.name, emboss_repository.user.username, self.get_repository_tip( emboss_repository ) ),
( self.url, filtering_repository.name, filtering_repository.user.username, self.get_repository_tip( filtering_repository ) ),
( self.url, freebayes_repository.name, freebayes_repository.user.username, self.get_repository_tip( freebayes_repository ) ) ]
self.create_repository_dependency( repository=freebayes_repository, repository_tuples=dependency_tuples, filepath=repository_dependencies_path )
    def test_0040_create_repositories_from_0060_series( self ):
        '''Create repository filtering_0060 if necessary.'''
category = self.create_category( name=category_0060_name, description='' )
workflow_repository = self.get_or_create_repository( name='filtering_0060',
description="Galaxy's filtering tool",
long_description="Long description of Galaxy's filtering tool",
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
if self.repository_is_new( workflow_repository ):
            workflow = open( self.get_filename( 'filtering_workflow/Workflow_for_0060_filter_workflow_repository.ga' ), 'r' ).read()
workflow = workflow.replace( '__TEST_TOOL_SHED_URL__', self.url.replace( 'http://', '' ) )
workflow_filepath = self.generate_temp_path( 'test_0360', additional_paths=[ 'filtering_workflow' ] )
if not os.path.exists( workflow_filepath ):
os.makedirs( workflow_filepath )
            open( os.path.join( workflow_filepath, workflow_filename ), 'w+' ).write( workflow )
self.upload_file( workflow_repository,
filename=workflow_filename,
filepath=workflow_filepath,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded filtering workflow.',
strings_displayed=[],
strings_not_displayed=[] )
self.upload_file( workflow_repository,
filename='filtering/filtering_2.2.0.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded filtering 2.2.0 tarball.',
strings_displayed=[],
strings_not_displayed=[] )
def test_9900_install_all_missing_repositories( self ):
'''Call the install_repository method to ensure that all required repositories are installed.'''
global repository_datatypes_count
global base_datatypes_count
global running_standalone
self.galaxy_logout()
self.galaxy_login( email=common.admin_email, username=common.admin_username )
base_datatypes_count = int( self.get_datatypes_count() )
self.install_repository( 'filtering_0000', common.test_user_1_name, category_0000_name, strings_displayed=[] )
self.install_repository( 'freebayes_0010', common.test_user_1_name, category_0010_name, strings_displayed=[] )
self.install_repository( 'emboss_0020', common.test_user_1_name, category_0020_name, strings_displayed=[] )
self.install_repository( 'emboss_5_0030', common.test_user_1_name, category_0030_name, strings_displayed=[] )
self.install_repository( 'freebayes_0050', common.test_user_1_name, category_0050_name, strings_displayed=[] )
self.install_repository( 'filtering_0060', common.test_user_1_name, category_0060_name, strings_displayed=[] )
current_datatypes = int( self.get_datatypes_count() )
# If we are running this test by itself, installing the emboss repository should also install the emboss_datatypes
# repository, and this should add datatypes to the datatypes registry. If that is the case, verify that datatypes
# have been added, otherwise verify that the count is unchanged.
if running_standalone:
message = 'Installing emboss did not add new datatypes.\nFound: %d\nExpected: %d' % \
( current_datatypes, base_datatypes_count + repository_datatypes_count )
assert current_datatypes > base_datatypes_count, message
else:
assert current_datatypes == base_datatypes_count, 'Installing emboss added new datatypes.'
def test_9905_reset_metadata_on_all_repositories( self ):
'''Reset metadata on all repositories, then verify that it has not changed.'''
repository_metadata = dict()
repositories = self.test_db_util.get_all_installed_repositories( actually_installed=True )
for repository in repositories:
repository_metadata[ self.security.encode_id( repository.id ) ] = repository.metadata
self.reset_metadata_on_selected_installed_repositories( repository_metadata.keys() )
for repository in repositories:
self.test_db_util.ga_refresh( repository )
old_metadata = repository_metadata[ self.security.encode_id( repository.id ) ]
# When a repository with tools to be displayed in a tool panel section is deactivated and reinstalled,
# the tool panel section remains in the repository metadata. However, when the repository's metadata
# is subsequently reset, the tool panel section is removed from the repository metadata. While this
# is normal and expected behavior, the functional tests assume that repository metadata will not change
# in any way after a reset. A workaround is to remove the tool panel section from the stored repository
# metadata dict, in order to eliminate the misleading detection of changed metadata.
if 'tool_panel_section' in old_metadata and 'tool_panel_section' not in repository.metadata:
del old_metadata[ 'tool_panel_section' ]
assert repository.metadata == old_metadata, 'Metadata for %s repository %s changed after reset. \nOld: %s\nNew: %s' % \
( repository.status, repository.name, old_metadata, repository.metadata )
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/test/tool_shed/functional/test_1300_reset_all_metadata.py
|
Python
|
gpl-3.0
| 34,892
|
[
"Galaxy"
] |
8853862d046f71c6710258074f7b985a56132afbd318e9e9211433f296a414f5
|
"""uwg simulation running commands."""
import sys
import json
import logging
try:
import click
except ImportError:
raise ImportError(
'click is not installed. Try `pip install . [cli]` command.'
)
from uwg import UWG
_logger = logging.getLogger(__name__)
@click.group(help='Commands for simulating UWG models.')
def simulate():
pass
@simulate.command('model')
@click.argument('model-json', type=click.Path(
exists=True, file_okay=True, dir_okay=False, resolve_path=True))
@click.argument('epw-path', type=click.Path(
exists=True, file_okay=True, dir_okay=False, resolve_path=True))
@click.option('--new-epw-dir', help='Optional argument for the destination directory '
'into which the morphed .epw file is written. The argument passed here '
'will overwrite the new_epw_dir specified in the UWG JSON model file.',
default=None, show_default=True)
@click.option('--new-epw-name', help='Optional argument for the destination file name '
'of the morphed .epw file. The argument passed here will overwrite the '
'new_epw_name specified in the UWG JSON model file.',
default=None, show_default=True)
def simulate_json_model(model_json, epw_path, new_epw_dir, new_epw_name):
"""Simulate a UWG model from a JSON model file.
\b
Args:
model_json: Full path to a JSON model file.
epw_path: Full path of the rural .epw file that will be morphed.
"""
try:
with open(model_json) as json_file:
data = json.load(json_file)
uwg_model = UWG.from_dict(
data, epw_path=epw_path, new_epw_dir=new_epw_dir, new_epw_name=new_epw_name)
uwg_model.generate()
uwg_model.simulate()
uwg_model.write_epw()
except Exception as e:
_logger.exception('UWG model simulation failed.\n{}'.format(e))
sys.exit(1)
else:
sys.exit(0)
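# Illustrative usage (assuming the CLI group is exposed through a `uwg`
# entry point; paths are hypothetical):
#   uwg simulate model my_model.json my_weather.epw --new-epw-dir ./morphed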
@simulate.command('param')
@click.argument('param-uwg', type=click.Path(
exists=True, file_okay=True, dir_okay=False, resolve_path=True))
@click.argument('epw-path', type=click.Path(
exists=True, file_okay=True, dir_okay=False, resolve_path=True))
@click.option('--new-epw-dir', help='Optional argument for the destination directory '
'into which the morphed .epw file is written.',
default=None, show_default=True)
@click.option('--new-epw-name', help='Optional argument for the destination file name '
'of the morphed .epw file.',
default=None, show_default=True)
def simulate_uwg_param_model(param_uwg, epw_path, new_epw_dir, new_epw_name):
"""Simulate a UWG model from a .uwg parameter file.
\b
Args:
param_uwg: Full path to a .uwg param file.
epw_path: Full path of the rural .epw file that will be morphed.
"""
try:
uwg_model = UWG.from_param_file(
param_uwg, epw_path, new_epw_dir, new_epw_name)
uwg_model.generate()
uwg_model.simulate()
uwg_model.write_epw()
except Exception as e:
_logger.exception('UWG model simulation failed.\n{}'.format(e))
sys.exit(1)
else:
sys.exit(0)
|
chriswmackey/UWG_Python
|
uwg/cli/simulate.py
|
Python
|
gpl-3.0
| 3,213
|
[
"EPW"
] |
2dab0cc0358c3f92b6ae1376db2d53e2f0361ff6f438a867d000eba9326a48c0
|
# Copyright (C) 2003 CAMP
# Please see the accompanying LICENSE file for further information.
"""
Ref. to Kresse-paper ... XXX
"""
import numpy as np
from gpaw.utilities.blas import axpy
from gpaw.fd_operators import FDOperator
class BaseMixer:
"""Pulay density mixer."""
def __init__(self, beta=0.1, nmaxold=3, weight=50.0, dtype=float):
"""Construct density-mixer object.
Parameters
----------
beta: float
Mixing parameter between zero and one (one is most
aggressive).
nmaxold: int
Maximum number of old densities.
weight: float
Weight parameter for special metric (for long wave-length
changes).
"""
self.beta = beta
self.nmaxold = nmaxold
self.weight = weight
self.dtype = dtype
self.dNt = None
self.mix_rho = False
def initialize_metric(self, gd):
self.gd = gd
if self.weight == 1:
self.metric = None
else:
a = 0.125 * (self.weight + 7)
b = 0.0625 * (self.weight - 1)
c = 0.03125 * (self.weight - 1)
d = 0.015625 * (self.weight - 1)
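            # NOTE: the 27 stencil coefficients sum to `weight`
            # (a + 6*b + 12*c + 8*d == weight); for weight == 1 they reduce
            # to the identity, which is why the metric is skipped above.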
self.metric = FDOperator([a,
b, b, b, b, b, b,
c, c, c, c, c, c, c, c, c, c, c, c,
d, d, d, d, d, d, d, d],
[(0, 0, 0),
(-1, 0, 0), (1, 0, 0), #b
(0, -1, 0), (0, 1, 0), #b
(0, 0, -1), (0, 0, 1), #b
(1, 1, 0), (1, 0, 1), (0, 1, 1), #c
(1, -1, 0), (1, 0, -1), (0, 1, -1), #c
(-1, 1, 0), (-1, 0, 1), (0, -1, 1), #c
(-1, -1, 0), (-1, 0, -1), (0, -1, -1), #c
(1, 1, 1), (1, 1, -1), (1, -1, 1), #d
(-1, 1, 1), (1, -1, -1), (-1, -1, 1), #d
(-1, 1, -1), (-1, -1, -1) #d
],
gd, self.dtype).apply
self.mR_G = gd.empty(dtype=self.dtype)
def initialize(self, density):
self.initialize_metric(density.gd)
def reset(self):
"""Reset Density-history.
Called at initialization and after each move of the atoms.
my_nuclei: All nuclei in local domain.
"""
# History for Pulay mixing of densities:
self.nt_iG = [] # Pseudo-electron densities
self.R_iG = [] # Residuals
self.A_ii = np.zeros((0, 0))
self.dNt = None
self.D_iap = []
self.dD_iap = []
def get_charge_sloshing(self):
"""Return number of electrons moving around.
Calculated as the integral of the absolute value of the change
of the density from input to output."""
return self.dNt
def set_charge_sloshing(self, dNt):
self.dNt = dNt
def mix(self, nt_G, D_ap, phase_cd=None):
iold = len(self.nt_iG)
if iold > 0:
if iold > self.nmaxold:
# Throw away too old stuff:
del self.nt_iG[0]
del self.R_iG[0]
del self.D_iap[0]
del self.dD_iap[0]
# for D_p, D_ip, dD_ip in self.D_a:
# del D_ip[0]
# del dD_ip[0]
iold = self.nmaxold
# Calculate new residual (difference between input and output)
R_G = nt_G - self.nt_iG[-1]
# Use np.absolute instead of np.fabs
self.dNt = self.gd.integrate(np.absolute(R_G))
self.R_iG.append(R_G)
self.dD_iap.append([])
for D_p, D_ip in zip(D_ap, self.D_iap[-1]):
self.dD_iap[-1].append(D_p - D_ip)
# Update matrix:
A_ii = np.zeros((iold, iold))
i1 = 0
i2 = iold - 1
if self.metric is None:
mR_G = R_G
else:
mR_G = self.mR_G
self.metric(R_G, mR_G, phase_cd=phase_cd)
for R_1G in self.R_iG:
# Inner product between new and old residues
# XXX For now, use only real part of residues
# For complex quantities a .conjugate should be added ??
a = self.gd.comm.sum(np.vdot(R_1G.real, mR_G.real))
if self.dtype == complex:
a += self.gd.comm.sum(np.vdot(R_1G.imag, mR_G.imag))
A_ii[i1, i2] = a
A_ii[i2, i1] = a
i1 += 1
A_ii[:i2, :i2] = self.A_ii[-i2:, -i2:]
self.A_ii = A_ii
try:
B_ii = np.linalg.inv(A_ii)
except np.linalg.LinAlgError:
alpha_i = np.zeros(iold)
alpha_i[-1] = 1.0
else:
alpha_i = B_ii.sum(1)
try:
# Normalize:
alpha_i /= alpha_i.sum()
except ZeroDivisionError:
alpha_i[:] = 0.0
alpha_i[-1] = 1.0
# Calculate new input density:
nt_G[:] = 0.0
for D in D_ap:
D[:] = 0.0
beta = self.beta
for i, alpha in enumerate(alpha_i):
axpy(alpha, self.nt_iG[i], nt_G)
axpy(alpha * beta, self.R_iG[i], nt_G)
for D_p, D_ip, dD_ip in zip(D_ap, self.D_iap[i],
self.dD_iap[i]):
axpy(alpha, D_ip, D_p)
axpy(alpha * beta, dD_ip, D_p)
# Store new input density (and new atomic density matrices):
self.nt_iG.append(nt_G.copy())
self.D_iap.append([])
for D_p in D_ap:
self.D_iap[-1].append(D_p.copy())
def estimate_memory(self, mem, gd):
gridbytes = gd.bytecount()
mem.subnode('nt_iG, R_iG', 2 * self.nmaxold * gridbytes)
def __repr__(self):
classname = self.__class__.__name__
template = '%s(beta=%f, nmaxold=%d, weight=%f)'
string = template % (classname, self.beta, self.nmaxold, self.weight)
return string
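# A toy illustration of the Pulay step in mix() above (plain numpy, not
# GPAW API): given the overlap matrix A of metric-weighted residuals,
# the mixing coefficients are the normalized row sums of its inverse.
#
# import numpy as np
# A = np.array([[2.0, 0.5], [0.5, 1.0]]) # hypothetical residual overlaps
# alpha = np.linalg.inv(A).sum(1)
# alpha /= alpha.sum() # these weight the old densities and residuals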
class DummyMixer(BaseMixer):
"""Dummy mixer for TDDFT, i.e., it does not mix."""
def mix(self, nt_G):
pass
def estimate_memory(self, mem, gd):
pass
class Mixer(BaseMixer):
"""Mix spin up and down densities separately"""
def initialize(self, density):
self.mixers = []
for s in range(density.nspins):
mixer = BaseMixer(self.beta, self.nmaxold, self.weight)
mixer.initialize_metric(density.gd)
self.mixers.append(mixer)
def mix(self, density):
"""Mix pseudo electron densities."""
nt_sG = density.nt_sG
D_asp = density.D_asp.values()
D_sap = []
for s in range(density.nspins):
D_sap.append([D_sp[s] for D_sp in D_asp])
for nt_G, D_ap, mixer in zip(nt_sG, D_sap, self.mixers):
mixer.mix(nt_G, D_ap)
def reset(self):
for mixer in self.mixers:
mixer.reset()
def get_charge_sloshing(self):
"""Return number of electrons moving around.
Calculated as the integral of the absolute value of the change
of the density from input to output."""
if self.mixers[0].dNt is None:
return None
return sum([mixer.dNt for mixer in self.mixers])
def set_charge_sloshing(self, dNt):
for mixer in self.mixers:
mixer.set_charge_sloshing(dNt / len(self.mixers))
class MixerSum(BaseMixer):
"""For pseudo electron densities, mix the total charge density and for
density matrices, mix spin up and densities separately.
Magnetization density is not mixed, i.e new magnetization density is used
"""
def mix(self, density):
nt_sG = density.nt_sG
D_asp = density.D_asp.values()
# Mix density
nt_G = density.nt_sG.sum(0)
BaseMixer.mix(self, nt_G, D_asp)
# Only new magnetization for spin density
dnt_G = nt_sG[0] - nt_sG[1]
#dD_ap = [D_sp[0] - D_sp[1] for D_sp in D_asp]
# Construct new spin up/down densities
nt_sG[0] = 0.5 * (nt_G + dnt_G)
nt_sG[1] = 0.5 * (nt_G - dnt_G)
class MixerSum2(BaseMixer):
"""Mix the total pseudo electron density and the total density matrices.
Magnetization density is not mixed, i.e., the new magnetization density is used.
"""
def mix(self, density):
nt_sG = density.nt_sG
D_asp = density.D_asp.values()
# Mix density
nt_G = density.nt_sG.sum(0)
D_ap = [D_p[0] + D_p[1] for D_p in D_asp]
BaseMixer.mix(self, nt_G, D_ap)
# Only new magnetization for spin density
dnt_G = nt_sG[0] - nt_sG[1]
dD_ap = [D_sp[0] - D_sp[1] for D_sp in D_asp]
# Construct new spin up/down densities
nt_sG[0] = 0.5 * (nt_G + dnt_G)
nt_sG[1] = 0.5 * (nt_G - dnt_G)
for D_sp, D_p, dD_p in zip(D_asp, D_ap, dD_ap):
D_sp[0] = 0.5 * (D_p + dD_p)
D_sp[1] = 0.5 * (D_p - dD_p)
class MixerDif(BaseMixer):
"""Mix the charge density and magnetization density separately"""
def __init__(self, beta=0.1, nmaxold=3, weight=50.0,
beta_m=0.7, nmaxold_m=2, weight_m=10.0):
"""Construct density-mixer object.
Parameters
----------
beta: float
Mixing parameter between zero and one (one is most
aggressive).
nmaxold: int
Maximum number of old densities.
weight: float
Weight parameter for special metric (for long wave-length
changes).
beta_m, nmaxold_m, weight_m:
Corresponding parameters for mixing the magnetization density.
"""
self.beta = beta
self.nmaxold = nmaxold
self.weight = weight
self.beta_m = beta_m
self.nmaxold_m = nmaxold_m
self.weight_m = weight_m
self.dNt = None
self.mix_rho = False
def initialize(self, density):
assert density.nspins == 2
self.mixer = BaseMixer(self.beta, self.nmaxold, self.weight)
self.mixer.initialize_metric(density.gd)
self.mixer_m = BaseMixer(self.beta_m, self.nmaxold_m, self.weight_m)
self.mixer_m.initialize_metric(density.gd)
def reset(self):
self.mixer.reset()
self.mixer_m.reset()
def mix(self, density):
nt_sG = density.nt_sG
D_asp = density.D_asp.values()
# Mix density
nt_G = density.nt_sG.sum(0)
D_ap = [D_sp[0] + D_sp[1] for D_sp in D_asp]
self.mixer.mix(nt_G, D_ap)
# Mix magnetization
dnt_G = nt_sG[0] - nt_sG[1]
dD_ap = [D_sp[0] - D_sp[1] for D_sp in D_asp]
self.mixer_m.mix(dnt_G, dD_ap)
# Construct new spin up/down densities
nt_sG[0] = 0.5 * (nt_G + dnt_G)
nt_sG[1] = 0.5 * (nt_G - dnt_G)
for D_sp, D_p, dD_p in zip(D_asp, D_ap, dD_ap):
D_sp[0] = 0.5 * (D_p + dD_p)
D_sp[1] = 0.5 * (D_p - dD_p)
def get_charge_sloshing(self):
if self.mixer.dNt is None:
return None
return self.mixer.dNt
class MixerRho(BaseMixer):
def initialize(self, density):
self.mix_rho = True
self.initialize_metric(density.finegd)
def mix(self, density):
"""Mix pseudo electron densities."""
rhot_g = density.rhot_g
BaseMixer.mix(self, rhot_g, [])
class MixerRho2(BaseMixer):
def initialize(self, density):
self.mix_rho = True
self.initialize_metric(density.finegd)
def mix(self, density):
"""Mix pseudo electron densities."""
rhot_g = density.rhot_g
BaseMixer.mix(self, rhot_g, density.D_asp.values())
class BaseMixer_Broydn:
def __init__(self, beta=0.1, nmaxold=6):
self.verbose = False
self.beta = beta
self.nmaxold = nmaxold
self.weight = 1
self.mix_rho = False
def initialize(self, density):
self.gd = density.gd
def reset(self):
self.step = 0
self.d_nt_G = []
self.d_D_ap = []
self.nt_iG = []
self.D_iap = []
self.c_G = []
self.v_G = []
self.u_G = []
self.u_D = []
self.dNt = None
def get_charge_sloshing(self):
return self.dNt
def mix(self, nt_G, D_ap):
if self.step > 2:
del self.d_nt_G[0]
for d_Dp in self.d_D_ap:
del d_Dp[0]
if self.step > 0:
self.d_nt_G.append(nt_G - self.nt_iG[-1])
for d_Dp, D_p, D_ip in zip(self.d_D_ap, D_ap, self.D_iap):
d_Dp.append(D_p - D_ip[-1])
fmin_G = self.gd.integrate(self.d_nt_G[-1] * self.d_nt_G[-1])
self.dNt = self.gd.integrate(np.fabs(self.d_nt_G[-1]))
if self.verbose:
print('Mixer: broydn: fmin_G = %f' % fmin_G)
if self.step == 0:
self.eta_G = np.empty(nt_G.shape)
self.eta_D = []
for D_p in D_ap:
self.eta_D.append(0)
self.u_D.append([])
self.D_iap.append([])
self.d_D_ap.append([])
else:
if self.step >= 2:
del self.c_G[:]
if len(self.v_G) >= self.nmaxold:
del self.u_G[0]
del self.v_G[0]
for u_D in self.u_D:
del u_D[0]
temp_nt_G = self.d_nt_G[1] - self.d_nt_G[0]
self.v_G.append(temp_nt_G / self.gd.integrate(temp_nt_G
* temp_nt_G))
if len(self.v_G) < self.nmaxold:
nstep = self.step - 1
else:
nstep = self.nmaxold
for i in range(nstep):
self.c_G.append(self.gd.integrate(self.v_G[i] *
self.d_nt_G[1]))
self.u_G.append(self.beta * temp_nt_G + self.nt_iG[1] - self.nt_iG[0])
for d_Dp, u_D, D_ip in zip(self.d_D_ap, self.u_D, self.D_iap):
temp_D_ap = d_Dp[1] - d_Dp[0]
u_D.append(self.beta * temp_D_ap + D_ip[1] - D_ip[0])
usize = len(self.u_G)
for i in range(usize - 1):
a_G = self.gd.integrate(self.v_G[i] * temp_nt_G)
axpy(-a_G, self.u_G[i], self.u_G[usize - 1])
for u_D in self.u_D:
axpy(-a_G, u_D[i], u_D[usize - 1])
self.eta_G = self.beta * self.d_nt_G[-1]
for i, d_Dp in enumerate(self.d_D_ap):
self.eta_D[i] = self.beta * d_Dp[-1]
usize = len(self.u_G)
for i in range(usize):
axpy(-self.c_G[i], self.u_G[i], self.eta_G)
for eta_D, u_D in zip(self.eta_D, self.u_D):
axpy(-self.c_G[i], u_D[i], eta_D)
axpy(-1.0, self.d_nt_G[-1], nt_G)
axpy(1.0, self.eta_G, nt_G)
for D_p, d_Dp, eta_D in zip(D_ap, self.d_D_ap, self.eta_D):
axpy(-1.0, d_Dp[-1], D_p)
axpy(1.0, eta_D, D_p)
if self.step >= 2:
del self.nt_iG[0]
for D_ip in self.D_iap:
del D_ip[0]
self.nt_iG.append(np.copy(nt_G))
for D_ip, D_p in zip(self.D_iap, D_ap):
D_ip.append(np.copy(D_p))
self.step += 1
class Mixer_Broydn(BaseMixer_Broydn):
"""Mix spin up and down densities separately"""
def initialize(self, density):
self.mixers = []
for s in range(density.nspins):
mixer = BaseMixer_Broydn()
mixer.initialize(density)
#mixer.initialize_metric(density.gd)
self.mixers.append(mixer)
def mix(self, density):
"""Mix pseudo electron densities."""
nt_sG = density.nt_sG
D_asp = density.D_asp.values()
D_sap = []
for s in range(density.nspins):
D_sap.append([D_sp[s] for D_sp in D_asp])
for nt_G, D_ap, mixer in zip(nt_sG, D_sap, self.mixers):
mixer.mix(nt_G, D_ap)
def reset(self):
for mixer in self.mixers:
mixer.reset()
def get_charge_sloshing(self):
"""Return number of electrons moving around.
Calculated as the integral of the absolute value of the change
of the density from input to output."""
if self.mixers[0].dNt is None:
return None
return sum([mixer.dNt for mixer in self.mixers])
def set_charge_sloshing(self, dNt):
for mixer in self.mixers:
mixer.set_charge_sloshing(dNt / len(self.mixers))
class MixerSum_Broydn(BaseMixer_Broydn):
def mix(self, density):
nt_sG = density.nt_sG
D_asp = density.D_asp.values()
# Mix density
nt_G = density.nt_sG.sum(0)
BaseMixer_Broydn.mix(self, nt_G, D_asp)
# Only new magnetization for spin density
dnt_G = nt_sG[0] - nt_sG[1]
#dD_ap = [D_sp[0] - D_sp[1] for D_sp in D_asp]
# Construct new spin up/down densities
nt_sG[0] = 0.5 * (nt_G + dnt_G)
nt_sG[1] = 0.5 * (nt_G - dnt_G)
|
qsnake/gpaw
|
gpaw/dfpt/mixer.py
|
Python
|
gpl-3.0
| 17,948
|
[
"GPAW"
] |
bff834d4984661fda881e414868be81f9892be0f552d1ab5319c8bf240949508
|
#
# Copyright 2014 Flytxt
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from pyspark import SparkContext
from GMMclustering import GMMclustering
from pyspark.mllib.linalg import Vectors, SparseVector
class GMMModel(object):
"""
A clustering model derived from the Gaussian Mixture model.
>>> data = sc.parallelize(np.array([0.5,1,0.75,1,-0.75,0.5,-0.5,0.5,\
-1,-0.5,-0.75,-0.75,0.75,-0.5,0.75,-0.75]).reshape(8,2))
>>> model = GMMModel.trainGMM(data,4,10)
>>> np.argmax(model.predict(np.array([0.5,1]))) == \
np.argmax(model.predict(np.array([0.75,1])))
True
>>> np.argmax(model.predict(np.array([-0.75,0.5]))) == \
np.argmax(model.predict(np.array([-0.5,0.5])))
True
>>> np.argmax(model.predict(np.array([-1,-0.5]))) == \
np.argmax(model.predict(np.array([0.75,-0.5])))
False
>>> np.argmax(model.predict(np.array([0.75,-0.75]))) == \
np.argmax(model.predict(np.array([-0.75,-0.75])))
False
>>> sparse_data = ([Vectors.sparse(3, {1: 1.0}),\
Vectors.sparse(3, {1: 1.1}),\
Vectors.sparse(3, {2: 1.0}),\
Vectors.sparse(3, {2: 1.1})])
>>> sparse_data_rdd = sc.parallelize(sparse_data)
>>> model = GMMModel.trainGMM(sparse_data_rdd,2,10)
>>> np.argmax(model.predict(np.array([0., 1., 0.]))) == \
np.argmax(model.predict(np.array([0, 1.1, 0.])))
True
>>> np.argmax(model.predict(Vectors.sparse(3, {1: 1.0}))) == \
np.argmax(model.predict(Vectors.sparse(3, {2: 1.0})))
False
>>> np.argmax(model.predict(sparse_data[2])) == \
np.argmax(model.predict(sparse_data[3]))
True
"""
@classmethod
def trainGMM(cls, data, n_components, n_iter=100, ct=1e-3):
"""
Train a GMM clustering model.
"""
gmmObj = GMMclustering().fit(data, n_components, n_iter, ct)
return gmmObj
@classmethod
def resultPredict(cls, gmmObj, data):
"""
Run prediction on the data and return the responsibility matrix
and the cluster labels.
"""
responsibility_matrix = data.map(lambda m: gmmObj.predict(m))
cluster_labels = responsibility_matrix.map(lambda b: np.argmax(b))
return responsibility_matrix, cluster_labels
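# Example usage (hedged; `model` and `data` follow the doctest above):
# resp, labels = GMMModel.resultPredict(model, data)
# labels.collect() # one cluster index per input vector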
def _test():
import doctest
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
himanshu14/GMM
|
GMMModel.py
|
Python
|
apache-2.0
| 3,380
|
[
"Gaussian"
] |
d82bc14b1b6ad1ffcac0b906300a7ea779de551bd27cd68de278cb98c6d0bbf3
|
import sys
import struct
import traceback
import envi
import envi.bits as e_bits
from envi.bits import binary
from envi.archs.arm.const import *
from envi.archs.arm.regs import *
# Universal opcode things:
# len
# mode
#FIXME: TODO
# * Thumb Extension Parser
# * Jazelle Extension Parser
# * Emulators
####################################################################
# Parsers for the multiply family of instruction encodings
def chopmul(opcode):
op1 = (opcode >> 20) & 0xff
a = (opcode >> 16) & 0xf
b = (opcode >> 12) & 0xf
c = (opcode >> 8) & 0xf
d = (opcode >> 4) & 0xf
e = opcode & 0xf
return (op1<<4)+d,(a,b,c,d,e)
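# Worked example: 0xe0010293 encodes "mul r1, r3, r2". Here
# op1 = 0x00 and d = 9, so the key is 0b000000001001, which maps to
# ("mul", (0, 4, 2), 0) below; vals = (1, 0, 2, 9, 3), and indexes
# (0, 4, 2) pick out Rd=r1, Rm=r3, Rs=r2.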
# FIXME this seems to be universal...
def addrToName(mcanv, va):
sym = mcanv.syms.getSymByAddr(va)
if sym != None:
return repr(sym)
return "0x%.8x" % va
# The keys in this table are made of the
# concat of bits 27-21 and 7-4 (only when
# ienc == mul!
iencmul_codes = {
# Basic multiplication opcodes
binary("000000001001"): ("mul",(0,4,2), 0),
binary("000000011001"): ("mul",(0,4,2), IF_PSR_S),
binary("000000101001"): ("mla",(0,4,2,1), 0),
binary("000000111001"): ("mla",(0,4,2,1), IF_PSR_S),
binary("000001001001"): ("umaal",(1,0,4,2), 0),
binary("000010001001"): ("umull",(1,0,4,2), 0),
binary("000010011001"): ("umull",(1,0,4,2), IF_PSR_S),
binary("000010101001"): ("umlal",(1,0,4,2), 0),
binary("000010111001"): ("umlal",(1,0,4,2), IF_PSR_S),
binary("000011001001"): ("smull",(1,0,4,2), 0),
binary("000011011001"): ("smull",(1,0,4,2), IF_PSR_S),
binary("000011101001"): ("smlal",(1,0,4,2), 0),
binary("000011111001"): ("smlal",(1,0,4,2), IF_PSR_S),
# multiplys with <x><y>
# "B"
binary("000100001000"): ("smlabb", (0,4,2,1), 0),
binary("000100001010"): ("smlatb", (0,4,2,1), 0),
binary("000100001100"): ("smlabt", (0,4,2,1), 0),
binary("000100001110"): ("smlatt", (0,4,2,1), 0),
binary("000100101010"): ("smulwb", (0,4,2), 0),
binary("000100101110"): ("smulwt", (0,4,2), 0),
binary("000100101000"): ("smlawb", (0,4,2), 0),
binary("000100101100"): ("smlawt", (0,4,2), 0),
binary("000101001000"): ("smlalbb", (1,0,4,2), 0),
binary("000101001010"): ("smlaltb", (1,0,4,2), 0),
binary("000101001100"): ("smlalbt", (1,0,4,2), 0),
binary("000101001110"): ("smlaltt", (1,0,4,2), 0),
binary("000101101000"): ("smulbb", (0,4,2), 0),
binary("000101101010"): ("smultb", (0,4,2), 0),
binary("000101101100"): ("smulbt", (0,4,2), 0),
binary("000101101110"): ("smultt", (0,4,2), 0),
# type 2 multiplys
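# NOTE: several keys below repeat (e.g. smuad/smlad share the same bit
# pattern under this keying scheme, since it ignores the Ra field); in
# a dict literal the later entry silently wins, so only one of each
# colliding pair is reachable.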
binary("011100000001"): ("smuad", (0,4,2), 0),
binary("011100000011"): ("smuadx", (0,4,2), 0),
binary("011100000101"): ("smusd", (0,4,2), 0),
binary("011100000111"): ("smusdx", (0,4,2), 0),
binary("011100000001"): ("smlad", (0,4,2), 0),
binary("011100000011"): ("smladx", (0,4,2), 0),
binary("011100000101"): ("smlsd", (0,4,2), 0),
binary("011100000111"): ("smlsdx", (0,4,2), 0),
binary("011101000001"): ("smlald", (0,4,2), 0),
binary("011101000011"): ("smlaldx", (0,4,2), 0),
binary("011101000101"): ("smlsld", (0,4,2), 0),
binary("011101000111"): ("smlsldx", (0,4,2), 0),
binary("011101010001"): ("smmla", (0,4,2,1), 0),
binary("011101010011"): ("smmlar", (0,4,2,1), 0),
binary("011101011101"): ("smmls", (0,4,2,1), 0),
binary("011101011111"): ("smmlsr", (0,4,2,1), 0),
binary("011101010001"): ("smmul", (0,4,2), 0),
binary("011101010011"): ("smmulr", (0,4,2), 0),
}
def sh_lsl(num,shval):
return (num&0xffffffff) << shval
def sh_lsr(num,shval):
return (num&0xffffffff) >> shval
def sh_asr(num,shval):
return num >> shval
def sh_ror(num,shval):
return (((num&0xffffffff) >> shval) | (num<< (32-shval))) & 0xffffffff
def sh_rrx(num,shval, emu=None):
half1 = (num&0xffffffff) >> shval
half2 = num<<(33-shval)
newC = (num>>(shval-1)) & 1
if emu != None:
flags = emu.getFlags()
oldC = (flags>>PSR_C) & 1
emu.setFlags(flags & PSR_C_mask | newC) #part of the change
else:
oldC = 0 # FIXME:
retval = (half1 | half2 | (oldC << (32-shval))) & 0xffffffff
return retval
shifters = (
sh_lsl,
sh_lsr,
sh_asr,
sh_ror,
sh_rrx,
)
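# Quick sanity examples for the shifters (32-bit semantics):
# sh_lsl(1, 4) == 0x10
# sh_ror(0x80000001, 1) == 0xc0000000
# sh_asr(-4, 1) == -2 (arithmetic shift preserves the sign)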
####################################################################
# Mnemonic tables for opcode based mnemonic lookup
# Dataprocessing mnemonics
dp_mnem = ("and","eor","sub","rsb","add","adc","sbc","rsc","tst","teq","cmp","cmn","orr","mov","bic","mvn",)
# FIXME: THIS IS FUGLY
dp_noRn = (13,15)
dp_noRd = (8,9,10,11)
# FIXME: !!! Don't make SBZ and SBO's part of the list of opers !!!
# first parm SBZ: mov,mvn
# second parm SBZ: tst,teq,cmp,cmn,
def dpbase(opval):
"""
Parse and return opcode,sflag,Rn,Rd for a standard
dataprocessing instruction.
"""
ocode = (opval >> 21) & 0xf
sflag = (opval >> 20) & 0x1
Rn = (opval >> 16) & 0xf
Rd = (opval >> 12) & 0xf
#print "DPBASE:",ocode,sflag,Rn,Rd
return ocode,sflag,Rn,Rd
####################################################################
# Parser functions for each of the instruction encodings
def p_dp_imm_shift(opval, va):
ocode,sflag,Rn,Rd = dpbase(opval)
Rm = opval & 0xf
shtype = (opval >> 5) & 0x3
shval = (opval >> 7) & 0x1f #CHECKME: is this correctly done?
if ocode in dp_noRn:# FIXME: FUGLY (and slow...)
olist = (
ArmRegOper(Rd),
ArmRegShiftImmOper(Rm, shtype, shval),
)
elif ocode in dp_noRd:
olist = (
ArmRegOper(Rn),
ArmRegShiftImmOper(Rm, shtype, shval),
)
else:
olist = (
ArmRegOper(Rd),
ArmRegOper(Rn),
ArmRegShiftImmOper(Rm, shtype, shval),
)
opcode = (IENC_DP_IMM_SHIFT << 16) + ocode
if sflag > 0:
iflags = IF_PSR_S
else:
iflags = 0
return (opcode, dp_mnem[ocode], olist, iflags)
# specialized mnemonics for p_misc
qop_mnem = ('qadd','qsub','qdadd','qdsub')
smla_mnem = ('smlabb','smlabt','smlatb','smlatt',)
smlal_mnem = ('smlalbb','smlalbt','smlaltb','smlaltt',)
smul_mnem = ('smulbb','smulbt','smultb','smultt',)
smlaw_mnem = ('smlawb','smlawt',)
smulw_mnem = ('smulwb','smulwt',)
def p_misc(opval, va):
# 0x0f900000 = 0x01000000 or 0x01000010 (misc and misc1 are both parsed at the same time. see the footnote [2] on dp instructions in the Atmel AT91SAM7 docs
if opval & 0x0fc00000 == 0x01000000:
opcode = (IENC_MISC << 16) + 1
mnem = 'mrs'
r = (opval>>22) & 1
Rd = (opval>>12) & 0xf
olist = (
ArmRegOper(Rd),
ArmPgmStatRegOper(r),
)
elif opval & 0x0fc000f0 == 0x01200000:
opcode = (IENC_MISC << 16) + 2
mnem = 'msr'
r = (opval>>22) & 1
Rd = (opval>>12) & 0xf
olist = (
ArmPgmStatRegOper(r),
ArmRegOper(Rd),
)
elif opval & 0x0ff000f0 == 0x01200020:
opcode = (IENC_MISC << 16) + 5
mnem = 'bxj'
Rm = opval & 0xf
olist = ( ArmRegOper(Rm), )
elif opval & 0x0ff00090 == 0x01000080:
opcode = (IENC_MISC << 16) + 9
xy = (opval>>5)&3
mnem = smla_mnem[xy]
Rd = (opval>>16) & 0xf
Rn = (opval>>12) & 0xf
Rs = (opval>>8) & 0xf
Rm = opval & 0xf
olist = (
ArmRegOper(Rd),
ArmRegOper(Rm),
ArmRegOper(Rs),
ArmRegOper(Rn),
)
elif opval & 0x0ff000b0 == 0x01200080:
opcode = (IENC_MISC << 16) + 10
y = (opval>>6)&1
mnem = smlaw_mnem[y]
Rd = (opval>>16) & 0xf
Rn = (opval>>12) & 0xf
Rs = (opval>>8) & 0xf
Rm = opval & 0xf
olist = (
ArmRegOper(Rd),
ArmRegOper(Rm),
ArmRegOper(Rs),
ArmRegOper(Rn),
)
elif opval & 0x0ff000b0 == 0x012000a0:
opcode = (IENC_MISC << 16) + 11
y = (opval>>6)&1
mnem = smulw_mnem[y]
Rd = (opval>>16) & 0xf
Rs = (opval>>8) & 0xf
Rm = opval & 0xf
olist = (
ArmRegOper(Rd),
ArmRegOper(Rm),
ArmRegOper(Rs),
)
elif opval & 0x0ff00090 == 0x01400080:
opcode = (IENC_MISC << 16) + 12
xy = (opval>>5)&3
mnem = smlal_mnem[xy]
Rdhi = (opval>>16) & 0xf
Rdlo = (opval>>12) & 0xf
Rs = (opval>>8) & 0xf
Rm = opval & 0xf
olist = (
ArmRegOper(Rdlo),
ArmRegOper(Rdhi),
ArmRegOper(Rm), # smlal<x><y> takes RdLo, RdHi, Rm, Rs (Rn was unbound here)
ArmRegOper(Rs),
)
elif opval & 0x0ff00090 == 0x01600080:
opcode = (IENC_MISC << 16) + 13
xy = (opval>>5)&3
mnem = smul_mnem[xy]
Rd = (opval>>16) & 0xf
Rs = (opval>>8) & 0xf
Rm = opval & 0xf
olist = (
ArmRegOper(Rd),
ArmRegOper(Rm),
ArmRegOper(Rs),
)
#elif opval & 0x0fc00000 == 0x03200000:
#mnem = 'msr'
else:
raise Exception("p_misc: invalid instruction: %.8x:\t%.8x"%(va,opval))
opcode = IENC_UNDEF
mnem = "undefined instruction"
olist = ()
return (opcode, mnem, olist, 0)
#### these actually belong to the media section, and already exist there. FIXME: DELETE
#misc1_mnem = ("pkhbt", "pkhtb", "rev", "rev16", "revsh", "sel", "ssat", "ssat16", "usat", "usat16", )
def p_misc1(opval, va): #
#R = (opval>>22) & 1
#Rn = (opval>>16) & 0xf
#Rd = (opval>>12) & 0xf
#rot_imm = (opval>>8) & 0xf
#imm = opval & 0xff
#Rm = opval & 0xf
if opval & 0x0ff000f0 == 0x01200010:
opcode = INS_BX
mnem = 'bx'
Rm = opval & 0xf
olist = ( ArmRegOper(Rm), )
elif opval & 0x0ff000f0 == 0x01600010:
opcode = (IENC_MISC << 16) + 4
mnem = 'clz'
Rd = (opval>>12) & 0xf
Rm = opval & 0xf
olist = (
ArmRegOper(Rd),
ArmRegOper(Rm),
)
elif opval & 0x0ff000f0 == 0x01200030:
#opcode = (IENC_MISC << 16) + 6
opcode = INS_BLX
mnem = 'blx'
Rm = opval & 0xf
olist = ( ArmRegOper(Rm), )
elif opval & 0x0f9000f0 == 0x01000050: #all qadd/qsub's
opcode = (IENC_MISC << 16) + 7
qop = (opval>>21)&3
mnem = qop_mnem[qop]
Rn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
Rm = opval & 0xf
olist = (
ArmRegOper(Rd),
ArmRegOper(Rm),
ArmRegOper(Rn),
)
elif opval & 0x0ff000f0 == 0x01200070:
opcode = (IENC_MISC << 16) + 8
mnem = 'bkpt'
immed = ((opval>>4)&0xfff0) + (opval&0xf)
olist = ( ArmImmOper(immed), )
else:
raise Exception("p_misc1: invalid instruction: %.8x:\t%.8x"%(va,opval))
return (opcode, mnem, olist, 0)
swap_mnem = ("swp","swpb",)
strex_mnem = ("strex","ldrex",) # actual full instructions
#strh_mnem = ("strh","ldrh",)
#ldrs_mnem = ("ldrsh","ldrsb",)
#ldrd_mnem = ("ldrd","strd",)
strh_mnem = (("str",IF_H),("ldr",IF_H),) # IF_H
ldrs_mnem = (("ldr",IF_S|IF_H),("ldr",IF_S|IF_B),) # IF_SH, IF_SB
ldrd_mnem = (("ldr",IF_D),("str",IF_D),) # IF_D
def p_extra_load_store(opval, va):
pubwl = (opval>>20) & 0x1f
Rn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
Rs = (opval>>8) & 0xf
op1 = (opval>>5) & 0x3
Rm = opval & 0xf
iflags = 0
if opval&0x0fb000f0==0x01000090:# swp/swpb
idx = (pubwl>>2)&1
opcode = (IENC_EXTRA_LOAD << 16) + idx
mnem = swap_mnem[idx]
olist = (
ArmRegOper(Rd),
ArmRegOper(Rm),
ArmImmOffsetOper(Rn, 0, va),
)
elif opval&0x0fe000f0==0x01800090:# strex/ldrex
idx = pubwl&1
opcode = (IENC_EXTRA_LOAD << 16) + 2 + idx
mnem = strex_mnem[idx]
olist = (
ArmRegOper(Rd),
ArmRegOper(Rm),
ArmRegOper(Rn),
)
elif opval&0x0e4000f0==0x000000b0:# strh/ldrh regoffset
idx = pubwl&1
opcode = (IENC_EXTRA_LOAD << 16) + 4 + idx
mnem,iflags = strh_mnem[idx]
olist = (
ArmRegOper(Rd),
ArmRegOffsetOper(Rn, Rm, va, pubwl),
)
elif opval&0x0e4000f0==0x004000b0:# strh/ldrh immoffset
idx = pubwl&1
opcode = (IENC_EXTRA_LOAD << 16) + 6 + idx
mnem,iflags = strh_mnem[idx]
olist = (
ArmRegOper(Rd),
ArmImmOffsetOper(Rn, (Rs<<4)+Rm, va, pubwl),
)
elif opval&0x0e4000d0==0x004000d0:# ldrsh/b immoffset
idx = (opval>>5)&1
opcode = (IENC_EXTRA_LOAD << 16) + 8 + idx
mnem,iflags = ldrs_mnem[idx]
olist = (
ArmRegOper(Rd),
ArmImmOffsetOper(Rn, (Rs<<4)+Rm, va, pubwl),
)
elif opval&0x0e4000d0==0x000000d0:# ldrsh/b regoffset
idx = (opval>>5)&1
opcode = (IENC_EXTRA_LOAD << 16) + 10 + idx
mnem,iflags = ldrs_mnem[idx]
olist = (
ArmRegOper(Rd),
ArmRegOffsetOper(Rn, Rm, va, pubwl),
)
elif opval&0x0e5000d0==0x000000d0:# ldrd/strd regoffset
idx = (opval>>5)&1
opcode = (IENC_EXTRA_LOAD << 16) + 12 + idx
mnem,iflags = ldrd_mnem[idx]
olist = (
ArmRegOper(Rd),
ArmRegOffsetOper(Rn, Rm, va, pubwl),
)
elif opval&0x0e5000d0==0x004000d0:# ldrd/strd immoffset
idx = (opval>>5)&1
opcode = (IENC_EXTRA_LOAD << 16) + 14 + idx
mnem,iflags = ldrd_mnem[idx]
olist = (
ArmRegOper(Rd),
ArmImmOffsetOper(Rn, (Rs<<4)+Rm, va, pubwl),
)
else:
raise Exception("extra_load_store: invalid instruction: %.8x:\t%.8x"%(va,opval))
return (opcode, mnem, olist, iflags)
def p_dp_reg_shift(opval, va):
ocode,sflag,Rn,Rd = dpbase(opval)
Rm = opval & 0xf
shtype = (opval >> 5) & 0x3
Rs = (opval >> 8) & 0xf
if ocode in dp_noRn:# FIXME: FUGLY
olist = (
ArmRegOper(Rd),
ArmRegShiftRegOper(Rm, shtype, Rs),
)
elif ocode in dp_noRd:
olist = (
ArmRegOper(Rn),
ArmRegShiftRegOper(Rm, shtype, Rs),
)
else:
olist = (
ArmRegOper(Rd),
ArmRegOper(Rn),
ArmRegShiftRegOper(Rm, shtype, Rs),
)
opcode = (IENC_DP_REG_SHIFT << 16) + ocode
if sflag > 0:
iflags = IF_PSR_S
else:
iflags = 0
return (opcode, dp_mnem[ocode], olist, iflags)
multfail = (None, None, None,)
def p_mult(opval, va):
ocode, vals = chopmul(opval)
mnem, opindexes, flags = iencmul_codes.get(ocode, multfail)
if mnem == None:
raise Exception("p_mult: invalid instruction: %.8x:\t%.8x"%(va,opval))
olist = []
for i in opindexes:
olist.append(ArmRegOper(vals[i]))
opcode = (IENC_MULT << 16) + ocode
return (opcode, mnem, olist, flags)
def p_dp_imm(opval, va):
ocode,sflag,Rn,Rd = dpbase(opval)
imm = opval & 0xff
rot = (opval >> 7) & 0x1e # effectively, rot*2
if ocode in dp_noRn:# FIXME: FUGLY
olist = (
ArmRegOper(Rd),
ArmImmOper(imm, rot, S_ROR),
)
elif ocode in dp_noRd:
olist = (
ArmRegOper(Rn),
ArmImmOper(imm, rot, S_ROR),
)
else:
olist = (
ArmRegOper(Rd),
ArmRegOper(Rn),
ArmImmOper(imm, rot, S_ROR),
)
opcode = (IENC_DP_IMM << 16) + ocode
if sflag > 0:
iflags = IF_PSR_S
else:
iflags = 0
return (opcode, dp_mnem[ocode], olist, iflags)
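# The 8-bit immediate is rotated right by twice the 4-bit rot field
# (the doubling is folded into rot above). Example: imm=2 with encoded
# rot=1 yields ror32(2, 2) == 0x80000000, i.e. "mov r0, #0x80000000"
# is representable even though 0x80000000 does not fit in 8 bits.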
def p_undef(opval, va):
raise Exception("p_undef: invalid instruction (by definition in ARM spec): %.8x:\t%.8x"%(va,opval))
opcode = IENC_UNDEF
mnem = "undefined instruction"
olist = (
ArmImmOper(opval),
)
return (opcode, mnem, olist, 0)
def p_mov_imm_stat(opval, va): # only one instruction: "msr"
imm = opval & 0xff
rot = ((opval>>8) & 0xf) << 1 # rotate_imm is stored halved, as in p_dp_imm
r = (opval>>22) & 1
mask = (opval>>16) & 0xf
immed = ((imm>>rot) + (imm<<(32-rot))) & 0xffffffff
olist = (
ArmPgmStatRegOper(mask),
ArmImmOper(immed),
)
opcode = (IENC_MOV_IMM_STAT << 16)
return (opcode, "msr", olist, 0)
ldr_mnem = ("str", "ldr")
tsizes = (4, 1,)
def p_load_imm_off(opval, va):
pubwl = (opval>>20) & 0x1f
Rn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
imm = opval & 0xfff
if pubwl & 4: # B
iflags = IF_B
if (pubwl & 0x12) == 2:
iflags |= IF_T
else:
iflags = 0
olist = (
ArmRegOper(Rd),
ArmImmOffsetOper(Rn, imm, va, pubwl=pubwl) # u=-/+, b=word/byte
)
opcode = (IENC_LOAD_IMM_OFF << 16)
return (opcode, ldr_mnem[pubwl&1], olist, iflags)
def p_load_reg_off(opval, va):
pubwl = (opval>>20) & 0x1f
Rd = (opval>>12) & 0xf
Rn = (opval>>16) & 0xf
Rm = opval & 0xf
shtype = (opval>>5) & 0x3
shval = (opval>>7) & 0x1f
if pubwl & 4: # B
iflags = IF_B
if (pubwl & 0x12) == 2:
iflags |= IF_T
else:
iflags = 0
olist = (
ArmRegOper(Rd),
ArmScaledOffsetOper(Rn, Rm, shtype, shval, va, pubwl), # u=-/+, b=word/byte
)
opcode = (IENC_LOAD_REG_OFF << 16)
return (opcode, ldr_mnem[pubwl&1], olist, iflags)
def p_media(opval, va):
"""
27:20, 7:4
"""
# media is a parent for the following:
# parallel add/sub 01100
# pkh, ssat, ssat16, usat, usat16, sel 01101
# rev, rev16, revsh 01101
# smlad, smlsd, smlald, smusd 01110
# usad8, usada8 01111
definer = (opval>>23) & 0x1f
if definer == 0xc:
return p_media_parallel(opval, va)
elif definer == 0xd:
return p_media_pack_sat_rev_extend(opval, va)
elif definer == 0xe:
return p_mult(opval, va)
#return p_media_smul(opval, va)
else:
return p_media_usada(opval, va)
#generate mnemonics for parallel instructions (could do manually like last time...)
parallel_mnem = []
par_suffixes = ("add16", "addsubx", "subaddx", "sub16", "add8", "sub8", "", "")
par_prefixes = ("","s","q","sh","","u","uq","uh")
for pre in par_prefixes:
for suf in par_suffixes:
parallel_mnem.append(pre+suf)
parallel_mnem = tuple(parallel_mnem)
def p_media_parallel(opval, va):
opc1 = (opval>>17) & 0x38
Rn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
opc1 += (opval>>5) & 7
Rm = opval & 0xf
mnem = parallel_mnem[opc1]
olist = (
ArmRegOper(Rd),
ArmRegOper(Rn),
ArmRegOper(Rm),
)
opcode = IENC_MEDIA_PARALLEL + opc1
return (opcode, mnem, olist, 0)
xtnd_mnem = []
xtnd_suffixes = ("xtab16","xtab","xtah","xtb16","xtb","xth",)
xtnd_prefixes = ("s","u")
for pre in xtnd_prefixes:
for suf in xtnd_suffixes:
xtnd_mnem.append(pre+suf)
xtnd_mnem = tuple(xtnd_mnem)
pkh_mnem = ('pkhbt', 'pkhtb',)
sat_mnem = ('ssat','usat')
sat16_mnem = ('ssat16','usat16')
rev_mnem = ('rev','rev16',None,'revsh',)
def p_media_pack_sat_rev_extend(opval, va):
## part of p_media
# assume bit 23 == 1
opc1 = (opval>>20) & 7
opc2 = (opval>>4) & 0xf
opc25 = opc2 & 3
opcode = 0
if opc1 == 0 and opc25 == 1: #pkh
mnem = pkh_mnem[(opval>>6)&1]
Rn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
shift_imm = (opval>>7) & 0x1f
Rm = opval & 0xf
# 'FIXME WHAT WAS OPCODE SUPPOSED TO BE HERE @las?'
# 'dear visi, read the code ;) move the "mnem =" line if you prefer'
olist = (
ArmRegOper(Rd),
ArmRegOper(Rn),
ArmRegShiftImmOper(Rm, S_LSL, shift_imm),
)
elif (opc1 & 2) and opc25 == 1: #word sat
opidx = (opval>>22)&1
sat_imm = 1 + ((opval>>16) & 0xf) # parenthesized so the +1 applies to the 4-bit field
Rd = (opval>>12) & 0xf
Rm = opval & 0xf
if opc1 & 0x10: # ?sat16
mnem = sat16_mnem[opidx]
olist = (
ArmRegOper(Rd),
ArmImmOper(sat_imm),
ArmRegOper(Rm),
)
opcode = IENC_MEDIA_SAT + opidx
else:
mnem = sat_mnem[opidx]
shift_imm = (opval>>7) & 0x1f
sh = (opval>>5) & 2
olist = (
ArmRegOper(Rd),
ArmImmOper(sat_imm),
ArmRegShiftImmOper(Rm, sh, shift_imm),
)
opcode = IENC_MEDIA_SAT + 2 + opidx
elif (opc1 & 3) == 2 and opc2 == 3: #parallel half-word sat
raise Exception("WTF! Parallel Half-Word Saturate... what is that instruction?")
elif (opc1 > 0) and (opc2 & 7) == 3: # byte rev word
opidx = ((opval>>21) & 2) + ((opval>>7) & 1)
mnem = rev_mnem[opidx]
if mnem == None:
raise Exception("p_media_pack_sat_rev_extend: invalid instruction: %.8x:\t%.8x"%(va,opval))
Rd = (opval>>12) & 0xf
Rm = opval & 0xf
olist = (
ArmRegOper(Rd),
ArmRegOper(Rm),
)
opcode = IENC_MEDIA_REV + opidx
#elif opc1 == 3 and opc2 == 0xb: # byte rev pkt halfword
#elif opc1 == 7 and opc2 == 0xb: # byte rev signed halfword
elif opc1 == 0 and opc2 == 0xb: # select bytes
mnem = "sel"
Rn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
Rm = opval & 0xf
olist = (
ArmRegOper(Rd),
ArmRegOper(Rn),
ArmRegOper(Rm),
)
opcode = IENC_MEDIA_SEL
elif opc2 == 7: # sign extend
mnem = xtnd_mnem[opc1]
Rn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
rot = (opval>>10) & 3
Rm = opval & 0xf
olist = (
ArmRegOper(Rd),
ArmRegOper(Rn),
ArmRegShiftImmOper(Rm, S_ROR, rot),
)
opcode = IENC_MEDIA_EXTEND + opc1
else:
raise Exception("p_media_extend: invalid instruction: %.8x:\t%.8x"%(va,opval))
return (opcode, mnem, olist, 0)
#smult3_mnem = ('smlad','smlsd',,,'smlald')
def p_media_smul(opval, va):
raise Exception("Should not reach here. If we reach here, we'll have to implement MEDIA_SMUL extended multiplication (type 3)")
# hmmm, is this already handled?
def p_media_usada(opval, va):
Rd = (opval>>16) & 0xf
Rn = (opval>>12) & 0xf
Rs = (opval>>8) & 0xf
Rm = opval & 0xf
if Rn == 0xf:
mnem = "usad8"
olist = (
ArmRegOper(Rd),
ArmRegOper(Rm),
ArmRegOper(Rs),
)
opcode = IENC_MEDIA_USAD8
else:
mnem = "usada8"
olist = (
ArmRegOper(Rd),
ArmRegOper(Rm),
ArmRegOper(Rs),
ArmRegOper(Rn),
)
opcode = IENC_MEDIA_USADA8
return (opcode, mnem, olist, 0)
def p_arch_undef(opval, va):
raise Exception("p_arch_undef: invalid instruction (by definition in ARM spec): %.8x:\t%.8x"%(va,opval))
#print >>sys.stderr,("implementme: p_arch_undef")
return (IENC_ARCH_UNDEF, 'arch undefined', (ArmImmOper(opval),), 0)
ldm_mnem = ("stm", "ldm")
def p_load_mult(opval, va):
puswl = (opval>>20) & 0x1f
mnem = ldm_mnem[(puswl&1)]
#flags = ((puswl<<10) & 0x3000) + IF_DA # ???? WTF?
flags = ((puswl&0x18)<<21) + IF_DA # store bits for decoding whether to dec/inc before/after between ldr/str. IF_DA tells the repr to print the DAIB extension after the conditional
Rn = (opval>>16) & 0xf
reg_list = opval & 0xffff
olist = (
ArmRegOper(Rn),
ArmRegListOper(reg_list, puswl),
)
if puswl & 2: # W (mnemonic: "!")
flags |= IF_W
olist[0].oflags |= OF_W
if puswl & 4: # UM - usermode, or mov current SPSR -> CPSR if r15 included
flags |= IF_UM
olist[1].oflags |= OF_UM
opcode = (IENC_LOAD_MULT << 16)
return (opcode, mnem, olist, flags)
def instrenc(encoding, index):
return (encoding << 16) + index
INS_B = instrenc(IENC_BRANCH, 0)
INS_BL = instrenc(IENC_BRANCH, 1)
INS_BX = instrenc(IENC_MISC, 3)
INS_BXJ = instrenc(IENC_MISC, 5)
INS_BLX = IENC_UNCOND_BLX
b_mnem = ("b", "bl",)
def p_branch(opval, va): # primary branch encoding. others were added later in the media section
off = e_bits.signed(opval, 3)
off <<= 2
link = (opval>>24) & 1
#FIXME this assumes A1 branch encoding.
olist = ( ArmOffsetOper(off, va),)
if link:
flags = envi.IF_CALL
else:
flags = envi.IF_BRANCH
opcode = (IENC_BRANCH << 16) + link
return (opcode, b_mnem[link], olist, flags)
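# Worked example: 0xeafffffe is "b ." (branch to self). The low 24 bits
# sign-extend to -2, shifted left 2 gives off=-8; ArmOffsetOper later
# computes va + off + size + 4 = va, matching the ARM convention that
# PC reads as the instruction address + 8.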
ldc_mnem = ("stc", "ldc",)
def p_coproc_load(opval, va): #FIXME: MRRC, MCRR encoded here.
punwl = (opval>>20) & 0x1f
Rn = (opval>>16) & 0xf
CRd = (opval>>12) & 0xf
cp_num = (opval>>8) & 0xf
offset = opval & 0xff
if punwl & 4: # L
iflags = IF_L
else:
iflags = 0
olist = (
ArmCoprocOper(cp_num),
ArmCoprocRegOper(CRd),
ArmImmOffsetOper(Rn, offset*4, va, pubwl=punwl),
)
opcode = (IENC_COPROC_LOAD << 16)
return (opcode, ldc_mnem[punwl&1], olist, iflags)
mcrr_mnem = ("mcrr", "mrrc")
def p_coproc_dbl_reg_xfer(opval, va):
Rn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
cp_num = (opval>>8) & 0xf
opcode = (opval>>4) & 0xf
CRm = opval & 0xf
mnem = mcrr_mnem[(opval>>20) & 1]
olist = (
ArmCoprocOper(cp_num),
ArmCoprocOpcodeOper(opcode),
ArmRegOper(Rd),
ArmRegOper(Rn),
ArmCoprocRegOper(CRm),
)
opcode = IENC_COPROC_RREG_XFER<<16
return (opcode, mnem, olist, 0)
cdp_mnem = ["cdp" for x in range(15)]
cdp_mnem.append("cdp2")
def p_coproc_dp(opval, va):
opcode1 = (opval>>20) & 0xf
CRn = (opval>>16) & 0xf
CRd = (opval>>12) & 0xf
cp_num = (opval>>8) & 0xf
opcode2 = (opval>>5) & 0x7
CRm = opval & 0xf
mnem = cdp_mnem[opval>>28]
olist = (
ArmCoprocOper(cp_num),
ArmCoprocOpcodeOper(opcode1),
ArmCoprocRegOper(CRd),
ArmCoprocRegOper(CRn),
ArmCoprocRegOper(CRm),
ArmCoprocOpcodeOper(opcode2),
)
opcode = (IENC_COPROC_DP << 16)
return (opcode, mnem, olist, 0) #FIXME: CDP2 (cond = 0b1111) also needs handling.
mcr_mnem = ("mcr", "mrc")
def p_coproc_reg_xfer(opval, va):
opcode1 = (opval>>21) & 0x7
load = (opval>>20) & 1
CRn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
cp_num = (opval>>8) & 0xf
opcode2 = (opval>>5) & 0x7
CRm = opval & 0xf
olist = (
ArmCoprocOper(cp_num),
ArmCoprocOpcodeOper(opcode1),
ArmRegOper(Rd),
ArmCoprocRegOper(CRn),
ArmCoprocRegOper(CRm),
ArmCoprocOpcodeOper(opcode2),
)
opcode = (IENC_COPROC_REG_XFER << 16)
return (opcode, mcr_mnem[load], olist, 0)
def p_swint(opval, va):
swint = opval & 0xffffff
olist = ( ArmImmOper(swint), )
opcode = (IENC_SWINT << 16) + 1
return (opcode, "swi", olist, 0)
cps_mnem = ("cps","cps FAIL-bad encoding","cpsie","cpsid")
mcrr2_mnem = ("mcrr2", "mrrc2")
ldc2_mnem = ("stc2", "ldc2",)
mcr2_mnem = ("mcr2", "mrc2")
def p_uncond(opval, va):
if opval & 0x0f000000 == 0x0f000000:
# FIXME THIS IS HORKED
opcode = (IENC_SWINT << 16) + 2
immval = opval & 0x00ffffff
return (opcode, 'swi', (ArmImmOper(immval),), 0)
optop = ( opval >> 26 ) & 0x3
if optop == 0:
if opval & 0xfff10020 == 0xf1000000:
#cps
imod = (opval>>18)&3
mmod = (opval>>17)&1
aif = (opval>>5)&7
mode = opval&0x1f
mnem = cps_mnem[imod]
if imod & 2:
olist = [
ArmCPSFlagsOper(aif) # if mode is set...
]
else:
olist = []
if mmod:
olist.append(ArmImmOper(mode))
opcode = IENC_UNCOND_CPS + imod
return (opcode, mnem, olist, 0)
elif (opval & 0xffff00f0) == 0xf1010000:
#setend
e = (opval>>9) & 1
mnem = "setend"
olist = ( ArmEndianOper(e), )
opcode = IENC_UNCOND_SETEND
return (opcode, mnem, olist, 0)
else:
raise Exception("p_uncond (ontop=0): invalid instruction: %.8x:\t%.8x"%(va,opval))
elif optop == 1:
if (opval & 0xf570f000) == 0xf550f000:
#cache preload - also known as a nop on most platforms... does nothing except prefetch instructions from cache.
# i'm tempted to cut the parsing of it and just return a canned something.
mnem = "pld"
I = (opval>>25) & 1 # what the freak am i supposed to do with "i"???
Rn = (opval>>16) & 0xf
U = (opval>>23) & 1
opcode = IENC_UNCOND_PLD
if I:
immoffset = opval & 0xfff
olist = (ArmImmOffsetOper(Rn, immoffset, va, U<<3),)
else:
Rm = opval & 0xf
shtype = (opval>>5) & 3
shval = (opval>>7) & 0x1f
olist = (ArmScaledOffsetOper(Rn, Rm, shtype, shval, va, U<<3), ) # only the U bit applies here (pubwl was unbound)
return (opcode, mnem, olist, 0)
else:
raise Exception("p_uncond (ontop=1): invalid instruction: %.8x:\t%.8x"%(va,opval))
elif optop == 2:
if (opval & 0xfe5f0f00) == 0xf84d0500:
#save return state
pu_w = (opval>>21) & 0xf
mnem = "srs"
flags = ((pu_w<<10) & 0x3000) + IF_DA
mode = opval & 0xf
olist = (
ArmModeOper(mode, pu_w&1),
)
opcode = IENC_UNCOND_SRS
return (opcode, mnem, olist, flags)
elif (opval & 0xfe500f00) == 0xf8100a00:
#rfe
pu = (opval>>23) & 3
mnem = "rfe"
flags = (pu<<12) + IF_DA
Rn = (opval>>16) & 0xf
olist = (
ArmRegOper(Rn),
)
opcode = IENC_UNCOND_RFE
return (opcode, mnem, olist, flags)
elif (opval & 0xfe000000) == 0xfa000000:
#blx
mnem = "blx"
h = (opval>>23) & 2
imm_offset = e_bits.signed(opval, 3) + h
olist = (
ArmOffsetOper(imm_offset, va),
)
opcode = INS_BLX #should this be IENC_UNCOND_BLX?
return (opcode, mnem, olist, 0)
else:
raise Exception("p_uncond (ontop=2): invalid instruction: %.8x:\t%.8x"%(va,opval))
else:
if (opval & 0xffe00000) == 0xfc400000:
#MRCC2/MRRC2
Rn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
cp_num = (opval>>8) & 0xf
opcode = (opval>>4) & 0xf
CRm = opval & 0xf
mnem = mcrr2_mnem[(opval>>20) & 1]
olist = (
ArmCoprocOper(cp_num),
ArmCoprocOpcodeOper(opcode),
ArmRegOper(Rd),
ArmRegOper(Rn),
ArmCoprocRegOper(CRm),
)
opcode = IENC_COPROC_RREG_XFER<<16
return (opcode, mnem, olist, 0)
elif (opval & 0xfe000000) == 0xfc000000:
#stc2/ldc2
punwl = (opval>>20) & 0x1f
Rn = (opval>>16) & 0xf
CRd = (opval>>12) & 0xf
cp_num = (opval>>8) & 0xf
offset = opval & 0xff
if punwl & 4: # L
iflags = IF_L
else:
iflags = 0
olist = (
ArmCoprocOper(cp_num),
ArmCoprocRegOper(CRd),
ArmImmOffsetOper(Rn, offset*4, va, pubwl=punwl),
)
opcode = (IENC_COPROC_LOAD << 16)
return (opcode, ldc2_mnem[punwl&1], olist, iflags)
elif (opval & 0xff000010) == 0xfe000000:
#coproc dp (cdp2)
return p_coproc_dp(opval, va)
elif (opval & 0xff000010) == 0xfe000010:
#mcr2/mrc2
opcode1 = (opval>>21) & 0x7
load = (opval>>20) & 1
CRn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
cp_num = (opval>>8) & 0xf
opcode2 = (opval>>5) & 0x7
CRm = opval & 0xf
olist = (
ArmCoprocOper(cp_num),
ArmCoprocOpcodeOper(opcode1),
ArmRegOper(Rd),
ArmCoprocRegOper(CRn),
ArmCoprocRegOper(CRm),
ArmCoprocOpcodeOper(opcode2),
)
opcode = (IENC_COPROC_REG_XFER << 16)
return (opcode, mcr2_mnem[load], olist, 0)
else:
raise Exception("p_uncond (ontop=3): invalid instruction: %.8x:\t%.8x"%(va,opval))
####################################################################
# Table of the parser functions
ienc_parsers_tmp = [None for x in range(21)]
ienc_parsers_tmp[IENC_DP_IMM_SHIFT] = p_dp_imm_shift
ienc_parsers_tmp[IENC_MISC] = p_misc
ienc_parsers_tmp[IENC_MISC1] = p_misc1
ienc_parsers_tmp[IENC_EXTRA_LOAD] = p_extra_load_store
ienc_parsers_tmp[IENC_DP_REG_SHIFT] = p_dp_reg_shift
ienc_parsers_tmp[IENC_MULT] = p_mult
ienc_parsers_tmp[IENC_UNDEF] = p_undef
ienc_parsers_tmp[IENC_MOV_IMM_STAT] = p_mov_imm_stat
ienc_parsers_tmp[IENC_DP_IMM] = p_dp_imm
ienc_parsers_tmp[IENC_LOAD_IMM_OFF] = p_load_imm_off
ienc_parsers_tmp[IENC_LOAD_REG_OFF] = p_load_reg_off
ienc_parsers_tmp[IENC_ARCH_UNDEF] = p_arch_undef
ienc_parsers_tmp[IENC_MEDIA] = p_media
ienc_parsers_tmp[IENC_LOAD_MULT] = p_load_mult
ienc_parsers_tmp[IENC_BRANCH] = p_branch
ienc_parsers_tmp[IENC_COPROC_RREG_XFER] = p_coproc_dbl_reg_xfer
ienc_parsers_tmp[IENC_COPROC_LOAD] = p_coproc_load
ienc_parsers_tmp[IENC_COPROC_DP] = p_coproc_dp
ienc_parsers_tmp[IENC_COPROC_REG_XFER] = p_coproc_reg_xfer
ienc_parsers_tmp[IENC_SWINT] = p_swint
ienc_parsers_tmp[IENC_UNCOND] = p_uncond
ienc_parsers = tuple(ienc_parsers_tmp)
####################################################################
# the primary table is index'd by the 3 bits following the
# conditional and are structured as follows:
# ( ENC, nexttable )
# If ENC != None, those 3 bits were enough for us to know the
# encoding type, otherwise move on to the second table.
# The secondary tables have the format:
# (mask, value, ENC). If the opcode is masked with "mask"
# resulting in "value" we have found the instruction encoding.
# NOTE: All entries in these tables *must* be from most specific
# to least!
# Table for initial 3 bit == 0
s_0_table = (
# Order is critical here...
(binary("00000000000000000000000000010000"), binary("00000000000000000000000000000000"), IENC_DP_IMM_SHIFT),
(binary("00000001100100000000000000010000"), binary("00000001000000000000000000000000"), IENC_MISC),
(binary("00000001100100000000000010010000"), binary("00000001000000000000000000010000"), IENC_MISC1),
(binary("00000001000000000000000011110000"), binary("00000000000000000000000010010000"), IENC_MULT),
(binary("00000001001000000000000010010000"), binary("00000001001000000000000010010000"), IENC_EXTRA_LOAD),
(binary("00000000000000000000000010010000"), binary("00000000000000000000000010010000"), IENC_EXTRA_LOAD),
(binary("00000000000000000000000010010000"), binary("00000000000000000000000000010000"), IENC_DP_REG_SHIFT),
(0,0, IENC_UNDEF), #catch-all
)
s_1_table = (
(binary("00000001100110000000000000000000"), binary("00000001000000000000000000000000"), IENC_UNDEF),
(binary("00000001100110000000000000000000"), binary("00000001001000000000000000000000"), IENC_MOV_IMM_STAT),
(0,0, IENC_DP_IMM),
)
s_3_table = (
(binary("00000001111100000000000011110000"),binary("00000001111100000000000011110000"), IENC_ARCH_UNDEF),
(binary("00000000000000000000000000010000"),binary("00000000000000000000000000010000"), IENC_MEDIA),
(0,0, IENC_LOAD_REG_OFF),
)
s_6_table = (
(binary("00001111111000000000000000000000"),binary("00001100010000000000000000000000"), IENC_COPROC_RREG_XFER),
(binary("00001110000000000000000000000000"),binary("00001100000000000000000000000000"), IENC_COPROC_LOAD),
)
s_7_table = (
(binary("00000001000000000000000000000000"),binary("00000001000000000000000000000000"), IENC_SWINT),
(binary("00000001000000000000000000010000"),binary("00000000000000000000000000010000"), IENC_COPROC_REG_XFER),
(0, 0, IENC_COPROC_DP),
)
# Initial 3 (non conditional) primary table
inittable = [
(None, s_0_table),
(None, s_1_table),
(IENC_LOAD_IMM_OFF, None), # Load or store an immediate
(None, s_3_table),
(IENC_LOAD_MULT, None),
(IENC_BRANCH, None),
(None, s_6_table),
(None, s_7_table),
(IENC_UNCOND, None),
]
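# Lookup walkthrough: for 0xe0810002 ("add r0, r1, r2"), cond != 0b1111,
# encfam = (opval >> 25) & 7 = 0, so s_0_table is consulted; bit 4 is
# clear, the first (mask, value) row matches, and the IENC_DP_IMM_SHIFT
# parser decodes it.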
# FIXME for emulation...
#def s_lsl(val, shval):
#pass
#def s_lsr(val, shval):
#pass
# These are indexed by the 2 bit "shift" value in some DP encodings
#shift_handlers = (
#s_lsl,
#s_lsr,
#s_asr,
#s_ror,
#)
endian_names = ("le","be")
#FIXME IF_NOFALL (and other envi flags)
class ArmOpcode(envi.Opcode):
def __hash__(self):
return int(hash(self.mnem) ^ (self.size << 4))
def __len__(self):
return int(self.size)
def getBranches(self, emu=None):
"""
Return a list of tuples. Each tuple contains the target VA of the
branch, and a possible set of flags showing what type of branch it is.
See the BR_FOO types for all the supported envi branch flags....
Example: for bva,bflags in op.getBranches():
"""
ret = []
if not self.iflags & envi.IF_NOFALL:
ret.append((self.va + self.size, envi.BR_FALL))
# FIXME if this is a move to PC god help us...
flags = 0
if self.prefixes != COND_AL:
flags |= envi.BR_COND
if self.opcode == INS_B:
oper = self.opers[0]
ret.append((oper.getOperValue(self), flags))
elif self.opcode == INS_BL:
oper = self.opers[0]
ret.append((oper.getOperValue(self), flags | envi.BR_PROC))
return ret
def render(self, mcanv):
"""
Render this opcode to the specified memory canvas
"""
mnem = self.mnem + cond_codes.get(self.prefixes)
mcanv.addNameText(mnem, typename="mnemonic")
mcanv.addText(" ")
# Allow each of our operands to render
imax = len(self.opers)
lasti = imax - 1
for i in xrange(imax):
oper = self.opers[i]
oper.render(mcanv, self, i)
if i != lasti:
mcanv.addText(",")
#mcanv.addText('; %s' % repr(self))
def __repr__(self):
mnem = self.mnem + cond_codes.get(self.prefixes)
# FIXME put in S flag! -- scratch that... optimize and preload a list of these combos!
# FIXME actually all these are broke... (iflags)
# FIXME handle these in parsing too!
daib_flags = self.iflags & IF_DAIB_MASK
if self.iflags & IF_L:
mnem += 'l'
elif self.iflags & IF_PSR_S:
mnem += 's'
elif daib_flags > 0:
idx = ((daib_flags)>>24)
mnem += daib[idx]
else:
if self.iflags & IF_S:
mnem += 's'
if self.iflags & IF_B:
mnem += 'b'
if self.iflags & IF_H:
mnem += 'h'
elif self.iflags & IF_T:
mnem += 't'
x = []
for o in self.opers:
x.append(o.repr(self))
return mnem + " " + ", ".join(x)
class ArmRegOper(envi.Operand):
def __init__(self, reg, oflags=0):
self.reg = reg
self.oflags = oflags
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.reg != oper.reg:
return False
if self.oflags != oper.oflags:
return False
return True
def involvesPC(self):
return self.reg == 15
def getOperValue(self, op, emu=None):
if emu == None:
return None
return emu.getRegister(self.reg)
def setOperValue(self, op, emu=None, val=None):
if emu == None:
return None
emu.setRegister(self.reg, val)
def render(self, mcanv, op, idx):
rname = arm_regs[self.reg][0]
if self.oflags & OF_W:
rname += "!"
mcanv.addNameText(rname, typename='registers')
def repr(self, op):
rname = arm_regs[self.reg][0]
if self.oflags & OF_W:
rname += "!"
return rname
class ArmRegShiftRegOper(envi.Operand):
def __init__(self, reg, shtype, shreg):
self.reg = reg
self.shtype = shtype
self.shreg = shreg
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.reg != oper.reg:
return False
if self.shtype != oper.shtype:
return False
if self.shreg != oper.shreg:
return False
return True
def involvesPC(self):
return self.reg == 15
def getOperValue(self, op, emu=None):
if emu == None:
return None
return shifters[self.shtype](emu.getRegister(self.reg), emu.getRegister(self.shreg))
def render(self, mcanv, op, idx):
rname = arm_regs[self.reg][0]
mcanv.addNameText(rname, typename='registers')
mcanv.addText(', ')
mcanv.addNameText(shift_names[self.shtype])
mcanv.addText(' ')
mcanv.addNameText(arm_regs[self.shreg][0], typename='registers')
def repr(self, op):
rname = arm_regs[self.reg][0]+","
return " ".join([rname, shift_names[self.shtype], arm_regs[self.shreg][0]])
class ArmRegShiftImmOper(envi.Operand):
def __init__(self, reg, shtype, shimm):
if shimm == 0:
if shtype == S_ROR:
shtype = S_RRX
elif shtype == S_LSR or shtype == S_ASR:
shimm = 32
self.reg = reg
self.shtype = shtype
self.shimm = shimm
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.reg != oper.reg:
return False
if self.shtype != oper.shtype:
return False
if self.shimm != oper.shimm:
return False
return True
def involvesPC(self):
return self.reg == 15
def getOperValue(self, op, emu=None):
if emu == None:
return None
return shifters[self.shtype](emu.getRegister(self.reg), self.shimm)
def render(self, mcanv, op, idx):
rname = arm_regs[self.reg][0]
mcanv.addNameText(rname, typename='registers')
if self.shimm != 0:
mcanv.addText(', ')
mcanv.addNameText(shift_names[self.shtype])
mcanv.addText(' ')
mcanv.addNameText('#%d' % self.shimm)
elif self.shtype == S_RRX:
mcanv.addText(', ')
mcanv.addNameText(shift_names[self.shtype])
def repr(self, op):
rname = arm_regs[self.reg][0]
retval = [ rname ]
if self.shimm != 0:
retval.append(", "+shift_names[self.shtype])
retval.append("#%d"%self.shimm)
elif self.shtype == S_RRX:
retval.append(shift_names[self.shtype])
return " ".join(retval)
class ArmImmOper(envi.Operand):
def __init__(self, val, shval=0, shtype=S_ROR):
self.val = val
self.shval = shval
self.shtype = shtype
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.getOperValue(None) != oper.getOperValue(None):
return False
return True
def involvesPC(self):
return False
def getOperValue(self, op, emu=None):
return shifters[self.shtype](self.val, self.shval)
def render(self, mcanv, op, idx):
if self.shval != 0:
mcanv.addNameText('#0x%.2x,%d' % (self.val, self.shval))
else:
mcanv.addNameText('#0x%.2x' % (self.val))
def repr(self, op):
if self.shval != 0:
return '#0x%.2x,%d' % (self.val, self.shval)
else:
return '#0x%.2x' % (self.val)
class ArmScaledOffsetOper(envi.Operand):
def __init__(self, base_reg, offset_reg, shtype, shval, va, pubwl=0):
if shval == 0:
if shtype == S_ROR:
shtype = S_RRX
elif shtype == S_LSR or shtype == S_ASR:
shval = 32
self.base_reg = base_reg
self.offset_reg = offset_reg
self.shtype = shtype
self.shval = shval
self.pubwl = pubwl
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.base_reg != oper.base_reg:
return False
if self.offset_reg != oper.offset_reg:
return False
if self.shtype != oper.shtype:
return False
if self.shval != oper.shval:
return False
if self.pubwl != oper.pubwl:
return False
return True
def involvesPC(self):
return self.base_reg == 15
def getOperValue(self, op, emu=None):
if emu == None:
return None
raise Exception("FIXME: Implement ArmScaledOffsetOper.getOperValue()")
return None # FIXME
def render(self, mcanv, op, idx):
pom = ('-','')[(self.pubwl>>4)&1]
idxing = self.pubwl & 0x12
basereg = arm_regs[self.base_reg][0]
offreg = arm_regs[self.offset_reg][0]
shname = shift_names[self.shtype]
mcanv.addText('[')
mcanv.addNameText(basereg, typename='registers')
if (idxing&0x10) == 0:
mcanv.addText('], ')
else:
mcanv.addText(', ')
mcanv.addText(pom)
mcanv.addNameText(offreg, typename='registers')
mcanv.addText(' ')
if self.shval != 0:
mcanv.addNameText(shname)
mcanv.addText(' ')
mcanv.addNameText('#%d' % self.shval)
if idxing == 0x10:
mcanv.addText(']')
elif idxing != 0:
mcanv.addText(']!')
def repr(self, op):
pom = ('-','')[(self.pubwl>>4)&1]
idxing = self.pubwl & 0x12
basereg = arm_regs[self.base_reg][0]
offreg = arm_regs[self.offset_reg][0]
shname = shift_names[self.shtype]
if self.shval != 0:
shval = "%s #%d"%(shname,self.shval)
elif self.shtype == S_RRX:
shval = shname
else:
shval = ""
if (idxing&0x10) == 0: # post-indexed
tname = '[%s], %s%s %s' % (basereg, pom, offreg, shval)
elif idxing == 0x10:
tname = '[%s, %s%s %s]' % (basereg, pom, offreg, shval)
else: # pre-indexed
tname = '[%s, %s%s %s]!' % (basereg, pom, offreg, shval)
return tname
class ArmRegOffsetOper(envi.Operand):
def __init__(self, base_reg, offset_reg, va, pubwl=0):
self.base_reg = base_reg
self.offset_reg = offset_reg
self.pubwl = pubwl
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.base_reg != oper.base_reg:
return False
if self.offset_reg != oper.offset_reg:
return False
if self.pubwl != oper.pubwl:
return False
return True
def involvesPC(self):
return self.base_reg == 15
def getOperValue(self, op, emu=None):
if emu == None:
return None
raise Exception("FIXME: Implement ArmRegOffsetOper.getOperValue()")
def render(self, mcanv, op, idx):
pom = ('-','')[(self.pubwl>>4)&1]
idxing = self.pubwl & 0x12
basereg = arm_regs[self.base_reg][0]
offreg = arm_regs[self.offset_reg][0]
mcanv.addText('[')
mcanv.addNameText(basereg, typename='registers')
if (idxing&0x10) == 0:
mcanv.addText('] ')
else:
mcanv.addText(', ')
mcanv.addText(pom)
mcanv.addNameText(offreg, typename='registers')
if idxing == 0x10:
mcanv.addText(']')
elif idxing != 0:
mcanv.addText(']!')
def repr(self, op):
pom = ('-','')[(self.pubwl>>4)&1]
idxing = self.pubwl & 0x12
basereg = arm_regs[self.base_reg][0]
offreg = arm_regs[self.offset_reg][0]
if (idxing&0x10) == 0: # post-indexed
tname = '[%s], %s%s' % (basereg, pom, offreg)
elif idxing == 0x10: # offset addressing, not updated
tname = '[%s, %s%s]' % (basereg, pom, offreg)
else: # pre-indexed
tname = '[%s, %s%s]!' % (basereg, pom, offreg)
return tname
class ArmImmOffsetOper(envi.Operand):
def __init__(self, base_reg, offset, va, pubwl=8):
self.base_reg = base_reg
self.offset = offset
self.pubwl = pubwl
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.base_reg != oper.base_reg:
return False
if self.offset != oper.offset:
return False
if self.pubwl != oper.pubwl:
return False
return True
def involvesPC(self):
return self.base_reg == 15
def getOperValue(self, op, emu=None):
if emu == None:
return None
pubwl = self.pubwl >> 1
w = pubwl & 1
pubwl >>=1
b = pubwl & 1
pubwl >>=1
u = pubwl & 1
pubwl >>=1
p = pubwl
addr = emu.getRegister(self.base_reg)
if u:
addr += self.offset
else:
addr -= self.offset
fmt = ("B", "<L")[b]
ret = emu.readMemoryFormat(addr, fmt)
return ret
def render(self, mcanv, op, idx):
pom = ('-','')[(self.pubwl>>4)&1]
idxing = self.pubwl & 0x12
basereg = arm_regs[self.base_reg][0]
mcanv.addText('[')
mcanv.addNameText(basereg, typename='registers')
if self.offset == 0:
mcanv.addText(']')
else:
if (idxing&0x10) == 0:
mcanv.addText('] ')
else:
mcanv.addText(', ')
mcanv.addNameText('#%s0x%x' % (pom,self.offset))
if idxing == 0x10:
mcanv.addText(']')
elif idxing != 0:
mcanv.addText(']!')
def repr(self, op):
pom = ('-','')[(self.pubwl>>4)&1]
idxing = (self.pubwl) & 0x12
basereg = arm_regs[self.base_reg][0]
if self.offset != 0:
offset = ", #%s0x%x"%(pom,self.offset)
else:
offset = ""
if (idxing&0x10) == 0: # post-indexed
tname = '[%s]%s' % (basereg, offset)
else:
if idxing == 0x10: # offset addressing, not updated
tname = '[%s%s]' % (basereg,offset)
else: # pre-indexed
tname = '[%s%s]!' % (basereg,offset)
return tname
class ArmOffsetOper(envi.Operand): # ArmImmOper but for Branches
def __init__(self, val, va):
self.val = val # depending on mode, this is reg/imm
self.va = va
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.val != oper.val:
return False
if self.va != oper.va:
return False
return True
def involvesPC(self):
return True
def getOperValue(self, op, emu=None):
return self.va + self.val + op.size + 4 # PC reads as the instruction address + 8 on ARM
def render(self, mcanv, op, idx):
value = self.getOperValue(op)
if mcanv.mem.isValidPointer(value):
name = addrToName(mcanv, value)
mcanv.addVaText(name, value)
else:
mcanv.addVaText('0x%.8x' % value, value)
def repr(self, op):
targ = self.getOperValue(op)
tname = "#0x%.8x" % targ
return tname
psrs = ("CPSR", "SPSR")
class ArmPgmStatRegOper(envi.Operand):
def __init__(self, val):
self.val = val
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.val != oper.val:
return False
return True
def involvesPC(self):
return False
def getOperValue(self, op, emu=None):
if emu == None:
return None
raise Exception("FIXME: Implement ArmPgmStatRegOper.getOperValue()")
return None # FIXME
def repr(self, op):
return psrs[self.val]
class ArmPgmStatFlagsOper(envi.Operand):
# FIXED: visi: sorry, i accidentally overrode the previous class to have two meanings
def __init__(self, val):
self.val = val
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.val != oper.val:
return False
return True
def involvesPC(self):
return False
def getOperValue(self, op, emu=None):
if emu == None:
return None
raise Exception("FIXME: Implement ArmPgmStatRegOper.getOperValue()")
return None # FIXME
def repr(self, op):
s = ["PSR_",psr_fields[self.val]]
return "".join(s)
class ArmEndianOper(ArmImmOper):
def repr(self, op):
return endian_names[self.val]
def involvesPC(self):
return False
def getOperValue(self, op, emu=None):
return self.val
class ArmRegListOper(envi.Operand):
def __init__(self, val, oflags=0):
self.val = val
self.oflags = oflags
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.val != oper.val:
return False
if self.oflags != oper.oflags:
return False
return True
def involvesPC(self):
return self.val & 0x80 == 0x80
    def render(self, mcanv, op, idx):
        mcanv.addText('{')
        regs = [l for l in xrange(16) if self.val & (1 << l)]
        for i, l in enumerate(regs):
            if i:
                mcanv.addText(', ')
            mcanv.addNameText(arm_regs[l][0], typename='registers')
        mcanv.addText('}')
if self.oflags & OF_UM:
mcanv.addText('^')
def getOperValue(self, op, emu=None):
if emu == None:
return None
reglist = []
for regidx in xrange(16):
#FIXME: check processor mode (abort, system, user, etc... use banked registers?)
if self.val & (1<<regidx):
reg = emu.getRegister(regidx)
reglist.append(reg)
return reglist
    def repr(self, op):
        regs = [arm_regs[l][0] for l in xrange(16) if self.val & (1 << l)]
        tname = "{%s}" % ", ".join(regs)
        if self.oflags & OF_UM:
            tname += "^"
        return tname
aif_flags = (None, 'f','i','if','a','af','ai','aif')
class ArmPSRFlagsOper(envi.Operand):
def __init__(self, flags):
self.flags = flags
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.flags != oper.flags:
return False
return True
def involvesPC(self):
return False
def getOperValue(self, op, emu=None):
if emu == None:
return None
raise Exception("FIXME: Implement ArmPSRFlagsOper.getOperValue() (does it want to be a bitmask? or the actual value according to the PSR?)")
return None # FIXME
def repr(self, op):
return aif_flags[self.flags]
class ArmCoprocOpcodeOper(envi.Operand):
def __init__(self, val):
self.val = val
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.val != oper.val:
return False
return True
def involvesPC(self):
return False
def getOperValue(self, op, emu=None):
return self.val
def repr(self, op):
return "%d"%self.val
class ArmCoprocOper(envi.Operand):
def __init__(self, val):
self.val = val
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.val != oper.val:
return False
return True
def involvesPC(self):
return False
def getOperValue(self, op, emu=None):
return self.val
def repr(self, op):
return "p%d"%self.val
class ArmCoprocRegOper(envi.Operand):
def __init__(self, val, shtype=None, shval=None):
self.val = val # depending on mode, this is reg/imm
self.shval = shval
self.shtype = shtype
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.val != oper.val:
return False
if self.shval != oper.shval:
return False
if self.shtype != oper.shtype:
return False
return True
def involvesPC(self):
return False
def getOperValue(self, op, emu=None):
if emu == None:
return None
raise Exception("FIXME: Implement ArmCoprocRegOper.getOperValue()")
return None # FIXME
def repr(self, op):
return "c%d"%self.val
class ArmStdDisasm:
def disasm(self, bytes, offset, va, trackMode=False):
"""
Parse a sequence of bytes out into an envi.Opcode instance.
"""
opbytes = bytes[offset:offset+4]
opval, = struct.unpack("<L", opbytes)
cond = opval >> 28
# Begin the table lookup sequence with the first 3 non-cond bits
encfam = (opval >> 25) & 0x7
if cond == COND_EXTENDED:
enc = IENC_UNCOND
else:
enc,nexttab = inittable[encfam]
if nexttab != None: # we have to sub-parse...
for mask,val,penc in nexttab:
if (opval & mask) == val:
enc = penc
break
# If we don't know the encoding by here, we never will ;)
if enc == None:
            raise InvalidInstruction(opbytes)
opcode, mnem, olist, flags = ienc_parsers[enc](opval, va)
# Ok... if we're a non-conditional branch, *or* we manipulate PC unconditionally,
# lets call ourself envi.IF_NOFALL
if cond == COND_AL:
if opcode in (INS_B, INS_BX):
flags |= envi.IF_NOFALL
elif ( len(olist) and
isinstance(olist[0], ArmRegOper) and
olist[0].involvesPC() ):
showop = True
flags |= envi.IF_NOFALL
# FIXME conditionals are currently plumbed as "prefixes". Perhaps normalize to that...
op = ArmOpcode(va, opcode, mnem, cond, 4, olist, flags)
op.encoder = enc #FIXME: DEBUG CODE
return op
def checkSetMode(self, op): # FIXME: i'm forked. bx references a register... emulation required unless we want to trust it to always mean a jump to thumb...
# CHANGE TO THUMB MODE FROM ARM
# if bx or blx and target is odd
# if dst is r15 and target is odd
# if set T bit in SPSR then reload CPSR from SPSR (sh*t, emulation here we come)
olist = op.opers
        if op.opcode == INS_BX:
            # olist[0] is an operand object, not an int, so the next line can't
            # work without emulation; the original intent is kept as a comment.
            #self.parent.setMode( olist[0].getOperValue(op) & 1 )
            self.parent.setMode( 1 ) # FIXME: HACK! assumes a bx always targets Thumb
elif ( len(olist) > 1 and
isinstance(olist[0], ArmRegOper) and
olist[0].reg == REG_PC ):
mode = olist[1].getOperValue(op)
if mode != None:
self.parent.setMode( mode&1 )
# CHANGE TO JAZELLE MODE
# if bxj is called and jazelle is available and enabled.... sheesh this is gonna get complex
elif op.opcode == INS_BXJ:
if self.parent.jzl_enabled:
self.parent.setMode( 2 )
else:
pass # how do we change PC from here? bxj changes to jazelle mode,
# but if it's unavailable, the operand is the Arm/Thumb "handler"
|
joxeankoret/nightmare
|
lib/interfaces/envi/archs/arm/armdisasm.py
|
Python
|
gpl-2.0
| 61,423
|
[
"xTB"
] |
3b9b43b49b347051ad20e6ef955b5f6c203e31e44bdaf2255bc2027c331cee52
|
from PyQt4 import Qt
import sys
import matplotlib.pyplot as plt
import numpy as np
from collections import OrderedDict
import sys
import csv
class Window(Qt.QWidget):
def __init__(self):
Qt.QWidget.__init__(self)
global global_results
from OneStopTrack import global_results
self.cmBox1 = Qt.QComboBox()
for key in global_results.keys():
self.cmBox1.addItem(str(key))
lbl1 = Qt.QLabel("Arena width (cm):")
self.lneEdt1 = Qt.QLineEdit()
lbl1.setBuddy(self.lneEdt1)
lbl2 = Qt.QLabel("Arena height (cm):")
self.lneEdt2 = Qt.QLineEdit()
lbl2.setBuddy(self.lneEdt2)
lbl3 = Qt.QLabel("Number of vertical lines dividing arena:")
self.spnBox1 = Qt.QSpinBox()
lbl3.setBuddy(self.spnBox1)
lbl4 = Qt.QLabel("Number of horizontal lines dividing arena:")
self.spnBox2 = Qt.QSpinBox()
lbl4.setBuddy(self.spnBox2)
lbl5 = Qt.QLabel("Interpolation Method:")
self.cmBox2 = Qt.QComboBox()
self.cmBox2.addItems(["none", "nearest", "bilinear", "bicubic", "spline16",
"spline36", "hanning", "hamming", "hermite", "kaiser",
"quadric", "catrom", "gaussian", "bessel", "mitchell",
"sinc", "lanczos"])
lbl5.setBuddy(self.cmBox2)
self.pshBtn1 = Qt.QPushButton(self.tr('Draw Grid Time Heatmap'))
self.pshBtn1.clicked.connect(self.draw_heatmap)
spacer1 = Qt.QLabel()
spacer2 = Qt.QLabel()
spacer3 = Qt.QLabel()
layout1 = Qt.QFormLayout()
layout1.addRow(lbl1, self.lneEdt1)
layout1.addRow(lbl2, self.lneEdt2)
layout1.addRow(lbl3, self.spnBox1)
layout1.addRow(lbl4, self.spnBox2)
layout2 = Qt.QFormLayout()
layout2.addRow(lbl5, self.cmBox2)
layout3 = Qt.QVBoxLayout(self)
layout3.addWidget(self.cmBox1)
layout3.addWidget(spacer1)
layout3.addLayout(layout1)
layout3.addWidget(spacer2)
layout3.addLayout(layout2)
layout3.addWidget(spacer3)
layout3.addWidget(self.pshBtn1)
self.setWindowTitle(self.tr("Animal Movement Heatmap"))
def calc_grid(self):
self.arena_width = float(self.lneEdt1.text())
self.arena_height = float(self.lneEdt2.text())
        self.num_width_divs = self.spnBox1.value()+1
        self.num_height_divs = self.spnBox2.value()+1
self.width_div = self.arena_width/self.num_width_divs
self.height_div = self.arena_height/self.num_height_divs
time = [float(tim) for tim in global_results[str(self.cmBox1.currentText())]["results"]['vid_pts_time']]
        try:
            # positions may be stored as strings; eval() turns them back into tuples
            pos = [eval(posi) for posi in global_results[str(self.cmBox1.currentText())]["results"]['position']]
        except Exception:
            pos = global_results[str(self.cmBox1.currentText())]["results"]['position']
section = [(x,y) for y in range(self.num_height_divs)
for x in range(self.num_width_divs)]
grid = OrderedDict()
for gridy in section:
grid[gridy] = [(gridy[0]*self.width_div, gridy[1]*self.height_div),
((gridy[0]+1)*self.width_div, (gridy[1]+1)*self.height_div), 0]
time_ints = [(time[i+1]-time[i]) for i in range(len(time)-1)]
        for tim, posi in zip(time_ints, pos):
            for key in grid:
                if (grid[key][0][0] <= posi[0] < grid[key][1][0]) and (grid[key][0][1] <= posi[1] <= grid[key][1][1]):
                    grid[key][2] += tim
for key in grid.keys():
grid[key][2] = round(grid[key][2],2)
return grid
def draw_heatmap(self):
interpolation_method = self.cmBox2.currentText()
grid = self.calc_grid()
x = []
y = []
times = []
for key in grid.keys():
x.append(((grid[key][0][0]+grid[key][1][0])/2))
y.append(((grid[key][0][1]+grid[key][1][1])/2))
times.append(grid[key][2])
xedges = list(np.arange(0, (self.arena_width+self.width_div),self.width_div))
yedges = list(np.arange(0, (self.arena_height+self.height_div),self.height_div))
        # hist2d bins its first argument along rows, so y goes first to get an
        # image-shaped array (row = y, column = x) for imshow below
h, _, _, _ = plt.hist2d(y, x, bins=[yedges, xedges], weights=times)
plt.clf()
plt.close()
fig = plt.gcf()
fig.canvas.set_window_title("Trial %s Heatmap" %(self.cmBox1.currentText()))
plt.imshow(h,interpolation=interpolation_method, aspect="auto", origin="lower", extent=[0,self.arena_width,0,self.arena_height])
plt.gca().xaxis.tick_top()
plt.gca().invert_yaxis()
plt.gca().set_aspect("equal")
plt.xlabel("Arena Width (cm)")
plt.ylabel("Arena Height (cm)")
plt.colorbar(label="Time spent in grid (seconds)")
plt.show()
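# A minimal standalone sketch (not part of the original script) of the
# time-in-cell accumulation performed by Window.calc_grid: each inter-frame
# time interval is credited to the grid cell containing the position sample.
def demo_time_in_cells(times, positions, cell_w, cell_h, n_cols, n_rows):
    totals = {}
    intervals = [times[i + 1] - times[i] for i in range(len(times) - 1)]
    for dt, (x, y) in zip(intervals, positions):
        col = min(int(x // cell_w), n_cols - 1)
        row = min(int(y // cell_h), n_rows - 1)
        totals[(col, row)] = totals.get((col, row), 0.0) + dt
    return totals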
# main ================================================
def main():
app = Qt.QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
mzie/RATRACTION
|
animal_movement_heatmap.py
|
Python
|
gpl-3.0
| 5,243
|
[
"Gaussian"
] |
d37587cf3d84c3c3a6fa747e247b35118c1d535d7b01804d442e75dcf545b50e
|
from __future__ import division
from __future__ import print_function
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gam import H2OGeneralizedAdditiveEstimator
# In this test, we check and make sure Gam can run without predictor columns as long as gam column is specified
def test_gam_null_predictors():
print("Checking null predictor run for binomial")
h2o_data = h2o.import_file(pyunit_utils.locate("smalldata/gam_test/gamBinomial1Col.csv"))
buildModelMetricsCheck(h2o_data, 'binomial')
print("Checking null predictor for gaussian")
h2o_data = h2o.import_file(
path=pyunit_utils.locate("smalldata/gam_test/gamGaussian1Col.csv"))
buildModelMetricsCheck(h2o_data, 'gaussian')
print("Checking null predictor for multinomial")
h2o_data = h2o.import_file(
path=pyunit_utils.locate("smalldata/gam_test/gamMultinomial1Col.csv"))
buildModelMetricsCheck(h2o_data, 'multinomial')
print("gam modelmetrics test completed successfully")
def buildModelMetricsCheck(train_data, family):
x = []
y = "response"
    if family != 'gaussian':
train_data[y] = train_data[y].asfactor()
frames = train_data.split_frame(ratios=[0.9], seed=12345)
h2o_model = H2OGeneralizedAdditiveEstimator(family=family, gam_columns=["C1"])
h2o_model.train(x=x, y=y, training_frame=frames[0], validation_frame=frames[1])
h2o_model2 = H2OGeneralizedAdditiveEstimator(family=family, gam_columns=["C1"])
h2o_model2.train(y=y, training_frame=frames[0], validation_frame=frames[1])
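    # h2o_model passed x=[] while h2o_model2 omitted x entirely; with only the
    # gam column available, both fits should be identical, which the metric
    # comparisons below verify.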
# check and make sure coefficient does not contain predictor column
coeffNames = h2o_model.coef().keys()
assert not "C1" in coeffNames, "Not expecting C1 to be a coefficient but it is."
# check and make sure both model produce the same metrics
if family=='gaussian':
assert h2o_model.mse() == h2o_model2.mse(), "Expected model MSE: {0}, Actual: {1}".format(h2o_model.mse(),
h2o_model2.mse())
else:
assert h2o_model.logloss() == h2o_model2.logloss(), "Expected model logloss: {0}, Actual: " \
"{1}".format(h2o_model.logloss(), h2o_model2.logloss())
if __name__ == "__main__":
pyunit_utils.standalone_test(test_gam_null_predictors)
else:
test_gam_null_predictors()
|
h2oai/h2o-3
|
h2o-py/tests/testdir_algos/gam/pyunit_PUBDEV_7931_gam_null_predictors.py
|
Python
|
apache-2.0
| 2,490
|
[
"Gaussian"
] |
cfad1eb486ce22ec5e47a65a8410423fa7014c66c152549181e815a4f839e431
|
#!/usr/bin/env python
# encoding: utf-8
"""
Analyze multiple periods and starspot evolution
"""
from __future__ import print_function, division, absolute_import
import datetime
import os
import sys
from scipy.ndimage import gaussian_filter
from scipy import signal
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import h5py
from . import eclipsing_binary, config, utils
def find_subpeak_range(peak_freqs, peak_heights, height_ratio):
"""
Find the highest subpeak and range of significant subpeak frequencies.
Parameters
----------
peak_freqs : numpy.array_like
Frequencies of subpeaks
peak_heights : numpy.array_like
Heights of subpeaks
height_ratio : float
Significant peaks have `peak_heights` > `height_ratio` * highest peak
Returns
-------
    freq_range, height_range : None, 2-tuple, or 3-tuple
        The frequencies and heights of the subpeaks.
        Each is None if no subpeaks are found, a 2-tuple of length-1 arrays
        if only one significant subpeak is found, or a 3-tuple of scalars
        (highest peak, lowest-frequency subpeak, highest-frequency subpeak)
        if two or more are found.
"""
if len(peak_freqs) == 0:
return None, None
# Sort in descending order of peak height
height_sort = np.argsort(peak_heights)[::-1]
peak_heights_sorted = peak_heights[height_sort]
peak_freqs_sorted = peak_freqs[height_sort]
sig_indices = peak_heights_sorted > height_ratio * peak_heights_sorted[0]
if np.sum(sig_indices) <= 1:
height_range = (peak_heights_sorted[sig_indices],
peak_heights_sorted[sig_indices])
freq_range = (peak_freqs_sorted[sig_indices],
peak_freqs_sorted[sig_indices])
else:
sig_heights = peak_heights_sorted[sig_indices]
sig_freqs = peak_freqs_sorted[sig_indices]
subpeak_index_1 = np.argmin(sig_freqs)
subpeak_index_2 = np.argmax(sig_freqs)
height_range = (sig_heights[0],
sig_heights[subpeak_index_1],
sig_heights[subpeak_index_2])
freq_range = (sig_freqs[0],
sig_freqs[subpeak_index_1],
sig_freqs[subpeak_index_2])
return freq_range, height_range
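# A minimal usage sketch (not part of the original module) for
# find_subpeak_range with synthetic peaks.
def _demo_find_subpeak_range():
    freqs = np.array([0.10, 0.20, 0.30])
    heights = np.array([1.0, 0.8, 0.3])
    # With height_ratio=0.5 the 0.8 peak is significant but 0.3 is not, so
    # this returns freq_range (0.10, 0.10, 0.20), i.e. (highest peak,
    # lowest-frequency subpeak, highest-frequency subpeak).
    return find_subpeak_range(freqs, heights, 0.5)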
def detect_multiple_periods(results_file, pgram_file, kernel=30,
height_ratio=0.3, fix=False,
restrict_file='restrict_freqs.csv',
plot_all=False, plot_example=False):
"""
Look for multiple periods and constrain differential rotation.
Parameters
----------
results_file : str
Name of the CSV file containing results.
pgram_file : str
Name of the HDF5 file containing the periodograms
kernel : float, optional
Standard deviation of Gaussian smoothing kernel
height_ratio : float, optional
Significant peaks have `peak_heights` > `height_ratio` * highest peak
fix : bool, optional
Set to True to only run on EBs that need fixing.
restrict_file : str, optional
CSV file containing KIC numbers and period range restrictions.
plot_all : bool, optional
Set to True to save plots of periodogram and peaks as PNGs.
plot_example : bool, optional
Set to True to plot results for a single example.
"""
# Load inspection data
df = pd.read_csv('{}/{}'.format(config.repo_data_dir, results_file))
# Load periodograms
h5 = h5py.File('{}/{}'.format(config.data_dir, pgram_file), 'r')
# Only analyze likely starspot EBs
sp_mask = df['class'].values == 'sp'
if fix:
sp_mask &= df['p_acf_ok'] == 'f'
kics = df['KIC'][sp_mask].values
p_rot = df['p_acf'][sp_mask].values
# -1 filled arrays to hold results
# First column for highest peak, next two columns for subpeaks
peak_freqs_1 = np.zeros((len(kics), 3), dtype=np.float64) - 1.
peak_heights_1 = np.zeros_like(peak_freqs_1, dtype=np.float64) - 1.
peak_freqs_2 = np.zeros_like(peak_freqs_1, dtype=np.float64) - 1.
peak_heights_2 = np.zeros_like(peak_freqs_1, dtype=np.float64) - 1.
total_systems = len(kics)
print('Finding peaks in {} periodograms...'.format(total_systems))
if plot_all:
if not os.path.exists('{}/png'.format(config.data_dir)):
os.mkdir('{}/png'.format(config.data_dir))
if plot_example:
save_results = False
# HACK: Set first rotation period to rotation period of example
p_rot[0] = p_rot[np.where(kics == 4751083)[0][0]]
kics = kics[kics == 4751083]
else:
save_results = True
df_rf = pd.read_csv('{}/{}'.format(config.repo_data_dir, restrict_file))
for ii in range(len(kics)):
# Compute periodogram on frequency grid
freqs = h5['{}/freqs'.format(kics[ii])][:]
powers = h5['{}/powers'.format(kics[ii])][:]
# Only look for peaks around p_rot from ACF
if p_rot[ii] > 45:
# Expand search area
f_min = 1 / 200
else:
f_min = 1 / 45
if kics[ii] in df_rf['KIC'].values:
mask = df_rf['KIC'].values == kics[ii]
f_min = 1 / df_rf['pmax'].values[mask]
f_max = 1 / df_rf['pmin'].values[mask]
else:
f_max = 1.5 / p_rot[ii]
search_area = (freqs > f_min) & (freqs < f_max)
search_freqs = freqs[search_area]
search_powers = powers[search_area]
# Smooth periodogram
smoothed_power = gaussian_filter(search_powers, sigma=kernel)
# Detect peaks and minima in smoothed periodogram
relative_maxes = signal.argrelmax(smoothed_power)[0]
relative_mins = signal.argrelmin(smoothed_power)[0]
# Sort peaks by height
height_sort = np.argsort(smoothed_power[relative_maxes])[::-1]
powers_sorted = smoothed_power[relative_maxes][height_sort]
if len(powers_sorted) == 0:
# No peaks found
peak_freqs_1[ii] = (-2., -2., -2.)
peak_heights_1[ii] = (-2., -2., -2.)
peak_freqs_2[ii] = (-2., -2., -2.)
            peak_heights_2[ii] = (-2., -2., -2.)
continue
# Only consider peaks higher than height_ratio * highest peak
height_cutoff = powers_sorted > height_ratio * powers_sorted[0]
n_significant_peaks = np.sum(height_cutoff)
if n_significant_peaks > 1:
# Take the two highest peaks
peak_index_1 = relative_maxes[height_sort][height_cutoff][0]
peak_index_2 = relative_maxes[height_sort][height_cutoff][1]
peak_indicies = (peak_index_1, peak_index_2)
store_arrs = [[peak_freqs_1, peak_heights_1],
[peak_freqs_2, peak_heights_2]]
else:
# There is only one significant subpeak
peak_indicies = relative_maxes[height_sort][height_cutoff]
store_arrs = [[peak_freqs_1, peak_heights_1]]
# Find peaks in unsmoothed, oversampled periodogram
orig_peaks = signal.argrelmax(search_powers)[0]
for index, store in zip(peak_indicies, store_arrs):
# Get indices of adjacent minima
lt_max = relative_mins < index
if np.sum(lt_max) == 0:
# There is no minimum to the left
index_left = index - 1
else:
index_left = relative_mins[np.nonzero(lt_max)[0][-1]]
if np.sum(~lt_max) == 0:
# There is no minimum to the right
index_right = index + 1
else:
index_right = relative_mins[np.nonzero(~lt_max)[0][0]]
# Peak indices within adjacent minima
in_range = (orig_peaks > index_left) & (orig_peaks < index_right)
            if np.sum(in_range) == 0:
                # No unsmoothed peaks fall between the adjacent minima;
                # fall back to searching all of them
xx = search_freqs[orig_peaks]
yy = search_powers[orig_peaks]
else:
xx = search_freqs[orig_peaks][in_range]
yy = search_powers[orig_peaks][in_range]
freq_range, height_range = find_subpeak_range(xx, yy, height_ratio)
if freq_range is None:
# No subpeaks found
continue
# Store results to arrays
peak_freqs, peak_heights = store
if len(freq_range) == 2:
peak_freqs[ii, :2] = freq_range
peak_heights[ii, :2] = height_range
elif len(freq_range) == 3:
peak_freqs[ii] = freq_range
peak_heights[ii] = height_range
sys.stdout.write('\r{:.1f}% complete'.format((ii + 1) * 100 /
total_systems))
sys.stdout.flush()
if plot_example:
fig, ax1 = plt.subplots()
ax1.plot(search_freqs, search_powers, lw=0.75, color='k')
ax1.plot(search_freqs, smoothed_power, lw=1.5, ls=':',
color='purple')
for jj in [1, 2]:
xx = peak_freqs_1[ii, jj]
yy = peak_heights_1[ii, jj]
ax1.scatter(xx, yy, color='k', s=30, zorder=4, marker='x')
xx = peak_freqs_2[ii, jj]
yy = peak_heights_2[ii, jj]
ax1.scatter(xx, yy, color='k', s=30, zorder=4, marker='x')
ax1.set_xlabel('Frequency (day$^{-1}$)')
ax1.set_ylabel('Power')
ax1.set_title('KIC {}'.format(kics[ii]), fontsize=20, y=1.24)
ax1.minorticks_on()
# Set x limits
x_vals = np.concatenate((peak_freqs_1[ii], peak_freqs_2[ii]))
x_vals = x_vals[x_vals > 0]
if len(x_vals) > 1:
xmin = x_vals.min()
xmax = x_vals.max()
xdiff = xmax - xmin
ax1.set_xlim(xmin - 0.5 * xdiff, xmax + 0.5 * xdiff)
else:
ax1.set_xlim(search_freqs.min(), search_freqs.max())
ax2 = ax1.twiny()
ax2_ticks = ax1.get_xticks()
def tick_function(x):
per = 1 / x
return ['{:.1f}'.format(p) for p in per]
ax2.set_xticks(ax2_ticks)
ax2.set_xbound(ax1.get_xbound())
ax2.set_xticklabels(tick_function(ax2_ticks))
ax2.tick_params(axis='x', which='major', pad=5)
ax2.set_xlabel('Period (days)')
plt.savefig('{}/multipeak_example.pdf'.format(config.data_dir))
plt.close()
if plot_all:
plt.plot(search_freqs, search_powers, lw=0.5)
plt.plot(search_freqs, smoothed_power, lw=0.5, ls=':')
plt.axvline(1 / p_rot[ii], lw=0.5, linestyle='--', color='k')
colors = ['r', 'b']
for jj in [0, 1, 2]:
xx = peak_freqs_1[ii, jj]
yy = peak_heights_1[ii, jj]
plt.scatter(xx, yy, color=colors[0], s=5, zorder=4)
xx = peak_freqs_2[ii, jj]
yy = peak_heights_2[ii, jj]
plt.scatter(xx, yy, color=colors[1], s=5, zorder=4)
plt.xlabel('Frequency (day$^{-1}$)')
plt.ylabel('Power')
plt.title('KIC {}'.format(kics[ii]))
plt.xlim(search_freqs.min(), search_freqs.max())
plt.ylim(0, 1.1 * search_powers.max())
plt.savefig('{}/png/pgram_KIC{:09d}.png'.format(config.data_dir,
kics[ii]))
plt.close()
print()
if save_results:
# Save results to CSV
df.loc[sp_mask, 'freq_1_1'] = peak_freqs_1[:, 1]
df.loc[sp_mask, 'freq_1_2'] = peak_freqs_1[:, 2]
df.loc[sp_mask, 'freq_2_1'] = peak_freqs_2[:, 1]
df.loc[sp_mask, 'freq_2_2'] = peak_freqs_2[:, 2]
df.loc[sp_mask, 'height_1_1'] = peak_heights_1[:, 1]
df.loc[sp_mask, 'height_1_2'] = peak_heights_1[:, 2]
df.loc[sp_mask, 'height_2_1'] = peak_heights_2[:, 1]
df.loc[sp_mask, 'height_2_2'] = peak_heights_2[:, 2]
df.to_csv('{}/{}'.format(config.repo_data_dir, results_file),
index=False)
def generate_frequency_periodograms(sp_class='sp', window=1.5,
output_file=None, kic_list=None,
sap_list=None, detrend_list=None):
"""
Generate periodograms on a uniform frequency grid.
Parameters
----------
sp_class : {'sp', 'spx'}
Likely or possible starspots
window : float, optional
Window around the eclipse to interpolate over
output_file : str, optional
Specify an alternate output filename. Default uses today's date.
kic_list : list, optional
Only run on these KIC IDs
detrend_list : list, optional
These KIC IDs will be detrended with a low order polynomial.
sap_list : list, optional
These KIC IDs will use the SAP flux instead of PDC.
"""
df = utils.collect_results()
if output_file is None:
today = '{:%Y%m%d}'.format(datetime.date.today())
output_file = '{}/grid_pgrams.{}.h5'.format(config.data_dir, today)
else:
output_file = '{}/{}'.format(config.data_dir, output_file)
h5 = h5py.File(output_file, 'w')
h5.attrs['window'] = window
kics = df['KIC'].values[df['class'].values == sp_class]
if kic_list is not None:
kics = kics[np.in1d(kics, kic_list)]
total_systems = len(kics)
print('Computing grid periodograms for {} systems...'.format(total_systems))
for ii, kic in enumerate(kics):
if sap_list is not None and kic in sap_list:
eb = eclipsing_binary.EclipsingBinary.from_kic(kic, use_pdc=False)
else:
eb = eclipsing_binary.EclipsingBinary.from_kic(kic)
if detrend_list is not None and kic in detrend_list:
eb.normalize(detrend=True)
else:
eb.normalize()
eb.interpolate_over_eclipse(window=window)
freqs, powers = eb.frequency_periodogram()
group = h5.create_group(str(kic))
group.create_dataset('freqs', data=freqs)
group.create_dataset('powers', data=powers)
del eb
sys.stdout.write('\r{:.1f}% complete'.format((ii + 1) * 100 / total_systems))
sys.stdout.flush()
print()
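# A minimal driver sketch (not part of the original module); the file names
# are illustrative placeholders, not files shipped with the package:
#   generate_frequency_periodograms(sp_class='sp', output_file='grid_pgrams.h5')
#   detect_multiple_periods('inspection_results.csv', 'grid_pgrams.h5',
#                           kernel=30, height_ratio=0.3)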
|
jlurie/decatur
|
decatur/mulitperiodic.py
|
Python
|
mit
| 14,463
|
[
"Gaussian"
] |
5a168c02462291bb2b631f956d7e18c0b039d7632f77ac265d6dd8b77cb84aaf
|
from __future__ import absolute_import, division, print_function
import unicodedata
import numpy as np
from .. import Variable, coding
from ..core.pycompat import OrderedDict, basestring, unicode_type
# Special characters that are permitted in netCDF names except in the
# 0th position of the string
_specialchars = '_.@+- !"#$%&\\()*,:;<=>?[]^`{|}~'
# The following are reserved names in CDL and may not be used as names of
# variables, dimensions, or attributes
_reserved_names = set(['byte', 'char', 'short', 'ushort', 'int', 'uint',
                       'int64', 'uint64', 'float', 'real', 'double', 'bool',
                       'string'])
# These data-types aren't supported by netCDF3, so they are automatically
# coerced instead as indicated by the "coerce_nc3_dtype" function
_nc3_dtype_coercions = {'int64': 'int32', 'bool': 'int8'}
# encode all strings as UTF-8
STRING_ENCODING = 'utf-8'
def coerce_nc3_dtype(arr):
"""Coerce an array to a data type that can be stored in a netCDF-3 file
This function performs the following dtype conversions:
int64 -> int32
bool -> int8
Data is checked for equality, or equivalence (non-NaN values) with
`np.allclose` with the default keyword arguments.
"""
dtype = str(arr.dtype)
if dtype in _nc3_dtype_coercions:
new_dtype = _nc3_dtype_coercions[dtype]
# TODO: raise a warning whenever casting the data-type instead?
cast_arr = arr.astype(new_dtype)
if not (cast_arr == arr).all():
raise ValueError('could not safely cast array from dtype %s to %s'
% (dtype, new_dtype))
arr = cast_arr
return arr
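# A minimal usage sketch (not part of the original module) of the coercions
# listed in _nc3_dtype_coercions above.
def _demo_coerce_nc3_dtype():
    assert coerce_nc3_dtype(np.array([1, 2], dtype='int64')).dtype == np.dtype('int32')
    assert coerce_nc3_dtype(np.array([True, False])).dtype == np.dtype('int8')
    # Values outside the target range (e.g. 2**40 as int64) raise ValueError.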
def encode_nc3_attr_value(value):
if isinstance(value, bytes):
pass
elif isinstance(value, unicode_type):
value = value.encode(STRING_ENCODING)
else:
value = coerce_nc3_dtype(np.atleast_1d(value))
if value.ndim > 1:
raise ValueError("netCDF attributes must be 1-dimensional")
return value
def encode_nc3_attrs(attrs):
return OrderedDict([(k, encode_nc3_attr_value(v))
for k, v in attrs.items()])
def encode_nc3_variable(var):
for coder in [coding.strings.EncodedStringCoder(allows_unicode=False),
coding.strings.CharacterArrayCoder()]:
var = coder.encode(var)
data = coerce_nc3_dtype(var.data)
attrs = encode_nc3_attrs(var.attrs)
return Variable(var.dims, data, attrs, var.encoding)
def _isalnumMUTF8(c):
"""Return True if the given UTF-8 encoded character is alphanumeric
or multibyte.
Input is not checked!
"""
return c.isalnum() or (len(c.encode('utf-8')) > 1)
def is_valid_nc3_name(s):
"""Test whether an object can be validly converted to a netCDF-3
dimension, variable or attribute name
Earlier versions of the netCDF C-library reference implementation
enforced a more restricted set of characters in creating new names,
but permitted reading names containing arbitrary bytes. This
specification extends the permitted characters in names to include
multi-byte UTF-8 encoded Unicode and additional printing characters
from the US-ASCII alphabet. The first character of a name must be
alphanumeric, a multi-byte UTF-8 character, or '_' (reserved for
special names with meaning to implementations, such as the
"_FillValue" attribute). Subsequent characters may also include
printing special characters, except for '/' which is not allowed in
names. Names that have trailing space characters are also not
permitted.
"""
if not isinstance(s, basestring):
return False
if not isinstance(s, unicode_type):
s = s.decode('utf-8')
num_bytes = len(s.encode('utf-8'))
return ((unicodedata.normalize('NFC', s) == s) and
(s not in _reserved_names) and
(num_bytes >= 0) and
('/' not in s) and
(s[-1] != ' ') and
(_isalnumMUTF8(s[0]) or (s[0] == '_')) and
all((_isalnumMUTF8(c) or c in _specialchars for c in s)))
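# A minimal usage sketch (not part of the original module):
def _demo_is_valid_nc3_name():
    assert is_valid_nc3_name(u'temperature')
    assert is_valid_nc3_name(u'_FillValue')    # leading '_' is reserved but legal
    assert not is_valid_nc3_name(u'float')     # reserved CDL name
    assert not is_valid_nc3_name(u'bad/name')  # '/' is not allowed
    assert not is_valid_nc3_name(u'trailing ') # trailing space is not allowed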
|
jcmgray/xarray
|
xarray/backends/netcdf3.py
|
Python
|
apache-2.0
| 4,111
|
[
"NetCDF"
] |
ac9172674d5b96ca996290e956d677e9db76527e3d2e4ec896e2ab48b94ebfd2
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import cookielib
import glob
import inspect
import logging
import os
import random
import re
import socket
import string
import sys
import threading
import time
import urllib2
import urlparse
import lib.controller.checks
import lib.core.common
import lib.core.threads
import lib.core.convert
import lib.request.connect
from lib.controller.checks import checkConnection
from lib.core.common import Backend
from lib.core.common import boldifyMessage
from lib.core.common import checkFile
from lib.core.common import dataToStdout
from lib.core.common import getPublicTypeMembers
from lib.core.common import extractRegexResult
from lib.core.common import filterStringValue
from lib.core.common import findPageForms
from lib.core.common import getConsoleWidth
from lib.core.common import getFileItems
from lib.core.common import getFileType
from lib.core.common import getUnicode
from lib.core.common import isListLike
from lib.core.common import normalizePath
from lib.core.common import ntToPosixSlashes
from lib.core.common import openFile
from lib.core.common import parseTargetDirect
from lib.core.common import parseTargetUrl
from lib.core.common import paths
from lib.core.common import randomStr
from lib.core.common import readCachedFileContent
from lib.core.common import readInput
from lib.core.common import resetCookieJar
from lib.core.common import runningAsAdmin
from lib.core.common import sanitizeStr
from lib.core.common import setOptimize
from lib.core.common import setPaths
from lib.core.common import singleTimeWarnMessage
from lib.core.common import UnicodeRawConfigParser
from lib.core.common import urldecode
from lib.core.convert import base64unpickle
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import mergedOptions
from lib.core.data import queries
from lib.core.datatype import AttribDict
from lib.core.datatype import InjectionDict
from lib.core.defaults import defaults
from lib.core.dicts import DBMS_DICT
from lib.core.dicts import DUMP_REPLACEMENTS
from lib.core.enums import ADJUST_TIME_DELAY
from lib.core.enums import AUTH_TYPE
from lib.core.enums import CUSTOM_LOGGING
from lib.core.enums import DUMP_FORMAT
from lib.core.enums import HTTP_HEADER
from lib.core.enums import HTTPMETHOD
from lib.core.enums import MOBILES
from lib.core.enums import OPTION_TYPE
from lib.core.enums import PAYLOAD
from lib.core.enums import PRIORITY
from lib.core.enums import PROXY_TYPE
from lib.core.enums import REFLECTIVE_COUNTER
from lib.core.enums import WIZARD
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapFilePathException
from lib.core.exception import SqlmapGenericException
from lib.core.exception import SqlmapInstallationException
from lib.core.exception import SqlmapMissingDependence
from lib.core.exception import SqlmapMissingMandatoryOptionException
from lib.core.exception import SqlmapMissingPrivileges
from lib.core.exception import SqlmapSilentQuitException
from lib.core.exception import SqlmapSyntaxException
from lib.core.exception import SqlmapSystemException
from lib.core.exception import SqlmapUnsupportedDBMSException
from lib.core.exception import SqlmapUserQuitException
from lib.core.log import FORMATTER
from lib.core.optiondict import optDict
from lib.core.settings import BURP_REQUEST_REGEX
from lib.core.settings import BURP_XML_HISTORY_REGEX
from lib.core.settings import CODECS_LIST_PAGE
from lib.core.settings import CRAWL_EXCLUDE_EXTENSIONS
from lib.core.settings import CUSTOM_INJECTION_MARK_CHAR
from lib.core.settings import DBMS_ALIASES
from lib.core.settings import DEFAULT_PAGE_ENCODING
from lib.core.settings import DEFAULT_TOR_HTTP_PORTS
from lib.core.settings import DEFAULT_TOR_SOCKS_PORT
from lib.core.settings import DUMMY_URL
from lib.core.settings import INJECT_HERE_MARK
from lib.core.settings import IS_WIN
from lib.core.settings import KB_CHARS_BOUNDARY_CHAR
from lib.core.settings import KB_CHARS_LOW_FREQUENCY_ALPHABET
from lib.core.settings import LOCALHOST
from lib.core.settings import MAX_CONNECT_RETRIES
from lib.core.settings import MAX_NUMBER_OF_THREADS
from lib.core.settings import NULL
from lib.core.settings import PARAMETER_SPLITTING_REGEX
from lib.core.settings import PROBLEMATIC_CUSTOM_INJECTION_PATTERNS
from lib.core.settings import SITE
from lib.core.settings import SQLMAP_ENVIRONMENT_PREFIX
from lib.core.settings import SUPPORTED_DBMS
from lib.core.settings import SUPPORTED_OS
from lib.core.settings import TIME_DELAY_CANDIDATES
from lib.core.settings import UNION_CHAR_REGEX
from lib.core.settings import UNKNOWN_DBMS_VERSION
from lib.core.settings import URI_INJECTABLE_REGEX
from lib.core.settings import VERSION_STRING
from lib.core.settings import WEBSCARAB_SPLITTER
from lib.core.threads import getCurrentThreadData
from lib.core.update import update
from lib.parse.configfile import configFileParser
from lib.parse.payloads import loadPayloads
from lib.parse.sitemap import parseSitemap
from lib.request.basic import checkCharEncoding
from lib.request.connect import Connect as Request
from lib.request.dns import DNSServer
from lib.request.basicauthhandler import SmartHTTPBasicAuthHandler
from lib.request.httpshandler import HTTPSHandler
from lib.request.pkihandler import HTTPSPKIAuthHandler
from lib.request.rangehandler import HTTPRangeHandler
from lib.request.redirecthandler import SmartRedirectHandler
from lib.request.templates import getPageTemplate
from lib.utils.crawler import crawl
from lib.utils.deps import checkDependencies
from lib.utils.google import Google
from lib.utils.purge import purge
from thirdparty.colorama.initialise import init as coloramainit
from thirdparty.keepalive import keepalive
from thirdparty.oset.pyoset import oset
from thirdparty.socks import socks
from xml.etree.ElementTree import ElementTree
authHandler = urllib2.BaseHandler()
httpsHandler = HTTPSHandler()
keepAliveHandler = keepalive.HTTPHandler()
proxyHandler = urllib2.ProxyHandler()
redirectHandler = SmartRedirectHandler()
rangeHandler = HTTPRangeHandler()
def _urllib2Opener():
"""
This function creates the urllib2 OpenerDirector.
"""
debugMsg = "creating HTTP requests opener object"
logger.debug(debugMsg)
handlers = [proxyHandler, authHandler, redirectHandler, rangeHandler, httpsHandler]
if not conf.dropSetCookie:
if not conf.loadCookies:
conf.cj = cookielib.CookieJar()
else:
conf.cj = cookielib.MozillaCookieJar()
resetCookieJar(conf.cj)
handlers.append(urllib2.HTTPCookieProcessor(conf.cj))
# Reference: http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html
if conf.keepAlive:
warnMsg = "persistent HTTP(s) connections, Keep-Alive, has "
warnMsg += "been disabled because of its incompatibility "
if conf.proxy:
warnMsg += "with HTTP(s) proxy"
logger.warn(warnMsg)
elif conf.authType:
warnMsg += "with authentication methods"
logger.warn(warnMsg)
else:
handlers.append(keepAliveHandler)
opener = urllib2.build_opener(*handlers)
urllib2.install_opener(opener)
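# A minimal standalone sketch (not part of sqlmap) of the opener pattern used
# above: stack handlers, build an OpenerDirector, install it process-wide.
#   import cookielib, urllib2
#   cj = cookielib.CookieJar()
#   opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
#   urllib2.install_opener(opener)  # later urllib2.urlopen() calls use it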
def _feedTargetsDict(reqFile, addedTargetUrls):
"""
Parses web scarab and burp logs and adds results to the target URL list
"""
def _parseWebScarabLog(content):
"""
Parses web scarab logs (POST method not supported)
"""
reqResList = content.split(WEBSCARAB_SPLITTER)
for request in reqResList:
url = extractRegexResult(r"URL: (?P<result>.+?)\n", request, re.I)
method = extractRegexResult(r"METHOD: (?P<result>.+?)\n", request, re.I)
cookie = extractRegexResult(r"COOKIE: (?P<result>.+?)\n", request, re.I)
if not method or not url:
logger.debug("not a valid WebScarab log data")
continue
if method.upper() == HTTPMETHOD.POST:
warnMsg = "POST requests from WebScarab logs aren't supported "
warnMsg += "as their body content is stored in separate files. "
warnMsg += "Nevertheless you can use -r to load them individually."
logger.warning(warnMsg)
continue
if not(conf.scope and not re.search(conf.scope, url, re.I)):
if not kb.targets or url not in addedTargetUrls:
kb.targets.add((url, method, None, cookie, None))
addedTargetUrls.add(url)
def _parseBurpLog(content):
"""
Parses burp logs
"""
if not re.search(BURP_REQUEST_REGEX, content, re.I | re.S):
if re.search(BURP_XML_HISTORY_REGEX, content, re.I | re.S):
reqResList = []
for match in re.finditer(BURP_XML_HISTORY_REGEX, content, re.I | re.S):
port, request = match.groups()
request = request.decode("base64")
_ = re.search(r"%s:.+" % re.escape(HTTP_HEADER.HOST), request)
if _:
host = _.group(0).strip()
if not re.search(r":\d+\Z", host):
request = request.replace(host, "%s:%d" % (host, int(port)))
reqResList.append(request)
else:
reqResList = [content]
else:
reqResList = re.finditer(BURP_REQUEST_REGEX, content, re.I | re.S)
for match in reqResList:
request = match if isinstance(match, basestring) else match.group(0)
request = re.sub(r"\A[^\w]+", "", request)
schemePort = re.search(r"(http[\w]*)\:\/\/.*?\:([\d]+).+?={10,}", request, re.I | re.S)
if schemePort:
scheme = schemePort.group(1)
port = schemePort.group(2)
else:
scheme, port = None, None
if not re.search(r"^[\n]*(%s).*?\sHTTP\/" % "|".join(getPublicTypeMembers(HTTPMETHOD, True)), request, re.I | re.M):
continue
if re.search(r"^[\n]*%s.*?\.(%s)\sHTTP\/" % (HTTPMETHOD.GET, "|".join(CRAWL_EXCLUDE_EXTENSIONS)), request, re.I | re.M):
continue
getPostReq = False
url = None
host = None
method = None
data = None
cookie = None
params = False
newline = None
lines = request.split('\n')
headers = []
for index in xrange(len(lines)):
line = lines[index]
if not line.strip() and index == len(lines) - 1:
break
newline = "\r\n" if line.endswith('\r') else '\n'
line = line.strip('\r')
match = re.search(r"\A(%s) (.+) HTTP/[\d.]+\Z" % "|".join(getPublicTypeMembers(HTTPMETHOD, True)), line) if not method else None
if len(line) == 0 and method and method != HTTPMETHOD.GET and data is None:
data = ""
params = True
elif match:
method = match.group(1)
url = match.group(2)
if any(_ in line for _ in ('?', '=', CUSTOM_INJECTION_MARK_CHAR)):
params = True
getPostReq = True
# POST parameters
elif data is not None and params:
data += "%s%s" % (line, newline)
# GET parameters
elif "?" in line and "=" in line and ": " not in line:
params = True
# Headers
elif re.search(r"\A\S+: ", line):
key, value = line.split(": ", 1)
# Cookie and Host headers
if key.upper() == HTTP_HEADER.COOKIE.upper():
cookie = value
elif key.upper() == HTTP_HEADER.HOST.upper():
if '://' in value:
scheme, value = value.split('://')[:2]
splitValue = value.split(":")
host = splitValue[0]
if len(splitValue) > 1:
port = filterStringValue(splitValue[1], "[0-9]")
# Avoid to add a static content length header to
# headers and consider the following lines as
# POSTed data
if key.upper() == HTTP_HEADER.CONTENT_LENGTH.upper():
params = True
# Avoid proxy and connection type related headers
elif key not in (HTTP_HEADER.PROXY_CONNECTION, HTTP_HEADER.CONNECTION):
headers.append((getUnicode(key), getUnicode(value)))
if CUSTOM_INJECTION_MARK_CHAR in re.sub(PROBLEMATIC_CUSTOM_INJECTION_PATTERNS, "", value or ""):
params = True
data = data.rstrip("\r\n") if data else data
if getPostReq and (params or cookie):
if not port and isinstance(scheme, basestring) and scheme.lower() == "https":
port = "443"
elif not scheme and port == "443":
scheme = "https"
if conf.forceSSL:
scheme = "https"
port = port or "443"
if not host:
errMsg = "invalid format of a request file"
raise SqlmapSyntaxException, errMsg
if not url.startswith("http"):
url = "%s://%s:%s%s" % (scheme or "http", host, port or "80", url)
scheme = None
port = None
if not(conf.scope and not re.search(conf.scope, url, re.I)):
if not kb.targets or url not in addedTargetUrls:
kb.targets.add((url, method, data, cookie, tuple(headers)))
addedTargetUrls.add(url)
checkFile(reqFile)
try:
with openFile(reqFile, "rb") as f:
content = f.read()
except (IOError, OSError, MemoryError), ex:
errMsg = "something went wrong while trying "
errMsg += "to read the content of file '%s' ('%s')" % (reqFile, ex)
raise SqlmapSystemException(errMsg)
if conf.scope:
logger.info("using regular expression '%s' for filtering targets" % conf.scope)
_parseBurpLog(content)
_parseWebScarabLog(content)
if not addedTargetUrls:
errMsg = "unable to find usable request(s) "
errMsg += "in provided file ('%s')" % reqFile
raise SqlmapGenericException(errMsg)
def _loadQueries():
"""
Loads queries from 'xml/queries.xml' file.
"""
def iterate(node, retVal=None):
class DictObject(object):
def __init__(self):
self.__dict__ = {}
def __contains__(self, name):
return name in self.__dict__
if retVal is None:
retVal = DictObject()
for child in node.findall("*"):
instance = DictObject()
retVal.__dict__[child.tag] = instance
if child.attrib:
instance.__dict__.update(child.attrib)
else:
iterate(child, instance)
return retVal
tree = ElementTree()
try:
tree.parse(paths.QUERIES_XML)
except Exception, ex:
errMsg = "something seems to be wrong with "
errMsg += "the file '%s' ('%s'). Please make " % (paths.QUERIES_XML, ex)
errMsg += "sure that you haven't made any changes to it"
raise SqlmapInstallationException, errMsg
for node in tree.findall("*"):
queries[node.attrib['value']] = iterate(node)
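# A minimal sketch (not part of sqlmap) of the attribute tree built by
# _loadQueries: XML attributes become object attributes and child elements
# become nested objects, so a hypothetical
#   <dbms value="MySQL"><users><blind query="SELECT ..."/></users></dbms>
# is reachable afterwards as  queries["MySQL"].users.blind.query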
def _setMultipleTargets():
"""
Define a configuration parameter if we are running in multiple target
mode.
"""
initialTargetsCount = len(kb.targets)
addedTargetUrls = set()
if not conf.logFile:
return
debugMsg = "parsing targets list from '%s'" % conf.logFile
logger.debug(debugMsg)
if not os.path.exists(conf.logFile):
errMsg = "the specified list of targets does not exist"
raise SqlmapFilePathException(errMsg)
if os.path.isfile(conf.logFile):
_feedTargetsDict(conf.logFile, addedTargetUrls)
elif os.path.isdir(conf.logFile):
files = os.listdir(conf.logFile)
files.sort()
for reqFile in files:
if not re.search("([\d]+)\-request", reqFile):
continue
_feedTargetsDict(os.path.join(conf.logFile, reqFile), addedTargetUrls)
else:
errMsg = "the specified list of targets is not a file "
errMsg += "nor a directory"
raise SqlmapFilePathException(errMsg)
updatedTargetsCount = len(kb.targets)
if updatedTargetsCount > initialTargetsCount:
infoMsg = "sqlmap parsed %d " % (updatedTargetsCount - initialTargetsCount)
infoMsg += "(parameter unique) requests from the "
infoMsg += "targets list ready to be tested"
logger.info(infoMsg)
def _adjustLoggingFormatter():
"""
Solves problem of line deletition caused by overlapping logging messages
and retrieved data info in inference mode
"""
if hasattr(FORMATTER, '_format'):
return
def format(record):
_ = boldifyMessage(FORMATTER._format(record))
if kb.prependFlag:
_ = "\n%s" % _
kb.prependFlag = False
return _
FORMATTER._format = FORMATTER.format
FORMATTER.format = format
def _setRequestFromFile():
"""
This function checks if the way to make a HTTP request is through supplied
textual file, parses it and saves the information into the knowledge base.
"""
if not conf.requestFile:
return
addedTargetUrls = set()
conf.requestFile = os.path.expanduser(conf.requestFile)
infoMsg = "parsing HTTP request from '%s'" % conf.requestFile
logger.info(infoMsg)
if not os.path.isfile(conf.requestFile):
errMsg = "the specified HTTP request file "
errMsg += "does not exist"
raise SqlmapFilePathException(errMsg)
_feedTargetsDict(conf.requestFile, addedTargetUrls)
def _setCrawler():
if not conf.crawlDepth:
return
if not any((conf.bulkFile, conf.sitemapUrl)):
crawl(conf.url)
else:
if conf.bulkFile:
targets = getFileItems(conf.bulkFile)
else:
targets = parseSitemap(conf.sitemapUrl)
for i in xrange(len(targets)):
try:
target = targets[i]
crawl(target)
if conf.verbose in (1, 2):
status = "%d/%d links visited (%d%%)" % (i + 1, len(targets), round(100.0 * (i + 1) / len(targets)))
dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status), True)
except Exception, ex:
errMsg = "problem occurred while crawling at '%s' ('%s')" % (target, ex)
logger.error(errMsg)
def _setGoogleDorking():
"""
This function checks if the way to request testable hosts is through
Google dorking then requests to Google the search parameter, parses
the results and save the testable hosts into the knowledge base.
"""
if not conf.googleDork:
return
global keepAliveHandler
global proxyHandler
debugMsg = "initializing Google dorking requests"
logger.debug(debugMsg)
infoMsg = "first request to Google to get the session cookie"
logger.info(infoMsg)
handlers = [proxyHandler]
# Reference: http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html
if conf.keepAlive:
if conf.proxy:
warnMsg = "persistent HTTP(s) connections, Keep-Alive, has "
warnMsg += "been disabled because of its incompatibility "
warnMsg += "with HTTP(s) proxy"
logger.warn(warnMsg)
else:
handlers.append(keepAliveHandler)
googleObj = Google(handlers)
kb.data.onlyGETs = None
def retrieve():
links = googleObj.search(conf.googleDork)
if not links:
errMsg = "unable to find results for your "
errMsg += "Google dork expression"
raise SqlmapGenericException(errMsg)
for link in links:
link = urldecode(link)
if re.search(r"(.*?)\?(.+)", link):
kb.targets.add((link, conf.method, conf.data, conf.cookie, None))
elif re.search(URI_INJECTABLE_REGEX, link, re.I):
if kb.data.onlyGETs is None and conf.data is None and not conf.googleDork:
message = "do you want to scan only results containing GET parameters? [Y/n] "
test = readInput(message, default="Y")
kb.data.onlyGETs = test.lower() != 'n'
if not kb.data.onlyGETs or conf.googleDork:
kb.targets.add((link, conf.method, conf.data, conf.cookie, None))
return links
while True:
links = retrieve()
if kb.targets:
infoMsg = "sqlmap got %d results for your " % len(links)
infoMsg += "Google dork expression, "
if len(links) == len(kb.targets):
infoMsg += "all "
else:
infoMsg += "%d " % len(kb.targets)
infoMsg += "of them are testable targets"
logger.info(infoMsg)
break
else:
message = "sqlmap got %d results " % len(links)
message += "for your Google dork expression, but none of them "
message += "have GET parameters to test for SQL injection. "
message += "Do you want to skip to the next result page? [Y/n]"
test = readInput(message, default="Y")
if test[0] in ("n", "N"):
raise SqlmapSilentQuitException
else:
conf.googlePage += 1
def _setBulkMultipleTargets():
if not conf.bulkFile:
return
conf.bulkFile = os.path.expanduser(conf.bulkFile)
infoMsg = "parsing multiple targets list from '%s'" % conf.bulkFile
logger.info(infoMsg)
if not os.path.isfile(conf.bulkFile):
errMsg = "the specified bulk file "
errMsg += "does not exist"
raise SqlmapFilePathException(errMsg)
found = False
for line in getFileItems(conf.bulkFile):
if re.match(r"[^ ]+\?(.+)", line, re.I) or CUSTOM_INJECTION_MARK_CHAR in line:
found = True
kb.targets.add((line.strip(), None, None, None, None))
if not found and not conf.forms and not conf.crawlDepth:
warnMsg = "no usable links found (with GET parameters)"
logger.warn(warnMsg)
def _setSitemapTargets():
if not conf.sitemapUrl:
return
infoMsg = "parsing sitemap '%s'" % conf.sitemapUrl
logger.info(infoMsg)
found = False
for item in parseSitemap(conf.sitemapUrl):
if re.match(r"[^ ]+\?(.+)", item, re.I):
found = True
kb.targets.add((item.strip(), None, None, None, None))
if not found and not conf.forms and not conf.crawlDepth:
warnMsg = "no usable links found (with GET parameters)"
logger.warn(warnMsg)
def _findPageForms():
if not conf.forms or conf.crawlDepth:
return
if conf.url and not checkConnection():
return
infoMsg = "searching for forms"
logger.info(infoMsg)
if not any((conf.bulkFile, conf.googleDork, conf.sitemapUrl)):
page, _ = Request.queryPage(content=True)
findPageForms(page, conf.url, True, True)
else:
if conf.bulkFile:
targets = getFileItems(conf.bulkFile)
elif conf.sitemapUrl:
targets = parseSitemap(conf.sitemapUrl)
elif conf.googleDork:
targets = [_[0] for _ in kb.targets]
kb.targets.clear()
for i in xrange(len(targets)):
try:
target = targets[i]
page, _, _ = Request.getPage(url=target.strip(), crawling=True, raise404=False)
findPageForms(page, target, False, True)
if conf.verbose in (1, 2):
status = '%d/%d links visited (%d%%)' % (i + 1, len(targets), round(100.0 * (i + 1) / len(targets)))
dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status), True)
except KeyboardInterrupt:
break
except Exception, ex:
errMsg = "problem occurred while searching for forms at '%s' ('%s')" % (target, ex)
logger.error(errMsg)
def _setDBMSAuthentication():
"""
Check and set the DBMS authentication credentials to run statements as
another user, not the session user
"""
if not conf.dbmsCred:
return
debugMsg = "setting the DBMS authentication credentials"
logger.debug(debugMsg)
match = re.search("^(.+?):(.*?)$", conf.dbmsCred)
if not match:
errMsg = "DBMS authentication credentials value must be in format "
errMsg += "username:password"
raise SqlmapSyntaxException(errMsg)
conf.dbmsUsername = match.group(1)
conf.dbmsPassword = match.group(2)
def _setMetasploit():
if not conf.osPwn and not conf.osSmb and not conf.osBof:
return
debugMsg = "setting the takeover out-of-band functionality"
logger.debug(debugMsg)
msfEnvPathExists = False
if IS_WIN:
try:
import win32file
except ImportError:
errMsg = "sqlmap requires third-party module 'pywin32' "
errMsg += "in order to use Metasploit functionalities on "
errMsg += "Windows. You can download it from "
errMsg += "'http://sourceforge.net/projects/pywin32/files/pywin32/'"
raise SqlmapMissingDependence(errMsg)
if not conf.msfPath:
def _(key, value):
retVal = None
try:
from _winreg import ConnectRegistry, OpenKey, QueryValueEx, HKEY_LOCAL_MACHINE
_ = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
_ = OpenKey(_, key)
retVal = QueryValueEx(_, value)[0]
except:
logger.debug("unable to identify Metasploit installation path via registry key")
return retVal
conf.msfPath = _(r"SOFTWARE\Rapid7\Metasploit", "Location")
if conf.msfPath:
conf.msfPath = os.path.join(conf.msfPath, "msf3")
if conf.osSmb:
isAdmin = runningAsAdmin()
if not isAdmin:
errMsg = "you need to run sqlmap as an administrator "
errMsg += "if you want to perform a SMB relay attack because "
errMsg += "it will need to listen on a user-specified SMB "
errMsg += "TCP port for incoming connection attempts"
raise SqlmapMissingPrivileges(errMsg)
if conf.msfPath:
for path in (conf.msfPath, os.path.join(conf.msfPath, "bin")):
if all(os.path.exists(normalizePath(os.path.join(path, _))) for _ in ("", "msfcli", "msfconsole", "msfencode", "msfpayload")):
msfEnvPathExists = True
conf.msfPath = path
break
if msfEnvPathExists:
debugMsg = "provided Metasploit Framework path "
debugMsg += "'%s' is valid" % conf.msfPath
logger.debug(debugMsg)
else:
warnMsg = "the provided Metasploit Framework path "
warnMsg += "'%s' is not valid. The cause could " % conf.msfPath
warnMsg += "be that the path does not exists or that one "
warnMsg += "or more of the needed Metasploit executables "
warnMsg += "within msfcli, msfconsole, msfencode and "
warnMsg += "msfpayload do not exist"
logger.warn(warnMsg)
else:
warnMsg = "you did not provide the local path where Metasploit "
warnMsg += "Framework is installed"
logger.warn(warnMsg)
if not msfEnvPathExists:
warnMsg = "sqlmap is going to look for Metasploit Framework "
warnMsg += "installation inside the environment path(s)"
logger.warn(warnMsg)
envPaths = os.environ.get("PATH", "").split(";" if IS_WIN else ":")
for envPath in envPaths:
envPath = envPath.replace(";", "")
if all(os.path.exists(normalizePath(os.path.join(envPath, _))) for _ in ("", "msfcli", "msfconsole", "msfencode", "msfpayload")):
infoMsg = "Metasploit Framework has been found "
infoMsg += "installed in the '%s' path" % envPath
logger.info(infoMsg)
msfEnvPathExists = True
conf.msfPath = envPath
break
if not msfEnvPathExists:
errMsg = "unable to locate Metasploit Framework installation. "
errMsg += "You can get it at 'http://www.metasploit.com/download/'"
raise SqlmapFilePathException(errMsg)
def _setWriteFile():
if not conf.wFile:
return
debugMsg = "setting the write file functionality"
logger.debug(debugMsg)
if not os.path.exists(conf.wFile):
errMsg = "the provided local file '%s' does not exist" % conf.wFile
raise SqlmapFilePathException(errMsg)
if not conf.dFile:
errMsg = "you did not provide the back-end DBMS absolute path "
errMsg += "where you want to write the local file '%s'" % conf.wFile
raise SqlmapMissingMandatoryOptionException(errMsg)
conf.wFileType = getFileType(conf.wFile)
def _setOS():
"""
Force the back-end DBMS operating system option.
"""
if not conf.os:
return
if conf.os.lower() not in SUPPORTED_OS:
errMsg = "you provided an unsupported back-end DBMS operating "
errMsg += "system. The supported DBMS operating systems for OS "
errMsg += "and file system access are %s. " % ', '.join([o.capitalize() for o in SUPPORTED_OS])
errMsg += "If you do not know the back-end DBMS underlying OS, "
errMsg += "do not provide it and sqlmap will fingerprint it for "
errMsg += "you."
raise SqlmapUnsupportedDBMSException(errMsg)
debugMsg = "forcing back-end DBMS operating system to user defined "
debugMsg += "value '%s'" % conf.os
logger.debug(debugMsg)
Backend.setOs(conf.os)
def _setTechnique():
validTechniques = sorted(getPublicTypeMembers(PAYLOAD.TECHNIQUE), key=lambda x: x[1])
validLetters = [_[0][0].upper() for _ in validTechniques]
if conf.tech and isinstance(conf.tech, basestring):
_ = []
for letter in conf.tech.upper():
if letter not in validLetters:
errMsg = "value for --technique must be a string composed "
errMsg += "by the letters %s. Refer to the " % ", ".join(validLetters)
errMsg += "user's manual for details"
raise SqlmapSyntaxException(errMsg)
for validTech, validInt in validTechniques:
if letter == validTech[0]:
_.append(validInt)
break
conf.tech = _
def _setDBMS():
"""
Force the back-end DBMS option.
"""
if not conf.dbms:
return
debugMsg = "forcing back-end DBMS to user defined value"
logger.debug(debugMsg)
conf.dbms = conf.dbms.lower()
regex = re.search("%s ([\d\.]+)" % ("(%s)" % "|".join([alias for alias in SUPPORTED_DBMS])), conf.dbms, re.I)
if regex:
conf.dbms = regex.group(1)
Backend.setVersion(regex.group(2))
if conf.dbms not in SUPPORTED_DBMS:
errMsg = "you provided an unsupported back-end database management "
errMsg += "system. Supported DBMSes are as follows: %s. " % ', '.join(sorted(_ for _ in DBMS_DICT))
errMsg += "If you do not know the back-end DBMS, do not provide "
errMsg += "it and sqlmap will fingerprint it for you."
raise SqlmapUnsupportedDBMSException(errMsg)
for dbms, aliases in DBMS_ALIASES:
if conf.dbms in aliases:
conf.dbms = dbms
break
def _setTamperingFunctions():
"""
Loads tampering functions from given script(s)
"""
if conf.tamper:
last_priority = PRIORITY.HIGHEST
check_priority = True
resolve_priorities = False
priorities = []
for tfile in re.split(PARAMETER_SPLITTING_REGEX, conf.tamper):
found = False
tfile = tfile.strip()
if not tfile:
continue
elif os.path.exists(os.path.join(paths.SQLMAP_TAMPER_PATH, tfile if tfile.endswith('.py') else "%s.py" % tfile)):
tfile = os.path.join(paths.SQLMAP_TAMPER_PATH, tfile if tfile.endswith('.py') else "%s.py" % tfile)
elif not os.path.exists(tfile):
errMsg = "tamper script '%s' does not exist" % tfile
raise SqlmapFilePathException(errMsg)
elif not tfile.endswith('.py'):
errMsg = "tamper script '%s' should have an extension '.py'" % tfile
raise SqlmapSyntaxException(errMsg)
dirname, filename = os.path.split(tfile)
dirname = os.path.abspath(dirname)
infoMsg = "loading tamper script '%s'" % filename[:-3]
logger.info(infoMsg)
if not os.path.exists(os.path.join(dirname, '__init__.py')):
errMsg = "make sure that there is an empty file '__init__.py' "
errMsg += "inside of tamper scripts directory '%s'" % dirname
raise SqlmapGenericException(errMsg)
if dirname not in sys.path:
sys.path.insert(0, dirname)
try:
module = __import__(filename[:-3])
except (ImportError, SyntaxError), msg:
raise SqlmapSyntaxException("cannot import tamper script '%s' (%s)" % (filename[:-3], msg))
priority = PRIORITY.NORMAL if not hasattr(module, '__priority__') else module.__priority__
for name, function in inspect.getmembers(module, inspect.isfunction):
if name == "tamper" and inspect.getargspec(function).args and inspect.getargspec(function).keywords == "kwargs":
found = True
kb.tamperFunctions.append(function)
function.func_name = module.__name__
if check_priority and priority > last_priority:
message = "it seems that you might have mixed "
message += "the order of tamper scripts. "
message += "Do you want to auto resolve this? [Y/n/q] "
test = readInput(message, default="Y")
if not test or test[0] in ("y", "Y"):
resolve_priorities = True
elif test[0] in ("n", "N"):
resolve_priorities = False
elif test[0] in ("q", "Q"):
raise SqlmapUserQuitException
check_priority = False
priorities.append((priority, function))
last_priority = priority
break
elif name == "dependencies":
function()
if not found:
errMsg = "missing function 'tamper(payload, **kwargs)' "
errMsg += "in tamper script '%s'" % tfile
raise SqlmapGenericException(errMsg)
if resolve_priorities and priorities:
priorities.sort(reverse=True)
kb.tamperFunctions = []
for _, function in priorities:
kb.tamperFunctions.append(function)
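# A minimal tamper-script sketch (illustrative, not shipped with sqlmap)
# satisfying what the loader above checks for: a module-level __priority__
# and a tamper(payload, **kwargs) function.
#   from lib.core.enums import PRIORITY
#   __priority__ = PRIORITY.NORMAL
#   def tamper(payload, **kwargs):
#       return payload.replace(" ", "/**/") if payload else payload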
def _setWafFunctions():
"""
Loads WAF/IDS/IPS detecting functions from script(s)
"""
if conf.identifyWaf:
for found in glob.glob(os.path.join(paths.SQLMAP_WAF_PATH, "*.py")):
dirname, filename = os.path.split(found)
dirname = os.path.abspath(dirname)
if filename == "__init__.py":
continue
debugMsg = "loading WAF script '%s'" % filename[:-3]
logger.debug(debugMsg)
if dirname not in sys.path:
sys.path.insert(0, dirname)
try:
if filename[:-3] in sys.modules:
del sys.modules[filename[:-3]]
module = __import__(filename[:-3])
except ImportError, msg:
raise SqlmapSyntaxException("cannot import WAF script '%s' (%s)" % (filename[:-3], msg))
_ = dict(inspect.getmembers(module))
if "detect" not in _:
errMsg = "missing function 'detect(get_page)' "
errMsg += "in WAF script '%s'" % found
raise SqlmapGenericException(errMsg)
else:
kb.wafFunctions.append((_["detect"], _.get("__product__", filename[:-3])))
def _setThreads():
if not isinstance(conf.threads, int) or conf.threads <= 0:
conf.threads = 1
def _setDNSCache():
"""
Makes a cached version of socket._getaddrinfo to avoid subsequent DNS requests.
"""
def _getaddrinfo(*args, **kwargs):
if args in kb.cache:
return kb.cache[args]
else:
kb.cache[args] = socket._getaddrinfo(*args, **kwargs)
return kb.cache[args]
if not hasattr(socket, '_getaddrinfo'):
socket._getaddrinfo = socket.getaddrinfo
socket.getaddrinfo = _getaddrinfo
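# Illustrative effect of _setDNSCache() (hypothetical host): once installed,
# repeated lookups are served from kb.cache instead of hitting the resolver:
#
#     socket.getaddrinfo("target.example", 80)  # first call performs real DNS resolution
#     socket.getaddrinfo("target.example", 80)  # second call returns the cached result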
def _setHTTPProxy():
"""
Check and set the HTTP/SOCKS proxy for all HTTP requests.
"""
global proxyHandler
for _ in ("http", "https"):
if hasattr(proxyHandler, "%s_open" % _):
delattr(proxyHandler, "%s_open" % _)
if not conf.proxy:
if conf.proxyList:
conf.proxy = conf.proxyList[0]
conf.proxyList = conf.proxyList[1:] + conf.proxyList[:1]
infoMsg = "loading proxy '%s' from a supplied proxy list file" % conf.proxy
logger.info(infoMsg)
else:
if conf.hostname in ('localhost', '127.0.0.1') or conf.ignoreProxy:
proxyHandler.proxies = {}
return
debugMsg = "setting the HTTP/SOCKS proxy for all HTTP requests"
logger.debug(debugMsg)
_ = urlparse.urlsplit(conf.proxy)
hostnamePort = _.netloc.split(":")
scheme = _.scheme.upper()
hostname = hostnamePort[0]
port = None
username = None
password = None
if len(hostnamePort) == 2:
try:
port = int(hostnamePort[1])
except:
pass # drops into the next check block
if not all((scheme, hasattr(PROXY_TYPE, scheme), hostname, port)):
errMsg = "proxy value must be in format '(%s)://address:port'" % "|".join(_[0].lower() for _ in getPublicTypeMembers(PROXY_TYPE))
raise SqlmapSyntaxException(errMsg)
if conf.proxyCred:
_ = re.search("^(.*?):(.*?)$", conf.proxyCred)
if not _:
errMsg = "Proxy authentication credentials "
errMsg += "value must be in format username:password"
raise SqlmapSyntaxException(errMsg)
else:
username = _.group(1)
password = _.group(2)
if scheme in (PROXY_TYPE.SOCKS4, PROXY_TYPE.SOCKS5):
proxyHandler.proxies = {}
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5 if scheme == PROXY_TYPE.SOCKS5 else socks.PROXY_TYPE_SOCKS4, hostname, port, username=username, password=password)
socks.wrapmodule(urllib2)
else:
socks.unwrapmodule(urllib2)
if conf.proxyCred:
# Reference: http://stackoverflow.com/questions/34079/how-to-specify-an-authenticated-proxy-for-a-python-http-connection
proxyString = "%s@" % conf.proxyCred
else:
proxyString = ""
proxyString += "%s:%d" % (hostname, port)
proxyHandler.proxies = {"http": proxyString, "https": proxyString}
proxyHandler.__init__(proxyHandler.proxies)
def _setSafeUrl():
"""
Check and set the safe URL options.
"""
if not conf.safUrl:
return
if not re.search("^http[s]*://", conf.safUrl):
if ":443/" in conf.safUrl:
conf.safUrl = "https://" + conf.safUrl
else:
conf.safUrl = "http://" + conf.safUrl
if conf.saFreq <= 0:
errMsg = "please provide a valid value (>0) for safe frequency (--safe-freq) while using safe URL feature"
raise SqlmapSyntaxException(errMsg)
def _setPrefixSuffix():
if conf.prefix is not None and conf.suffix is not None:
# Create a custom boundary object for user's supplied prefix
# and suffix
boundary = AttribDict()
boundary.level = 1
boundary.clause = [0]
boundary.where = [1, 2, 3]
boundary.prefix = conf.prefix
boundary.suffix = conf.suffix
if " like" in boundary.suffix.lower():
if "'" in boundary.suffix.lower():
boundary.ptype = 3
elif '"' in boundary.suffix.lower():
boundary.ptype = 5
elif "'" in boundary.suffix:
boundary.ptype = 2
elif '"' in boundary.suffix:
boundary.ptype = 4
else:
boundary.ptype = 1
# user who provides --prefix/--suffix does not want other boundaries
# to be tested for
conf.boundaries = [boundary]
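# Illustrative ptype classification by the logic above (hypothetical suffixes):
#
#     --suffix=") AND (1=1"     -> ptype 1 (unquoted)
#     --suffix="' AND 'a'='a"   -> ptype 2 (single-quoted)
#     --suffix="' LIKE 'a"      -> ptype 3 (single-quoted LIKE)
#     --suffix='" AND "a"="a'   -> ptype 4 (double-quoted)
#     --suffix='" LIKE "a'      -> ptype 5 (double-quoted LIKE)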
def _setAuthCred():
"""
Adds authentication credentials (if any) for the current target to the password
manager (used by the connection handler)
"""
if kb.passwordMgr and all(_ is not None for _ in (conf.scheme, conf.hostname, conf.port, conf.authUsername, conf.authPassword)):
kb.passwordMgr.add_password(None, "%s://%s:%d" % (conf.scheme, conf.hostname, conf.port), conf.authUsername, conf.authPassword)
def _setHTTPAuthentication():
"""
Check and set the HTTP(s) authentication method (Basic, Digest, NTLM or PKI),
username and password for the first three methods, or PEM private key file for
PKI authentication
"""
global authHandler
if not conf.authType and not conf.authCred and not conf.authPrivate:
return
elif conf.authType and not conf.authCred and not conf.authPrivate:
errMsg = "you specified the HTTP authentication type, but "
errMsg += "did not provide the credentials"
raise SqlmapSyntaxException(errMsg)
elif not conf.authType and conf.authCred:
errMsg = "you specified the HTTP authentication credentials, "
errMsg += "but did not provide the type"
raise SqlmapSyntaxException(errMsg)
elif conf.authType.lower() not in (AUTH_TYPE.BASIC, AUTH_TYPE.DIGEST, AUTH_TYPE.NTLM, AUTH_TYPE.PKI):
errMsg = "HTTP authentication type value must be "
errMsg += "Basic, Digest, NTLM or PKI"
raise SqlmapSyntaxException(errMsg)
if not conf.authPrivate:
debugMsg = "setting the HTTP authentication type and credentials"
logger.debug(debugMsg)
aTypeLower = conf.authType.lower()
if aTypeLower in (AUTH_TYPE.BASIC, AUTH_TYPE.DIGEST):
regExp = "^(.*?):(.*?)$"
errMsg = "HTTP %s authentication credentials " % aTypeLower
errMsg += "value must be in format 'username:password'"
elif aTypeLower == AUTH_TYPE.NTLM:
regExp = "^(.*\\\\.*):(.*?)$"
errMsg = "HTTP NTLM authentication credentials value must "
errMsg += "be in format 'DOMAIN\username:password'"
elif aTypeLower == AUTH_TYPE.PKI:
errMsg = "HTTP PKI authentication require "
errMsg += "usage of option `--auth-pki`"
raise SqlmapSyntaxException(errMsg)
aCredRegExp = re.search(regExp, conf.authCred)
if not aCredRegExp:
raise SqlmapSyntaxException(errMsg)
conf.authUsername = aCredRegExp.group(1)
conf.authPassword = aCredRegExp.group(2)
kb.passwordMgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
_setAuthCred()
if aTypeLower == AUTH_TYPE.BASIC:
authHandler = SmartHTTPBasicAuthHandler(kb.passwordMgr)
elif aTypeLower == AUTH_TYPE.DIGEST:
authHandler = urllib2.HTTPDigestAuthHandler(kb.passwordMgr)
elif aTypeLower == AUTH_TYPE.NTLM:
try:
from ntlm import HTTPNtlmAuthHandler
except ImportError:
errMsg = "sqlmap requires Python NTLM third-party library "
errMsg += "in order to authenticate via NTLM, "
errMsg += "http://code.google.com/p/python-ntlm/"
raise SqlmapMissingDependence(errMsg)
authHandler = HTTPNtlmAuthHandler.HTTPNtlmAuthHandler(kb.passwordMgr)
else:
debugMsg = "setting the HTTP(s) authentication PEM private key"
logger.debug(debugMsg)
key_file = os.path.expanduser(conf.authPrivate)
checkFile(key_file)
authHandler = HTTPSPKIAuthHandler(key_file)
def _setHTTPExtraHeaders():
if conf.headers:
debugMsg = "setting extra HTTP headers"
logger.debug(debugMsg)
conf.headers = conf.headers.split("\n") if "\n" in conf.headers else conf.headers.split("\\n")
for headerValue in conf.headers:
if headerValue.count(':') >= 1:
header, value = (_.lstrip() for _ in headerValue.split(":", 1))
if header and value:
conf.httpHeaders.append((header, value))
else:
errMsg = "invalid header value: %s. Valid header format is 'name:value'" % repr(headerValue).lstrip('u')
raise SqlmapSyntaxException(errMsg)
elif not conf.httpHeaders or len(conf.httpHeaders) == 1:
conf.httpHeaders.append((HTTP_HEADER.ACCEPT_LANGUAGE, "en-us,en;q=0.5"))
if not conf.charset:
conf.httpHeaders.append((HTTP_HEADER.ACCEPT_CHARSET, "ISO-8859-15,utf-8;q=0.7,*;q=0.7"))
else:
conf.httpHeaders.append((HTTP_HEADER.ACCEPT_CHARSET, "%s;q=0.7,*;q=0.1" % conf.charset))
# Invalidating any caching mechanism in between
# Reference: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
conf.httpHeaders.append((HTTP_HEADER.CACHE_CONTROL, "no-cache,no-store"))
conf.httpHeaders.append((HTTP_HEADER.PRAGMA, "no-cache"))
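# Illustrative --headers value (hypothetical): "X-Forwarded-For: 127.0.0.1\nReferer: http://example.com/"
# is split on (possibly escaped) newlines and appended to conf.httpHeaders as
# ("X-Forwarded-For", "127.0.0.1") and ("Referer", "http://example.com/").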
def _defaultHTTPUserAgent():
"""
@return: default sqlmap HTTP User-Agent header
@rtype: C{str}
"""
return "%s (%s)" % (VERSION_STRING, SITE)
# Firefox 3 running on Ubuntu 9.04 updated at April 2009
#return "Mozilla/5.0 (X11; U; Linux i686; en-GB; rv:1.9.0.9) Gecko/2009042113 Ubuntu/9.04 (jaunty) Firefox/3.0.9"
# Internet Explorer 7.0 running on Windows 2003 Service Pack 2 english
# updated at March 2009
#return "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)"
def _setHTTPUserAgent():
"""
Set the HTTP User-Agent header.
Depending on the user options it can be:
* The default sqlmap string
* A custom value provided as user option
* A random value read from a list of User-Agent headers from a
file chosen as user option
"""
if conf.mobile:
message = "which smartphone do you want sqlmap to imitate "
message += "through HTTP User-Agent header?\n"
items = sorted(getPublicTypeMembers(MOBILES, True))
for count in xrange(len(items)):
item = items[count]
message += "[%d] %s%s\n" % (count + 1, item[0], " (default)" if item == MOBILES.IPHONE else "")
test = readInput(message.rstrip('\n'), default=items.index(MOBILES.IPHONE) + 1)
try:
item = items[int(test) - 1]
except:
item = MOBILES.IPHONE
conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, item[1]))
elif conf.agent:
debugMsg = "setting the HTTP User-Agent header"
logger.debug(debugMsg)
conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, conf.agent))
elif not conf.randomAgent:
_ = True
for header, _ in conf.httpHeaders:
if header == HTTP_HEADER.USER_AGENT:
_ = False
break
if _:
conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, _defaultHTTPUserAgent()))
else:
if not kb.userAgents:
debugMsg = "loading random HTTP User-Agent header(s) from "
debugMsg += "file '%s'" % paths.USER_AGENTS
logger.debug(debugMsg)
try:
kb.userAgents = getFileItems(paths.USER_AGENTS)
except IOError:
warnMsg = "unable to read HTTP User-Agent header "
warnMsg += "file '%s'" % paths.USER_AGENTS
logger.warn(warnMsg)
conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, _defaultHTTPUserAgent()))
return
userAgent = random.sample(kb.userAgents or [_defaultHTTPUserAgent()], 1)[0]
infoMsg = "fetched random HTTP User-Agent header from "
infoMsg += "file '%s': '%s'" % (paths.USER_AGENTS, userAgent)
logger.info(infoMsg)
conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, userAgent))
def _setHTTPReferer():
"""
Set the HTTP Referer
"""
if conf.referer:
debugMsg = "setting the HTTP Referer header"
logger.debug(debugMsg)
conf.httpHeaders.append((HTTP_HEADER.REFERER, conf.referer))
def _setHTTPCookies():
"""
Set the HTTP Cookie header
"""
if conf.cookie:
debugMsg = "setting the HTTP Cookie header"
logger.debug(debugMsg)
conf.httpHeaders.append((HTTP_HEADER.COOKIE, conf.cookie))
def _setHTTPTimeout():
"""
Set the HTTP timeout
"""
if conf.timeout:
debugMsg = "setting the HTTP timeout"
logger.debug(debugMsg)
conf.timeout = float(conf.timeout)
if conf.timeout < 3.0:
warnMsg = "the minimum HTTP timeout is 3 seconds, sqlmap "
warnMsg += "will going to reset it"
logger.warn(warnMsg)
conf.timeout = 3.0
else:
conf.timeout = 30.0
socket.setdefaulttimeout(conf.timeout)
def _checkDependencies():
"""
Checks for missing dependencies.
"""
if conf.dependencies:
checkDependencies()
def _cleanupOptions():
"""
Cleanup configuration attributes.
"""
debugMsg = "cleaning up configuration parameters"
logger.debug(debugMsg)
width = getConsoleWidth()
if conf.eta:
conf.progressWidth = width - 26
else:
conf.progressWidth = width - 46
for key, value in conf.items():
if value and any(key.endswith(_) for _ in ("Path", "File")):
conf[key] = os.path.expanduser(value)
if conf.testParameter:
conf.testParameter = urldecode(conf.testParameter)
conf.testParameter = conf.testParameter.replace(" ", "")
conf.testParameter = re.split(PARAMETER_SPLITTING_REGEX, conf.testParameter)
else:
conf.testParameter = []
if conf.user:
conf.user = conf.user.replace(" ", "")
if conf.rParam:
conf.rParam = conf.rParam.replace(" ", "")
conf.rParam = re.split(PARAMETER_SPLITTING_REGEX, conf.rParam)
else:
conf.rParam = []
if conf.paramDel and '\\' in conf.paramDel:
conf.paramDel = conf.paramDel.decode("string_escape")
if conf.skip:
conf.skip = conf.skip.replace(" ", "")
conf.skip = re.split(PARAMETER_SPLITTING_REGEX, conf.skip)
else:
conf.skip = []
if conf.delay:
conf.delay = float(conf.delay)
if conf.rFile:
conf.rFile = ntToPosixSlashes(normalizePath(conf.rFile))
if conf.wFile:
conf.wFile = ntToPosixSlashes(normalizePath(conf.wFile))
if conf.dFile:
conf.dFile = ntToPosixSlashes(normalizePath(conf.dFile))
if conf.sitemapUrl and not conf.sitemapUrl.lower().startswith("http"):
conf.sitemapUrl = "http%s://%s" % ('s' if conf.forceSSL else '', conf.sitemapUrl)
if conf.msfPath:
conf.msfPath = ntToPosixSlashes(normalizePath(conf.msfPath))
if conf.tmpPath:
conf.tmpPath = ntToPosixSlashes(normalizePath(conf.tmpPath))
if any((conf.googleDork, conf.logFile, conf.bulkFile, conf.sitemapUrl, conf.forms, conf.crawlDepth)):
conf.multipleTargets = True
if conf.optimize:
setOptimize()
if conf.data:
conf.data = re.sub(INJECT_HERE_MARK.replace(" ", r"[^A-Za-z]*"), CUSTOM_INJECTION_MARK_CHAR, conf.data, re.I)
if conf.url:
conf.url = re.sub(INJECT_HERE_MARK.replace(" ", r"[^A-Za-z]*"), CUSTOM_INJECTION_MARK_CHAR, conf.url, re.I)
if conf.os:
conf.os = conf.os.capitalize()
if conf.dbms:
conf.dbms = conf.dbms.capitalize()
if conf.testFilter:
conf.testFilter = conf.testFilter.strip('*+')
conf.testFilter = re.sub(r"([^.])([*+])", "\g<1>.\g<2>", conf.testFilter)
if "timeSec" not in kb.explicitSettings:
if conf.tor:
conf.timeSec = 2 * conf.timeSec
kb.adjustTimeDelay = ADJUST_TIME_DELAY.DISABLE
warnMsg = "increasing default value for "
warnMsg += "option '--time-sec' to %d because " % conf.timeSec
warnMsg += "switch '--tor' was provided"
logger.warn(warnMsg)
else:
kb.adjustTimeDelay = ADJUST_TIME_DELAY.DISABLE
if conf.retries:
conf.retries = min(conf.retries, MAX_CONNECT_RETRIES)
if conf.code:
conf.code = int(conf.code)
if conf.csvDel:
conf.csvDel = conf.csvDel.decode("string_escape") # e.g. '\\t' -> '\t'
if conf.torPort and isinstance(conf.torPort, basestring) and conf.torPort.isdigit():
conf.torPort = int(conf.torPort)
if conf.torType:
conf.torType = conf.torType.upper()
if conf.outputDir:
paths.SQLMAP_OUTPUT_PATH = conf.outputDir
setPaths()
if conf.string:
try:
conf.string = conf.string.decode("unicode_escape")
except:
charset = string.whitespace.replace(" ", "")
for _ in charset:
conf.string = conf.string.replace(_.encode("string_escape"), _)
if conf.getAll:
map(lambda x: conf.__setitem__(x, True), WIZARD.ALL)
if conf.noCast:
for _ in DUMP_REPLACEMENTS.keys():
del DUMP_REPLACEMENTS[_]
if conf.dumpFormat:
conf.dumpFormat = conf.dumpFormat.upper()
if conf.torType:
conf.torType = conf.torType.upper()
if conf.col:
conf.col = re.sub(r"\s*,\s*", ",", conf.col)
if conf.excludeCol:
conf.excludeCol = re.sub(r"\s*,\s*", ",", conf.excludeCol)
if conf.binaryFields:
conf.binaryFields = re.sub(r"\s*,\s*", ",", conf.binaryFields)
threadData = getCurrentThreadData()
threadData.reset()
def _purgeOutput():
"""
Safely removes (purges) output directory.
"""
if conf.purgeOutput:
purge(paths.SQLMAP_OUTPUT_PATH)
def _setConfAttributes():
"""
This function sets some needed attributes in the configuration
singleton.
"""
debugMsg = "initializing the configuration"
logger.debug(debugMsg)
conf.authUsername = None
conf.authPassword = None
conf.boundaries = []
conf.cj = None
conf.dbmsConnector = None
conf.dbmsHandler = None
conf.dnsServer = None
conf.dumpPath = None
conf.hashDB = None
conf.hashDBFile = None
conf.httpHeaders = []
conf.hostname = None
conf.ipv6 = False
conf.multipleTargets = False
conf.outputPath = None
conf.paramDict = {}
conf.parameters = {}
conf.path = None
conf.port = None
conf.proxyList = []
conf.resultsFilename = None
conf.resultsFP = None
conf.scheme = None
conf.tests = []
conf.trafficFP = None
conf.wFileType = None
def _setKnowledgeBaseAttributes(flushAll=True):
"""
This function sets some needed attributes in the knowledge base
singleton.
"""
debugMsg = "initializing the knowledge base"
logger.debug(debugMsg)
kb.absFilePaths = set()
kb.adjustTimeDelay = None
kb.alerted = False
kb.alwaysRefresh = None
kb.arch = None
kb.authHeader = None
kb.bannerFp = AttribDict()
kb.binaryField = False
kb.brute = AttribDict({"tables": [], "columns": []})
kb.bruteMode = False
kb.cache = AttribDict()
kb.cache.content = {}
kb.cache.regex = {}
kb.cache.stdev = {}
kb.chars = AttribDict()
kb.chars.delimiter = randomStr(length=6, lowercase=True)
kb.chars.start = "%s%s%s" % (KB_CHARS_BOUNDARY_CHAR, randomStr(length=3, alphabet=KB_CHARS_LOW_FREQUENCY_ALPHABET), KB_CHARS_BOUNDARY_CHAR)
kb.chars.stop = "%s%s%s" % (KB_CHARS_BOUNDARY_CHAR, randomStr(length=3, alphabet=KB_CHARS_LOW_FREQUENCY_ALPHABET), KB_CHARS_BOUNDARY_CHAR)
kb.chars.at, kb.chars.space, kb.chars.dollar, kb.chars.hash_ = ("%s%s%s" % (KB_CHARS_BOUNDARY_CHAR, _, KB_CHARS_BOUNDARY_CHAR) for _ in randomStr(length=4, lowercase=True))
kb.columnExistsChoice = None
kb.commonOutputs = None
kb.counters = {}
kb.data = AttribDict()
kb.dataOutputFlag = False
# Active back-end DBMS fingerprint
kb.dbms = None
kb.dbmsVersion = [UNKNOWN_DBMS_VERSION]
kb.delayCandidates = TIME_DELAY_CANDIDATES * [0]
kb.dep = None
kb.dnsMode = False
kb.dnsTest = None
kb.docRoot = None
kb.dumpTable = None
kb.dynamicMarkings = []
kb.dynamicParameter = False
kb.endDetection = False
kb.explicitSettings = set()
kb.extendTests = None
kb.errorIsNone = True
kb.fileReadMode = False
kb.followSitemapRecursion = None
kb.forcedDbms = None
kb.forcePartialUnion = False
kb.headersFp = {}
kb.heuristicDbms = None
kb.heuristicMode = False
kb.heuristicTest = None
kb.hintValue = None
kb.htmlFp = []
kb.httpErrorCodes = {}
kb.inferenceMode = False
kb.ignoreCasted = None
kb.ignoreNotFound = False
kb.ignoreTimeout = False
kb.injection = InjectionDict()
kb.injections = []
kb.laggingChecked = False
kb.lastParserStatus = None
kb.locks = AttribDict()
for _ in ("cache", "count", "index", "io", "limit", "log", "redirect", "request", "value"):
kb.locks[_] = threading.Lock()
kb.matchRatio = None
kb.maxConnectionsFlag = False
kb.mergeCookies = None
kb.multiThreadMode = False
kb.negativeLogic = False
kb.nullConnection = None
kb.orderByColumns = None
kb.originalCode = None
kb.originalPage = None
kb.originalTimeDelay = None
kb.originalUrls = dict()
# Back-end DBMS underlying operating system fingerprint via banner (-b)
# parsing
kb.os = None
kb.osVersion = None
kb.osSP = None
kb.pageCompress = True
kb.pageTemplate = None
kb.pageTemplates = dict()
kb.pageEncoding = DEFAULT_PAGE_ENCODING
kb.pageStable = None
kb.partRun = None
kb.permissionFlag = False
kb.postHint = None
kb.postSpaceToPlus = False
kb.postUrlEncode = True
kb.prependFlag = False
kb.processResponseCounter = 0
kb.previousMethod = None
kb.processUserMarks = None
kb.proxyAuthHeader = None
kb.queryCounter = 0
kb.redirectChoice = None
kb.reflectiveMechanism = True
kb.reflectiveCounters = {REFLECTIVE_COUNTER.MISS: 0, REFLECTIVE_COUNTER.HIT: 0}
kb.requestCounter = 0
kb.resendPostOnRedirect = None
kb.responseTimes = []
kb.resumeValues = True
kb.safeCharEncode = False
kb.singleLogFlags = set()
kb.reduceTests = None
kb.stickyDBMS = False
kb.stickyLevel = None
kb.storeCrawlingChoice = None
kb.storeHashesChoice = None
kb.suppressResumeInfo = False
kb.technique = None
kb.testMode = False
kb.testQueryCount = 0
kb.testType = None
kb.threadContinue = True
kb.threadException = False
kb.tableExistsChoice = None
kb.timeValidCharsRun = 0
kb.uChar = NULL
kb.unionDuplicates = False
kb.xpCmdshellAvailable = False
if flushAll:
kb.headerPaths = {}
kb.keywords = set(getFileItems(paths.SQL_KEYWORDS))
kb.passwordMgr = None
kb.skipVulnHost = None
kb.tamperFunctions = []
kb.targets = oset()
kb.testedParams = set()
kb.userAgents = None
kb.vainRun = True
kb.vulnHosts = set()
kb.wafFunctions = []
kb.wordlists = None
def _useWizardInterface():
"""
Presents a simple wizard interface for beginner users
"""
if not conf.wizard:
return
logger.info("starting wizard interface")
while not conf.url:
message = "Please enter full target URL (-u): "
conf.url = readInput(message, default=None)
message = "%s data (--data) [Enter for None]: " % ((conf.method if conf.method != HTTPMETHOD.GET else conf.method) or HTTPMETHOD.POST)
conf.data = readInput(message, default=None)
if not (filter(lambda _: '=' in unicode(_), (conf.url, conf.data)) or '*' in conf.url):
warnMsg = "no GET and/or %s parameter(s) found for testing " % ((conf.method if conf.method != HTTPMETHOD.GET else conf.method) or HTTPMETHOD.POST)
warnMsg += "(e.g. GET parameter 'id' in 'http://www.site.com/vuln.php?id=1'). "
if not conf.crawlDepth and not conf.forms:
warnMsg += "Will search for forms"
conf.forms = True
logger.warn(warnMsg)
choice = None
while choice is None or choice not in ("", "1", "2", "3"):
message = "Injection difficulty (--level/--risk). Please choose:\n"
message += "[1] Normal (default)\n[2] Medium\n[3] Hard"
choice = readInput(message, default='1')
if choice == '2':
conf.risk = 2
conf.level = 3
elif choice == '3':
conf.risk = 3
conf.level = 5
else:
conf.risk = 1
conf.level = 1
if not conf.getAll:
choice = None
while choice is None or choice not in ("", "1", "2", "3"):
message = "Enumeration (--banner/--current-user/etc). Please choose:\n"
message += "[1] Basic (default)\n[2] Intermediate\n[3] All"
choice = readInput(message, default='1')
if choice == '2':
map(lambda x: conf.__setitem__(x, True), WIZARD.INTERMEDIATE)
elif choice == '3':
map(lambda x: conf.__setitem__(x, True), WIZARD.ALL)
else:
map(lambda x: conf.__setitem__(x, True), WIZARD.BASIC)
logger.debug("muting sqlmap.. it will do the magic for you")
conf.verbose = 0
conf.batch = True
conf.threads = 4
dataToStdout("\nsqlmap is running, please wait..\n\n")
def _saveCmdline():
"""
Saves the command line options to a sqlmap configuration INI file.
"""
if not conf.saveCmdline:
return
debugMsg = "saving command line options on a sqlmap configuration INI file"
logger.debug(debugMsg)
config = UnicodeRawConfigParser()
userOpts = {}
for family in optDict.keys():
userOpts[family] = []
for option, value in conf.items():
for family, optionData in optDict.items():
if option in optionData:
userOpts[family].append((option, value, optionData[option]))
for family, optionData in userOpts.items():
config.add_section(family)
optionData.sort()
for option, value, datatype in optionData:
if datatype and isListLike(datatype):
datatype = datatype[0]
if value is None:
if datatype == OPTION_TYPE.BOOLEAN:
value = "False"
elif datatype in (OPTION_TYPE.INTEGER, OPTION_TYPE.FLOAT):
if option in defaults:
value = str(defaults[option])
else:
value = "0"
elif datatype == OPTION_TYPE.STRING:
value = ""
if isinstance(value, basestring):
value = value.replace("\n", "\n ")
config.set(family, option, value)
confFP = openFile(paths.SQLMAP_CONFIG, "wb")
config.write(confFP)
infoMsg = "saved command line options on '%s' configuration file" % paths.SQLMAP_CONFIG
logger.info(infoMsg)
def setVerbosity():
"""
This function sets the verbosity of sqlmap output messages.
"""
if conf.verbose is None:
conf.verbose = 1
conf.verbose = int(conf.verbose)
if conf.verbose == 0:
logger.setLevel(logging.ERROR)
elif conf.verbose == 1:
logger.setLevel(logging.INFO)
elif conf.verbose > 2 and conf.eta:
conf.verbose = 2
logger.setLevel(logging.DEBUG)
elif conf.verbose == 2:
logger.setLevel(logging.DEBUG)
elif conf.verbose == 3:
logger.setLevel(CUSTOM_LOGGING.PAYLOAD)
elif conf.verbose == 4:
logger.setLevel(CUSTOM_LOGGING.TRAFFIC_OUT)
elif conf.verbose >= 5:
logger.setLevel(CUSTOM_LOGGING.TRAFFIC_IN)
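# Verbosity-to-logging-level mapping implemented above:
#   0 -> ERROR, 1 -> INFO, 2 -> DEBUG, 3 -> PAYLOAD, 4 -> TRAFFIC_OUT, >=5 -> TRAFFIC_IN
#   (with '--eta' active, levels above 2 are capped at DEBUG)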
def _mergeOptions(inputOptions, overrideOptions):
"""
Merge command line options with configuration file and default options.
@param inputOptions: optparse object with command line options.
@type inputOptions: C{instance}
"""
if inputOptions.pickledOptions:
inputOptions = base64unpickle(inputOptions.pickledOptions)
if inputOptions.configFile:
configFileParser(inputOptions.configFile)
if hasattr(inputOptions, "items"):
inputOptionsItems = inputOptions.items()
else:
inputOptionsItems = inputOptions.__dict__.items()
for key, value in inputOptionsItems:
if key not in conf or value not in (None, False) or overrideOptions:
conf[key] = value
for key, value in conf.items():
if value is not None:
kb.explicitSettings.add(key)
for key, value in defaults.items():
if hasattr(conf, key) and conf[key] is None:
conf[key] = value
_ = {}
for key, value in os.environ.items():
if key.upper().startswith(SQLMAP_ENVIRONMENT_PREFIX):
_[key[len(SQLMAP_ENVIRONMENT_PREFIX):].upper()] = value
types_ = {}
for group in optDict.keys():
types_.update(optDict[group])
for key in conf:
if key.upper() in _ and key in types_:
value = _[key.upper()]
if types_[key] == OPTION_TYPE.BOOLEAN:
try:
value = bool(value)
except ValueError:
value = False
elif types_[key] == OPTION_TYPE.INTEGER:
try:
value = int(value)
except ValueError:
value = 0
elif types_[key] == OPTION_TYPE.FLOAT:
try:
value = float(value)
except ValueError:
value = 0.0
conf[key] = value
mergedOptions.update(conf)
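# Illustrative precedence (hypothetical values, assuming SQLMAP_ENVIRONMENT_PREFIX
# is "SQLMAP_"): exporting SQLMAP_THREADS=4 in the shell overrides both the
# configuration file value and the built-in default for conf.threads, with the
# string "4" cast to an integer via the OPTION_TYPE lookup above.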
def _setTrafficOutputFP():
if conf.trafficFile:
infoMsg = "setting file for logging HTTP traffic"
logger.info(infoMsg)
conf.trafficFP = openFile(conf.trafficFile, "w+")
def _setDNSServer():
if not conf.dnsName:
return
infoMsg = "setting up DNS server instance"
logger.info(infoMsg)
isAdmin = runningAsAdmin()
if isAdmin:
try:
conf.dnsServer = DNSServer()
conf.dnsServer.run()
except socket.error, msg:
errMsg = "there was an error while setting up "
errMsg += "DNS server instance ('%s')" % msg
raise SqlmapGenericException(errMsg)
else:
errMsg = "you need to run sqlmap as an administrator "
errMsg += "if you want to perform a DNS data exfiltration attack "
errMsg += "as it will need to listen on privileged UDP port 53 "
errMsg += "for incoming address resolution attempts"
raise SqlmapMissingPrivileges(errMsg)
def _setProxyList():
if not conf.proxyFile:
return
conf.proxyList = []
for match in re.finditer(r"(?i)((http[^:]*|socks[^:]*)://)?([\w.]+):(\d+)", readCachedFileContent(conf.proxyFile)):
_, type_, address, port = match.groups()
conf.proxyList.append("%s://%s:%s" % (type_ or "http", address, port))
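# Illustrative proxy file lines accepted by the pattern above (hypothetical):
#
#     http://10.0.0.1:8080
#     socks5://proxy.example:1080
#     10.0.0.2:3128        (scheme omitted; defaults to "http")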
def _setTorProxySettings():
if not conf.tor:
return
if conf.torType == PROXY_TYPE.HTTP:
_setTorHttpProxySettings()
else:
_setTorSocksProxySettings()
def _setTorHttpProxySettings():
infoMsg = "setting Tor HTTP proxy settings"
logger.info(infoMsg)
found = None
for port in (DEFAULT_TOR_HTTP_PORTS if not conf.torPort else (conf.torPort,)):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((LOCALHOST, port))
found = port
break
except socket.error:
pass
s.close()
if found:
conf.proxy = "http://%s:%d" % (LOCALHOST, found)
else:
errMsg = "can't establish connection with the Tor proxy. "
errMsg += "Please make sure that you have Vidalia, Privoxy or "
errMsg += "Polipo bundle installed for you to be able to "
errMsg += "successfully use switch '--tor' "
if IS_WIN:
errMsg += "(e.g. https://www.torproject.org/projects/vidalia.html.en)"
else:
errMsg += "(e.g. http://www.coresec.org/2011/04/24/sqlmap-with-tor/)"
raise SqlmapConnectionException(errMsg)
if not conf.checkTor:
warnMsg = "use switch '--check-tor' at "
warnMsg += "your own convenience when accessing "
warnMsg += "Tor anonymizing network because of "
warnMsg += "known issues with default settings of various 'bundles' "
warnMsg += "(e.g. Vidalia)"
logger.warn(warnMsg)
def _setTorSocksProxySettings():
infoMsg = "setting Tor SOCKS proxy settings"
logger.info(infoMsg)
# Has to be SOCKS5 to prevent DNS leaks (http://en.wikipedia.org/wiki/Tor_%28anonymity_network%29)
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5 if conf.torType == PROXY_TYPE.SOCKS5 else socks.PROXY_TYPE_SOCKS4, LOCALHOST, conf.torPort or DEFAULT_TOR_SOCKS_PORT)
socks.wrapmodule(urllib2)
def _checkTor():
if not conf.checkTor:
return
infoMsg = "checking Tor connection"
logger.info(infoMsg)
page, _, _ = Request.getPage(url="https://check.torproject.org/", raise404=False)
if not page or 'Congratulations' not in page:
errMsg = "it seems that Tor is not properly set. Please try using options '--tor-type' and/or '--tor-port'"
raise SqlmapConnectionException(errMsg)
else:
infoMsg = "Tor is properly being used"
logger.info(infoMsg)
def _basicOptionValidation():
if conf.limitStart is not None and not (isinstance(conf.limitStart, int) and conf.limitStart > 0):
errMsg = "value for option '--start' (limitStart) must be an integer value greater than zero (>0)"
raise SqlmapSyntaxException(errMsg)
if conf.limitStop is not None and not (isinstance(conf.limitStop, int) and conf.limitStop > 0):
errMsg = "value for option '--stop' (limitStop) must be an integer value greater than zero (>0)"
raise SqlmapSyntaxException(errMsg)
if conf.level is not None and not (isinstance(conf.level, int) and conf.level >= 1 and conf.level <= 5):
errMsg = "value for option '--level' must be an integer value from range [1, 5]"
raise SqlmapSyntaxException(errMsg)
if conf.risk is not None and not (isinstance(conf.risk, int) and conf.risk >= 1 and conf.risk <= 3):
errMsg = "value for option '--risk' must be an integer value from range [1, 3]"
raise SqlmapSyntaxException(errMsg)
if isinstance(conf.limitStart, int) and conf.limitStart > 0 and \
isinstance(conf.limitStop, int) and conf.limitStop < conf.limitStart:
errMsg = "value for option '--start' (limitStart) must be smaller or equal than value for --stop (limitStop) option"
raise SqlmapSyntaxException(errMsg)
if isinstance(conf.firstChar, int) and conf.firstChar > 0 and \
isinstance(conf.lastChar, int) and conf.lastChar < conf.firstChar:
errMsg = "value for option '--first' (firstChar) must be smaller than or equal to value for --last (lastChar) option"
raise SqlmapSyntaxException(errMsg)
if isinstance(conf.cpuThrottle, int) and (conf.cpuThrottle > 100 or conf.cpuThrottle < 0):
errMsg = "value for option '--cpu-throttle' (cpuThrottle) must be in range [0,100]"
raise SqlmapSyntaxException(errMsg)
if conf.textOnly and conf.nullConnection:
errMsg = "switch '--text-only' is incompatible with switch '--null-connection'"
raise SqlmapSyntaxException(errMsg)
if conf.direct and conf.url:
errMsg = "option '-d' is incompatible with option '-u' ('--url')"
raise SqlmapSyntaxException(errMsg)
if conf.titles and conf.nullConnection:
errMsg = "switch '--titles' is incompatible with switch '--null-connection'"
raise SqlmapSyntaxException(errMsg)
if conf.dumpTable and conf.search:
errMsg = "switch '--dump' is incompatible with switch '--search'"
raise SqlmapSyntaxException(errMsg)
if conf.data and conf.nullConnection:
errMsg = "option '--data' is incompatible with switch '--null-connection'"
raise SqlmapSyntaxException(errMsg)
if conf.string and conf.nullConnection:
errMsg = "option '--string' is incompatible with switch '--null-connection'"
raise SqlmapSyntaxException(errMsg)
if conf.notString and conf.nullConnection:
errMsg = "option '--not-string' is incompatible with switch '--null-connection'"
raise SqlmapSyntaxException(errMsg)
if conf.noCast and conf.hexConvert:
errMsg = "switch '--no-cast' is incompatible with switch '--hex'"
raise SqlmapSyntaxException(errMsg)
if conf.dumpAll and conf.search:
errMsg = "switch '--dump-all' is incompatible with switch '--search'"
raise SqlmapSyntaxException(errMsg)
if conf.string and conf.notString:
errMsg = "option '--string' is incompatible with switch '--not-string'"
raise SqlmapSyntaxException(errMsg)
if conf.regexp and conf.nullConnection:
errMsg = "option '--regexp' is incompatible with switch '--null-connection'"
raise SqlmapSyntaxException(errMsg)
if conf.dumpTable and conf.dumpAll:
errMsg = "switch '--dump' is incompatible with switch '--dump-all'"
raise SqlmapSyntaxException(errMsg)
if conf.predictOutput and (conf.threads > 1 or conf.optimize):
errMsg = "switch '--predict-output' is incompatible with option '--threads' and switch '-o'"
raise SqlmapSyntaxException(errMsg)
if conf.threads > MAX_NUMBER_OF_THREADS:
errMsg = "maximum number of used threads is %d avoiding potential connection issues" % MAX_NUMBER_OF_THREADS
raise SqlmapSyntaxException(errMsg)
if conf.forms and not any((conf.url, conf.googleDork, conf.bulkFile, conf.sitemapUrl)):
errMsg = "switch '--forms' requires usage of option '-u' ('--url'), '-g', '-m' or '-x'"
raise SqlmapSyntaxException(errMsg)
if conf.csrfUrl and not conf.csrfToken:
errMsg = "option '--csrf-url' requires usage of option '--csrf-token'"
raise SqlmapSyntaxException(errMsg)
if conf.csrfToken and conf.threads > 1:
errMsg = "option '--csrf-url' is incompatible with option '--threads'"
raise SqlmapSyntaxException(errMsg)
if conf.requestFile and conf.url and conf.url != DUMMY_URL:
errMsg = "option '-r' is incompatible with option '-u' ('--url')"
raise SqlmapSyntaxException(errMsg)
if conf.direct and conf.proxy:
errMsg = "option '-d' is incompatible with option '--proxy'"
raise SqlmapSyntaxException(errMsg)
if conf.direct and conf.tor:
errMsg = "option '-d' is incompatible with switch '--tor'"
raise SqlmapSyntaxException(errMsg)
if not conf.tech:
errMsg = "option '--technique' can't be empty"
raise SqlmapSyntaxException(errMsg)
if conf.tor and conf.ignoreProxy:
errMsg = "switch '--tor' is incompatible with switch '--ignore-proxy'"
raise SqlmapSyntaxException(errMsg)
if conf.tor and conf.proxy:
errMsg = "switch '--tor' is incompatible with option '--proxy'"
raise SqlmapSyntaxException(errMsg)
if conf.checkTor and not any((conf.tor, conf.proxy)):
errMsg = "switch '--check-tor' requires usage of switch '--tor' (or option '--proxy' with HTTP proxy address using Tor)"
raise SqlmapSyntaxException(errMsg)
if conf.torPort is not None and not (isinstance(conf.torPort, int) and conf.torPort > 0):
errMsg = "value for option '--tor-port' must be a positive integer"
raise SqlmapSyntaxException(errMsg)
if conf.torType not in getPublicTypeMembers(PROXY_TYPE, True):
errMsg = "option '--tor-type' accepts one of following values: %s" % ", ".join(getPublicTypeMembers(PROXY_TYPE, True))
raise SqlmapSyntaxException(errMsg)
if conf.dumpFormat not in getPublicTypeMembers(DUMP_FORMAT, True):
errMsg = "option '--dump-format' accepts one of following values: %s" % ", ".join(getPublicTypeMembers(DUMP_FORMAT, True))
raise SqlmapSyntaxException(errMsg)
if conf.skip and conf.testParameter:
errMsg = "option '--skip' is incompatible with option '-p'"
raise SqlmapSyntaxException(errMsg)
if conf.mobile and conf.agent:
errMsg = "switch '--mobile' is incompatible with option '--user-agent'"
raise SqlmapSyntaxException(errMsg)
if conf.proxy and conf.ignoreProxy:
errMsg = "option '--proxy' is incompatible with switch '--ignore-proxy'"
raise SqlmapSyntaxException(errMsg)
if conf.timeSec < 1:
errMsg = "value for option '--time-sec' must be a positive integer"
raise SqlmapSyntaxException(errMsg)
if conf.uChar and not re.match(UNION_CHAR_REGEX, conf.uChar):
errMsg = "value for option '--union-char' must be an alpha-numeric value (e.g. 1)"
raise SqlmapSyntaxException(errMsg)
if isinstance(conf.uCols, basestring):
if not conf.uCols.isdigit() and ("-" not in conf.uCols or len(conf.uCols.split("-")) != 2):
errMsg = "value for option '--union-cols' must be a range with hyphon "
errMsg += "(e.g. 1-10) or integer value (e.g. 5)"
raise SqlmapSyntaxException(errMsg)
if conf.dbmsCred and ':' not in conf.dbmsCred:
errMsg = "value for option '--dbms-cred' must be in "
errMsg += "format <username>:<password> (e.g. \"root:pass\")"
raise SqlmapSyntaxException(errMsg)
if conf.charset:
_ = checkCharEncoding(conf.charset, False)
if _ is None:
errMsg = "unknown charset '%s'. Please visit " % conf.charset
errMsg += "'%s' to get the full list of " % CODECS_LIST_PAGE
errMsg += "supported charsets"
raise SqlmapSyntaxException(errMsg)
else:
conf.charset = _
if conf.loadCookies:
if not os.path.exists(conf.loadCookies):
errMsg = "cookies file '%s' does not exist" % conf.loadCookies
raise SqlmapFilePathException(errMsg)
def _resolveCrossReferences():
lib.core.threads.readInput = readInput
lib.core.common.getPageTemplate = getPageTemplate
lib.core.convert.singleTimeWarnMessage = singleTimeWarnMessage
lib.request.connect.setHTTPProxy = _setHTTPProxy
lib.controller.checks.setVerbosity = setVerbosity
def initOptions(inputOptions=AttribDict(), overrideOptions=False):
if IS_WIN:
coloramainit()
_setConfAttributes()
_setKnowledgeBaseAttributes()
_mergeOptions(inputOptions, overrideOptions)
def init():
"""
Set attributes into both configuration and knowledge base singletons
based upon command line and configuration file options.
"""
_useWizardInterface()
setVerbosity()
_saveCmdline()
_setRequestFromFile()
_cleanupOptions()
_purgeOutput()
_checkDependencies()
_basicOptionValidation()
_setProxyList()
_setTorProxySettings()
_setDNSServer()
_adjustLoggingFormatter()
_setMultipleTargets()
_setTamperingFunctions()
_setWafFunctions()
_setTrafficOutputFP()
_resolveCrossReferences()
parseTargetUrl()
parseTargetDirect()
if any((conf.url, conf.logFile, conf.bulkFile, conf.sitemapUrl, conf.requestFile, conf.googleDork, conf.liveTest)):
_setHTTPTimeout()
_setHTTPExtraHeaders()
_setHTTPCookies()
_setHTTPReferer()
_setHTTPUserAgent()
_setHTTPAuthentication()
_setHTTPProxy()
_setDNSCache()
_setSafeUrl()
_setGoogleDorking()
_setBulkMultipleTargets()
_setSitemapTargets()
_urllib2Opener()
_checkTor()
_setCrawler()
_findPageForms()
_setDBMS()
_setTechnique()
_setThreads()
_setOS()
_setWriteFile()
_setMetasploit()
_setDBMSAuthentication()
loadPayloads()
_setPrefixSuffix()
update()
_loadQueries()
|
Snifer/BurpSuite-Plugins
|
Sqlmap/lib/core/option.py
|
Python
|
gpl-2.0
| 82,251
|
[
"VisIt"
] |
e78a17c8acf9f790152a3dfb4b021618bd2b36890b3aee68d6c5aadf8e83677f
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
"""
Module for objects and functions that are commonly used throughout the MooseDocs system.
"""
from .storage import Storage
from .parse_settings import match_settings, parse_settings, get_settings_as_dict
from .box import box
from .load_config import load_config, load_extensions
from .build_class_database import build_class_database
from .read import read, write, get_language
from .regex import regex
from .project_find import project_find
from .check_filenames import check_filenames
from .submodule_status import submodule_status
from .get_requirements import get_requirements
from .extract_content import extractContent, extractContentSettings, fix_moose_header
from .log import report_exception
from .report_error import report_error
from .exceptions import MooseDocsException
from .get_content import get_content, get_files, create_file_page, get_items
|
nuclear-wizard/moose
|
python/MooseDocs/common/__init__.py
|
Python
|
lgpl-2.1
| 1,172
|
[
"MOOSE"
] |
1f2f75f2d6c4158527b6e3aa61ae559d56761b4bea7eea893e7d590bd243e2a9
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This tests the scafacos p2nfft dipolar calculations by matching against
# reference data from direct summation. In 2d, reference data from the mdlc
# test case is used
import os
import numpy as np
import unittest as ut
import unittest_decorators as utx
import espressomd
import espressomd.magnetostatics as magnetostatics
from tests_common import abspath
@utx.skipIfMissingFeatures(["SCAFACOS_DIPOLES"])
class Scafacos1d2d(ut.TestCase):
def test_scafacos(self):
rho = 0.3
# This is only for box size calculation. The actual particle number is
# lower, because particles are removed from the mdlc gap region
n_particle = 100
particle_radius = 0.5
dipole_lambda = 3.0
#################################################
box_l = pow(((4 * n_particle * np.pi) / (3 * rho)),
1.0 / 3.0) * particle_radius
skin = 0.5
s = espressomd.System(box_l=[1.0, 1.0, 1.0])
s.seed = s.cell_system.get_state()['n_nodes'] * [1234]
# give Espresso some parameters
s.time_step = 0.01
s.cell_system.skin = skin
s.box_l = 3 * [box_l]
for dim in 2, 1:
print("Dimension", dim)
# Read reference data
if dim == 2:
file_prefix = "data/mdlc"
s.periodicity = [1, 1, 0]
else:
s.periodicity = [1, 0, 0]
file_prefix = "data/scafacos_dipoles_1d"
with open(abspath(file_prefix + "_reference_data_energy.dat")) as f:
ref_E = float(f.readline())
# Particles
data = np.genfromtxt(abspath(
file_prefix + "_reference_data_forces_torques.dat"))
for p in data[:, :]:
s.part.add(
id=int(p[0]), pos=p[1:4], dip=p[4:7], rotation=(1, 1, 1))
if dim == 2:
scafacos = magnetostatics.Scafacos(
prefactor=1,
method_name="p2nfft",
method_params={
"p2nfft_verbose_tuning": 0,
"pnfft_N": "80,80,160",
"pnfft_window_name": "bspline",
"pnfft_m": "4",
"p2nfft_ignore_tolerance": "1",
"pnfft_diff_ik": "0",
"p2nfft_r_cut": "6",
"p2nfft_alpha": "0.8",
"p2nfft_epsB": "0.05"})
s.actors.add(scafacos)
# change box geometry in x,y direction to ensure that
# scafacos survives it
s.box_l = np.array((1, 1, 1.3)) * box_l
else:
if dim == 1:
# 1d periodic in x
scafacos = magnetostatics.Scafacos(
prefactor=1,
method_name="p2nfft",
method_params={
"p2nfft_verbose_tuning": 1,
"pnfft_N": "32,128,128",
"pnfft_direct": 0,
"p2nfft_r_cut": 2.855,
"p2nfft_alpha": "1.5",
"p2nfft_intpol_order": "-1",
"p2nfft_reg_kernel_name": "ewald",
"p2nfft_p": "16",
"p2nfft_ignore_tolerance": "1",
"pnfft_window_name": "bspline",
"pnfft_m": "8",
"pnfft_diff_ik": "1",
"p2nfft_epsB": "0.125"})
s.box_l = np.array((1, 1, 1)) * box_l
s.actors.add(scafacos)
else:
raise Exception("This shouldn't happen.")
s.thermostat.turn_off()
s.integrator.run(0)
# Calculate aggregate errors: per-particle Euclidean deviations are
# summed over all particles and normalized by sqrt(number of particles)
err_f = np.sum(np.sqrt(
np.sum((s.part[:].f - data[:, 7:10])**2, 1)), 0) / np.sqrt(data.shape[0])
err_t = np.sum(np.sqrt(np.sum(
(s.part[:].torque_lab - data[:, 10:13])**2, 1)), 0) / np.sqrt(data.shape[0])
err_e = s.analysis.energy()["dipolar"] - ref_E
print("Energy difference", err_e)
print("Force difference", err_f)
print("Torque difference", err_t)
tol_f = 2E-3
tol_t = 2E-3
tol_e = 1E-3
self.assertLessEqual(
abs(err_e), tol_e, "Energy difference too large")
self.assertLessEqual(
abs(err_t), tol_t, "Torque difference too large")
self.assertLessEqual(
abs(err_f), tol_f, "Force difference too large")
s.part.clear()
del s.actors[0]
if __name__ == "__main__":
ut.main()
|
mkuron/espresso
|
testsuite/python/scafacos_dipoles_1d_2d.py
|
Python
|
gpl-3.0
| 5,604
|
[
"ESPResSo"
] |
87ff1676c783b46f3057f1cee42e45d9cfb40da241313a3cabf4018326191b17
|
#!/usr/bin/python
import math
import argparse
from string import maketrans
# Script used to obtain primers.
parser = argparse.ArgumentParser()
parser.add_argument('-file', action="store", dest = 'File', required = "True")
parser.add_argument('-fasta', action = "store", dest = "genome", required = "True")
parser.add_argument('-fq', action = "store", dest = "fq_lim")
parser.add_argument('-out', action = "store", dest = "output", required = "True")
args = parser.parse_args()
genome = args.genome
# Tm calculation of an oligo, based on the BioPHP script by Joseba Bikandi: https://www.biophp.org/minitools/melting_temperature/demo.php
def Tm_calculation(oligo):
primer = float(400) # 400 nM assumed as a standard primer concentration
mg = float(2) # 2 mM assumed as a standard [Mg2+]
salt = float(40) # 40 mM assumed as a standard salt concentration
s = 0
h = 0
#Enthalpy and entropy values from http://www.ncbi.nlm.nih.gov/pmc/articles/PMC19045/table/T2/ (SantaLucia, 1998)
dic = {
"AA": [-7.9,-22.2],
"AC": [-8.4,-22.4],
"AG": [-7.8, -21.0],
"AT": [-7.2,-20.4],
"CA": [-8.5,-22.7],
"CC": [-8.0, -19.9],
"CG": [-10.6,-27.2],
"CT": [-7.8,-21.0],
"GA": [-8.2,-22.2],
"GC": [-9.8, -24.4],
"GG": [-8.0, -19.9],
"GT": [-8.4, -22.4],
"TA": [-7.2,-21.3],
"TC": [-8.2,-22.2],
"TG": [-8.5,-22.7],
"TT": [-7.9,-22.2]}
#Effect on entropy from the salt correction (von Ahsen et al. 1999);
#stability increases due to the presence of Mg2+
salt_effect = (salt/1000)+((mg/1000)*140)
#effect on entropy
s += 0.368 * (len(oligo) - 1) * math.log(salt_effect)
#Terminal corrections (SantaLucia 1998)
firstnucleotide= oligo[0]
if firstnucleotide=="G" or firstnucleotide=="C": h+=0.1; s+=-2.8
if firstnucleotide=="A" or firstnucleotide=="T": h+=2.3; s+=4.1
lastnucleotide= oligo[-1]
if lastnucleotide=="G" or lastnucleotide=="C": h+=0.1; s+=-2.8
if lastnucleotide=="A" or lastnucleotide=="T": h+=2.3; s+=4.1
#Compute new H and S based on the sequence (SantaLucia 1998)
for i in range(0,len(oligo)-1):
f = i+ 2
substring = oligo[i:f]
try:
h = h + float(dic[substring][0])
s = s + float(dic[substring][1])
except:
return 0
tm=((1000*h)/(s+(1.987*math.log(primer/2000000000))))-273.15
return tm
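# Illustrative usage (hypothetical oligo; the exact figure depends on the
# nearest-neighbor tables and concentrations above):
#     print Tm_calculation("ATGCAGGTCCAGTTACGCTA")  # prints a Tm in degrees Celsius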
def reverse_complementary(oligo):
revcomp = oligo.translate(maketrans('ACGT', 'TGCA'))[::-1]
return revcomp
def find_repetitive(oligo,repetitions):
for i in range(len(oligo)-repetitions):
subdivision = oligo[i:i+repetitions]
sub_list = list(subdivision)
if len(set(sub_list)) == 1:
return "yes"
# Function to parse fasta file (based on one of the Biopython IOs)
def read_fasta(fp):
name, seq = None, []
for line in fp:
line = line.rstrip()
if line.startswith('>'):
if name: yield (name, ''.join(seq))
name, seq = line, []
else:
seq.append(line)
if name: yield (name, ''.join(seq))
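# Illustrative usage of the generator above (hypothetical file name):
#     with open("genome.fasta") as fp:
#         for name, seq in read_fasta(fp):
#             print name, len(seq)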
def genome_selection(contig,genome):
with open(genome) as fp:
for name_contig, seq_contig in read_fasta(fp):
if name_contig[1:].lower() == contig:
genome = seq_contig
return genome
def rule_1(oligo,sense):
last_element = len(oligo)
if sense == "reverse" : oligo = reverse_complementary(oligo)
while True:
end_of_primer = 21
begin_of_primer = 0
oligo = oligo[:last_element]
guanine = oligo.rfind("G")
cytosine = oligo.rfind("C")
#Check whether there are both G and C in the oligo
if guanine != -1 and cytosine != -1:
last_element = max([guanine,cytosine])
else:
if guanine == -1 and cytosine == -1:
found = "no"
return found, found, "-"
elif guanine == -1 and cytosine != -1:
last_element = cytosine
elif guanine != -1 and cytosine == -1:
last_element = guanine
begin = last_element - end_of_primer
end = last_element
while end - begin < 26 and end - begin > 16:
if begin < 0:
break
primer = oligo[begin:end+1]
Tm = Tm_calculation(primer)
if Tm > 60 and Tm < 64:
found = "yes"
return found, primer, Tm
elif Tm < 60:
begin -= 1
elif Tm > 64:
begin += 1
def rule_2(oligo,sense):
#This rule slides the primer start position forward, adjusting its end until the Tm falls within the 60-64 C window
begin_of_primer = 0
if sense == "reverse" : oligo = reverse_complementary(oligo)
while True:
end_of_primer = 21 + begin_of_primer
if end_of_primer > len(oligo):
found = "no"
return found, found, "-"
while end_of_primer - begin_of_primer < 26 and end_of_primer - begin_of_primer > 16:
primer = oligo[begin_of_primer:end_of_primer+1]
Tm = Tm_calculation(primer)
if Tm >= 60 and Tm <= 64:
found = "yes"
return found, primer, Tm
elif Tm < 60:
end_of_primer += 1
elif Tm > 64:
end_of_primer -= 1
begin_of_primer +=1
def snp_calculation(position,genome):
#Total size of the amplification
size_i = 300
size_f = 500
oligos = []
Tms = []
#upstream primer
up_primer_pos = int(position) - size_i
oligo = genome[up_primer_pos-1 : up_primer_pos + 100]
result = rule_1(oligo, "upstream")
if result[0] == "no":
result = rule_2(oligo, "upstream")
if result[0] == "no":
oligos.append("not found")
oligos.append("-")
Tms.append("-")
Tms.append("-")
if result[0] == "yes":
oligos.append(result[1])
Tms.append(str(result[2]))
#downstream primer
down_primer_pos = int(position) + size_f
oligo = genome[down_primer_pos-1 : down_primer_pos + 100]
result = rule_1(oligo,"downstream")
if result[0] == "no":
result = rule_2(oligo, "downstream")
if result[0] == "no":
oligos.append("not found")
Tms.append("-")
if result[0] == "yes":
oligos.append(result[1])
Tms.append(str(result[2]))
return oligos,Tms
def insertion_calculation(position,genome,contig_used):
size= 600
oligos = []
Tms = []
selection = "5"
try_size = 100
pos_n_contig = contig_used+"_"+position
if pos_n_contig not in consensus_3:
oligos.extend(["not found","-","-","-"])
Tms.extend(["-","-","-","-"])
return oligos,Tms
#Insertion primer:
#generate the oligo from the insertion consensus in which the primer will be searched
for selection in [5,3]:
if selection == 3:
lenght_consensus = len(consensus_3[pos_n_contig])
how = "forward"
try_oligo = consensus_3[pos_n_contig][:lenght_consensus]
elif selection == 5:
lenght_consensus = len(consensus_5[pos_n_contig])
how = "reverse"
try_oligo = consensus_5[pos_n_contig][:lenght_consensus]
if lenght_consensus < 10:
oligos.extend(["not found","-","-","-"])
Tms.extend(["-","-","-","-"])
result = rule_1(try_oligo,how)
if result[0] == "no":
result = rule_2(try_oligo, how)
if result[0] == "no":
oligos.extend(["not found","-","-","-"])
Tms.extend(["-","-","-","-"])
return oligos,Tms
if result[0] == "yes":
oligos.append(result[1])
Tms.append(str(result[2]))
#Generation of the forward and reverse oligos
up_primer_pos = int(position) - size
try_oligo = genome[up_primer_pos-1 : up_primer_pos + 100]
result = rule_1(try_oligo, "forward")
if result[0] == "no":
result = rule_2(try_oligo, "forward")
if result[0] == "no":
oligos.append("not found")
oligos.append("-")
Tms.append("-")
Tms.append("-")
if result[0] == "yes":
oligos.append(result[1])
Tms.append(str(result[2]))
#downstream primer
down_primer_pos = int(position) + size
try_oligo = genome[down_primer_pos-1 : down_primer_pos + 100]
result = rule_1(try_oligo,"reverse")
if result[0] == "no":
result = rule_2(try_oligo, "reverse")
if result[0] == "no":
oligos.append("not found")
Tms.append("-")
if result[0] == "yes":
oligos.append(result[1])
Tms.append(str(result[2]))
return oligos,Tms
def fastaq_to_dic(fq):
#It gets two dictionaries for 3' and 5' sequences.
dic_fas_3= {}
dic_fas_5= {}
#fq = open(fq,"r")
i = 0
m = 0
with open(fq,"r") as fq:
for line in fq.readlines():
line = line.rstrip()
if line.startswith("@user_projects"):
gene = line.split("/")[-1]
split = gene.split("_")
h = split[0]+"_"+split[1]
m = 0
#Depending on whether the reads come from the 3' or 5' end, they go to one dictionary or the other.
if split[2].strip()=="3":
n = 1
dic_fas_3[h]=""
if split[2].strip()=="5":
n= 2
dic_fas_5[h]=""
continue
if m == 0 and not line.startswith("+"):
line = line.upper()
if n == 1:
dic_fas_3[h]+= line
if n == 2:
dic_fas_5[h]+= line
if line.startswith("+"):
m =1
return dic_fas_3,dic_fas_5
positions = open(args.File,"r")
result = open(args.output,"w")
n = 0
first_list = []
for line in positions.readlines():
line = line.split("\t")
if n != 0:
first_list.append(line)
n += 1
former = ""
list2= []
contig_used = ""
mode = first_list[0][0]
if mode == "snp":
result.write("@type\tcontig\tposition\tref_base\talt_base\thit\tmrna_start\tmrna_end\tstrand\tgene_model\tgene_element\taa_pos\taa_ref\taa_alt\tgene_funct_annotation\tforward primer\tTm forward\treverse primer\tTm reverse\n")
if mode == "lim":
#try:
consensus = fastaq_to_dic(args.fq_lim)
consensus_3 = consensus[0]
consensus_5 = consensus[1]
#except:
# print "Fq file missing"
# exit()
result.write("@type\tcontig\tposition\tref_base\talt_base\thit\tmrna_start\tmrna_end\tstrand\tgene_model\tgene_element\taa_pos\taa_ref\taa_alt\tgene_funct_annotation\tforward primer\tTm forward\tinsertion primer 5'\tTm insertion primer 5'\tinsertion primer 3'\tTm insertion primer 3'\treverse primer\tTm reverse\n")
for line in first_list:
if mode == "snp": v = line[0]+"\t"+line[1]+"\t"+line[2]+"\t"+line[3]+"\t"+line[4]+"\t"+line[5]+"\t"+line[6]+"\t"+line[7]+"\t"+line[8]+"\t"+line[9]+"\t"+line[10]+"\t"+line[11]+"\t"+line[12]+"\t"+line[13]+"\t"+line[14].rstrip()
else: v = line[0]+"\t"+line[1]+"\t"+line[2]+"\t"+line[3]+"\t"+line[4]+"\t"+line[5]+"\t"+line[6]+"\t"+line[7]+"\t"+line[8]+"\t"+line[9]+"\t"+line[10]+"\t"+line[11]+"\t"+line[12]+"\t"+line[13]+"\t"+line[14].rstrip()
#if line[5] == "nh":
# if mode == "snp": list2.append(v+"\t-\t-\t-\t-\n")
# else: list2.append(v+"\t-\t-\t-\t-\t-\t-\n")
#else:
a = line[1]+"-"+line[2]
if a == former:
if mode == "snp": list2.append(v+"\t-\t-\t-\t-\n")
else: list2.append(v+"\t-\t-\t-\t-\t-\t-\n")
elif a != former:
if mode == "snp":
if line[1] != contig_used:
genom = genome_selection(line[1],genome)
contig_used = line[1]
r = snp_calculation(line[2],genom)
list2.append(v+"\t"+r[0][0]+"\t"+r[1][0]+"\t"+r[0][1]+"\t"+r[1][1]+"\n")
if mode == "lim":
if line[1] != contig_used:
genom = genome_selection(line[1],genome)
contig_used = line[1]
r = insertion_calculation(line[2],genom,contig_used)
list2.append(v+"\t"+r[0][2]+"\t"+r[1][2]+"\t"+r[0][0]+"\t"+r[1][0]+"\t"+r[0][1]+"\t"+r[1][1]+"\t"+r[0][3]+"\t"+r[1][3]+"\n")
former = a
for items in list2:
result.write(items)
|
davidwilson-85/easymap
|
primers/primer-generation-old.py
|
Python
|
gpl-3.0
| 10,749
|
[
"Biopython"
] |
47d5fbf56395905bbddbcbf2345fefee24c9f4c5ad80ee53d75680dc4f78bcd9
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, print_function, unicode_literals, division)
class BaseNode:
def accept(self, visitor):
visitor.visit(self)
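# A minimal visitor sketch (illustrative; not part of this module): any object
# exposing a visit(node) method can be passed to accept(), e.g.
#
#     class NodeNamePrinter(object):
#         def visit(self, node):
#             print(node.__class__.__name__)
#
#     TermQuery("user", "kimchy").accept(NodeNamePrinter())  # prints "TermQuery"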
class TopLevelQuery(BaseNode):
def __init__(self, query, q_size=None, q_from=None, aggs=None, sort=None):
self.query = query
self.q_size = q_size
self.q_from = q_from
self.aggs = aggs
self.sort = sort
class FilteredQuery(BaseNode):
def __init__(self, query, filter):
self.query = query
self.filter = filter
class MatchQuery(BaseNode):
def __init__(self, field_name, text_query, query_type, analyzer):
self.field_name = field_name
self.text_query = text_query
self.query_type = query_type
self.analyzer = analyzer
class MatchAllQuery(BaseNode):
def __init__(self):
pass
class TermQuery(BaseNode):
def __init__(self, field_name, value, boost=1.0):
self.field_name = field_name
self.value = value
self.boost = boost
class TermsQuery(BaseNode):
def __init__(self, field_name, values):
self.field_name = field_name
self.values = values
class NestedQuery(BaseNode):
def __init__(self, path, queries):
self.path = path
self.queries = queries
class BoolQuery(BaseNode):
def __init__(self, must=None, must_not=None, should=None):
self.must = must
self.must_not = must_not
self.should = should
class Range(BaseNode):
#FIXME: hard-coded to a half-closed range at the moment
def __init__(self, lower_bound, upper_bound):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
class HasChildFilter(BaseNode):
def __init__(self, clause, doc_type):
self.clause = clause
self.doc_type = doc_type
class MatchAllFilter(BaseNode):
def __init__(self):
pass
class RangeFilter(BaseNode):
def __init__(self, field_name, range):
self.field_name = field_name
self.range = range
class BoolFilter(BaseNode):
def __init__(self, must=None, must_not=None, should=None):
self.must = must
self.must_not = must_not
self.should = should
class AndFilter(BaseNode):
def __init__(self, clauses):
self.clauses = clauses
class OrFilter(BaseNode):
def __init__(self, clauses):
self.clauses = clauses
class NotFilter(BaseNode):
def __init__(self, clause):
self.clause = clause
class TermFilter(BaseNode):
def __init__(self, field_name, value):
self.field_name = field_name
self.value = value
class TermsFilter(BaseNode):
def __init__(self, field_name, values, execution):
self.field_name = field_name
self.values = values
self.execution = execution
class NestedFilter(BaseNode):
def __init__(self, path, filters):
self.path = path
self.filters = filters
class MissingFilter(BaseNode):
def __init__(self, field_name, existence=True, null_value=True):
self.field_name = field_name
self.existence = existence
self.null_value = null_value
class GeoDistanceFilter(BaseNode):
def __init__(self, field_name, center_lat, center_lng, distance_in_km=1):
self.field_name = field_name
self.center_lat = center_lat
self.center_lng = center_lng
self.distance_in_km = distance_in_km
class TopLevelAggregation(BaseNode):
def __init__(self, field_name, agg):
self.field_name = field_name
self.agg = agg
class TermsAggregation(BaseNode):
def __init__(self, field_name, size=0, order_type="_count", order="desc", min_doc_count=None):
self.field_name = field_name
self.size = size
self.order_type = order_type
self.order = order
self.min_doc_count = min_doc_count
class SumAggregation(BaseNode):
def __init__(self, field_name):
self.field_name = field_name
class AvgAggregation(BaseNode):
def __init__(self, field_name):
self.field_name = field_name
class StatsAggregation(BaseNode):
def __init__(self, field_name):
self.field_name = field_name
class MinAggregation(BaseNode):
def __init__(self, field_name):
self.field_name = field_name
class MaxAggregation(BaseNode):
def __init__(self, field_name):
self.field_name = field_name
class ValueAccountAggregation(BaseNode):  # presumably Elasticsearch's "value_count" aggregation
def __init__(self, field_name):
self.field_name = field_name
class NestedAggregation(BaseNode):
def __init__(self, field_name, path, sub_agg):
self.field_name = field_name
self.path = path
self.sub_agg = sub_agg
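# ----------------------------------------------------------------------
# Illustrative usage (a sketch, not part of the original module): the classes
# above form a plain AST, and BaseNode.accept() performs single-node dispatch
# to a visitor's visit() method; walking children is the visitor's job.
# PrintVisitor below is a toy stand-in for a real Elasticsearch JSON
# serializer, which this package would define elsewhere.
if __name__ == '__main__':
    class PrintVisitor(object):
        def visit(self, node):
            print(type(node).__name__)

    query = TopLevelQuery(
        FilteredQuery(
            MatchQuery('title', 'coffee', 'boolean', 'standard'),
            TermFilter('status', 'published')),
        q_size=10)
    query.accept(PrintVisitor())  # prints: TopLevelQuery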
|
BrandKarma/elasticsearch_dsl
|
elasticsearch_dsl/ast.py
|
Python
|
bsd-3-clause
| 4,718
|
[
"VisIt"
] |
3d2f104498d8de867bb3114793e208c71cac48e0bb60e1eea27926a518cba76f
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.config.committers import Account, CommitterList, Contributor, Committer, Reviewer
class CommittersTest(unittest.TestCase):
def test_committer_lookup(self):
account = Account('Test Zero', ['zero@test.com', 'zero@gmail.com'], 'zero')
committer = Committer('Test One', 'one@test.com', 'one')
reviewer = Reviewer('Test Two', ['two@test.com', 'Two@rad.com', 'so_two@gmail.com'])
contributor = Contributor('Test Three', ['Three@test.com'], 'three')
contributor_with_two_nicknames = Contributor('Other Four', ['otherfour@webkit.org', 'otherfour@webkit2.org'], ['four', 'otherfour'])
contributor_with_same_email_username = Contributor('Yet Another Four', ['otherfour@webkit.com'], ['yetanotherfour'])
committer_list = CommitterList(watchers=[account], committers=[committer], reviewers=[reviewer],
contributors=[contributor, contributor_with_two_nicknames, contributor_with_same_email_username])
# Test valid committer, reviewer and contributor lookup
self.assertEqual(committer_list.account_by_email('zero@test.com'), account)
self.assertEqual(committer_list.committer_by_email('one@test.com'), committer)
self.assertEqual(committer_list.reviewer_by_email('two@test.com'), reviewer)
self.assertEqual(committer_list.committer_by_email('two@test.com'), reviewer)
self.assertEqual(committer_list.committer_by_email('two@rad.com'), reviewer)
self.assertEqual(committer_list.reviewer_by_email('so_two@gmail.com'), reviewer)
self.assertEqual(committer_list.contributor_by_email('three@test.com'), contributor)
        # Test valid committer, reviewer and contributor lookup by name
self.assertEqual(committer_list.committer_by_name("Test One"), committer)
self.assertEqual(committer_list.committer_by_name("Test Two"), reviewer)
self.assertIsNone(committer_list.committer_by_name("Test Three"))
self.assertEqual(committer_list.contributor_by_name("Test Three"), contributor)
self.assertEqual(committer_list.contributor_by_name("test one"), committer)
self.assertEqual(committer_list.contributor_by_name("test two"), reviewer)
self.assertEqual(committer_list.contributor_by_name("test three"), contributor)
# Test that the first email is assumed to be the Bugzilla email address (for now)
self.assertEqual(committer_list.committer_by_email('two@rad.com').bugzilla_email(), 'two@test.com')
# Test lookup by login email address
self.assertEqual(committer_list.account_by_login('zero@test.com'), account)
self.assertIsNone(committer_list.account_by_login('zero@gmail.com'))
self.assertEqual(committer_list.account_by_login('one@test.com'), committer)
self.assertEqual(committer_list.account_by_login('two@test.com'), reviewer)
self.assertIsNone(committer_list.account_by_login('Two@rad.com'))
self.assertIsNone(committer_list.account_by_login('so_two@gmail.com'))
# Test that a known committer is not returned during reviewer lookup
self.assertIsNone(committer_list.reviewer_by_email('one@test.com'))
self.assertIsNone(committer_list.reviewer_by_email('three@test.com'))
# and likewise that a known contributor is not returned for committer lookup.
self.assertIsNone(committer_list.committer_by_email('three@test.com'))
        # Test that unknown email addresses fail both committer and reviewer lookup
self.assertIsNone(committer_list.committer_by_email('bar@bar.com'))
self.assertIsNone(committer_list.reviewer_by_email('bar@bar.com'))
# Test that emails returns a list.
self.assertEqual(committer.emails, ['one@test.com'])
self.assertEqual(committer.irc_nicknames, ['one'])
self.assertEqual(committer_list.contributor_by_irc_nickname('one'), committer)
self.assertEqual(committer_list.contributor_by_irc_nickname('three'), contributor)
self.assertEqual(committer_list.contributor_by_irc_nickname('four'), contributor_with_two_nicknames)
self.assertEqual(committer_list.contributor_by_irc_nickname('otherfour'), contributor_with_two_nicknames)
        # Test that the lists returned are as we expect them.
self.assertEqual(committer_list.contributors(), [contributor, contributor_with_two_nicknames, contributor_with_same_email_username, committer, reviewer])
self.assertEqual(committer_list.committers(), [committer, reviewer])
self.assertEqual(committer_list.reviewers(), [reviewer])
self.assertEqual(committer_list.contributors_by_search_string('test'), [contributor, committer, reviewer])
self.assertEqual(committer_list.contributors_by_search_string('rad'), [reviewer])
self.assertEqual(committer_list.contributors_by_search_string('Two'), [reviewer])
self.assertEqual(committer_list.contributors_by_search_string('otherfour'), [contributor_with_two_nicknames])
self.assertEqual(committer_list.contributors_by_search_string('*otherfour*'), [contributor_with_two_nicknames, contributor_with_same_email_username])
self.assertEqual(committer_list.contributors_by_email_username("one"), [committer])
self.assertEqual(committer_list.contributors_by_email_username("four"), [])
self.assertEqual(committer_list.contributors_by_email_username("otherfour"), [contributor_with_two_nicknames, contributor_with_same_email_username])
def _assert_fuzz_match(self, text, name_of_expected_contributor, expected_distance):
committers = CommitterList()
contributors, distance = committers.contributors_by_fuzzy_match(text)
if type(name_of_expected_contributor) is list:
expected_names = name_of_expected_contributor
else:
expected_names = [name_of_expected_contributor] if name_of_expected_contributor else []
self.assertEqual(([contributor.full_name for contributor in contributors], distance), (expected_names, expected_distance))
# Basic testing of the edit distance matching ...
def test_contributors_by_fuzzy_match(self):
self._assert_fuzz_match('Geoff Garen', 'Geoffrey Garen', 3)
self._assert_fuzz_match('Kenneth Christiansen', 'Kenneth Rohde Christiansen', 6)
self._assert_fuzz_match('Sam', 'Sam Weinig', 0)
self._assert_fuzz_match('me', None, 2)
# The remaining tests test that certain names are resolved in a specific way.
# We break this up into multiple tests so that each is faster and they can
# be run in parallel. Unfortunately each test scans the entire committers list,
# so these are inherently slow (see https://bugs.webkit.org/show_bug.cgi?id=79179).
#
    # Commented-out lines are test cases imported from bug 26533 that do not yet pass.
def integration_test_contributors__none(self):
self._assert_fuzz_match('myself', None, 6)
self._assert_fuzz_match('others', None, 6)
self._assert_fuzz_match('BUILD FIX', None, 9)
def integration_test_contributors__none_2(self):
self._assert_fuzz_match('but Dan Bernstein also reviewed', None, 31)
self._assert_fuzz_match('asked thoughtful questions', None, 26)
self._assert_fuzz_match('build fix of mac', None, 16)
def integration_test_contributors__none_3(self):
self._assert_fuzz_match('a spell checker', None, 15)
self._assert_fuzz_match('nobody, build fix', None, 17)
self._assert_fuzz_match('NOBODY (chromium build fix)', None, 27)
def integration_test_contributors_ada_chan(self):
self._assert_fuzz_match('Ada', 'Ada Chan', 0)
def integration_test_contributors_adele_peterson(self):
self._assert_fuzz_match('adele', 'Adele Peterson', 0)
    def integration_test_contributors_adam_roben(self):
# self._assert_fuzz_match('Adam', 'Adam Roben', 0)
self._assert_fuzz_match('aroben', 'Adam Roben', 0)
def integration_test_contributors_alexey_proskuryakov(self):
# self._assert_fuzz_match('Alexey', 'Alexey Proskuryakov', 0)
self._assert_fuzz_match('ap', 'Alexey Proskuryakov', 0)
self._assert_fuzz_match('Alexey P', 'Alexey Proskuryakov', 0)
def integration_test_contributors_alice_liu(self):
# self._assert_fuzz_match('Alice', 'Alice Liu', 0)
self._assert_fuzz_match('aliu', 'Alice Liu', 0)
self._assert_fuzz_match('Liu', 'Alice Liu', 0)
def integration_test_contributors_alp_toker(self):
self._assert_fuzz_match('Alp', 'Alp Toker', 0)
def integration_test_contributors_anders_carlsson(self):
self._assert_fuzz_match('Anders', 'Anders Carlsson', 0)
self._assert_fuzz_match('andersca', 'Anders Carlsson', 0)
self._assert_fuzz_match('anders', 'Anders Carlsson', 0)
self._assert_fuzz_match('Andersca', 'Anders Carlsson', 0)
def integration_test_contributors_antti_koivisto(self):
self._assert_fuzz_match('Antti "printf" Koivisto', 'Antti Koivisto', 9)
self._assert_fuzz_match('Antti', 'Antti Koivisto', 0)
def integration_test_contributors_beth_dakin(self):
self._assert_fuzz_match('Beth', 'Beth Dakin', 0)
self._assert_fuzz_match('beth', 'Beth Dakin', 0)
self._assert_fuzz_match('bdakin', 'Beth Dakin', 0)
def integration_test_contributors_brady_eidson(self):
self._assert_fuzz_match('Brady', 'Brady Eidson', 0)
self._assert_fuzz_match('bradee-oh', 'Brady Eidson', 0)
self._assert_fuzz_match('Brady', 'Brady Eidson', 0)
def integration_test_contributors_cameron_zwarich(self):
pass # self._assert_fuzz_match('Cameron', 'Cameron Zwarich', 0)
# self._assert_fuzz_match('cpst', 'Cameron Zwarich', 1)
def integration_test_contributors_chris_blumenberg(self):
# self._assert_fuzz_match('Chris', 'Chris Blumenberg', 0)
self._assert_fuzz_match('cblu', 'Chris Blumenberg', 0)
def integration_test_contributors_dan_bernstein(self):
self._assert_fuzz_match('Dan', ['Dan Winship', 'Dan Bernstein'], 0)
self._assert_fuzz_match('Dan B', 'Dan Bernstein', 0)
# self._assert_fuzz_match('mitz', 'Dan Bernstein', 0)
self._assert_fuzz_match('Mitz Pettel', 'Dan Bernstein', 1)
self._assert_fuzz_match('Mitzpettel', 'Dan Bernstein', 0)
self._assert_fuzz_match('Mitz Pettel RTL', 'Dan Bernstein', 5)
def integration_test_contributors_dan_bernstein_2(self):
self._assert_fuzz_match('Teh Mitzpettel', 'Dan Bernstein', 4)
# self._assert_fuzz_match('The Mitz', 'Dan Bernstein', 0)
self._assert_fuzz_match('Dr Dan Bernstein', 'Dan Bernstein', 3)
def integration_test_contributors_darin_adler(self):
self._assert_fuzz_match('Darin Adler\'', 'Darin Adler', 1)
self._assert_fuzz_match('Darin', 'Darin Adler', 0) # Thankfully "Fisher" is longer than "Adler"
self._assert_fuzz_match('darin', 'Darin Adler', 0)
def integration_test_contributors_david_harrison(self):
self._assert_fuzz_match('Dave Harrison', 'David Harrison', 2)
self._assert_fuzz_match('harrison', 'David Harrison', 0)
self._assert_fuzz_match('Dr. Harrison', 'David Harrison', 4)
def integration_test_contributors_david_harrison_2(self):
self._assert_fuzz_match('Dave Harrson', 'David Harrison', 3)
self._assert_fuzz_match('Dave Harrsion', 'David Harrison', 4) # Damerau-Levenshtein distance is 3
def integration_test_contributors_david_hyatt(self):
self._assert_fuzz_match('Dave Hyatt', 'David Hyatt', 2)
self._assert_fuzz_match('Daddy Hyatt', 'David Hyatt', 3)
# self._assert_fuzz_match('Dave', 'David Hyatt', 0) # 'Dave' could mean harrison.
self._assert_fuzz_match('hyatt', 'David Hyatt', 0)
# self._assert_fuzz_match('Haytt', 'David Hyatt', 0) # Works if we had implemented Damerau-Levenshtein distance!
def integration_test_contributors_david_kilzer(self):
self._assert_fuzz_match('Dave Kilzer', 'David Kilzer', 2)
self._assert_fuzz_match('David D. Kilzer', 'David Kilzer', 3)
self._assert_fuzz_match('ddkilzer', 'David Kilzer', 0)
def integration_test_contributors_don_melton(self):
self._assert_fuzz_match('Don', 'Don Melton', 0)
self._assert_fuzz_match('Gramps', 'Don Melton', 0)
def integration_test_contributors_eric_seidel(self):
# self._assert_fuzz_match('eric', 'Eric Seidel', 0)
self._assert_fuzz_match('Eric S', 'Eric Seidel', 0)
# self._assert_fuzz_match('MacDome', 'Eric Seidel', 0)
self._assert_fuzz_match('eseidel', 'Eric Seidel', 0)
def integration_test_contributors_geoffrey_garen(self):
# self._assert_fuzz_match('Geof', 'Geoffrey Garen', 4)
# self._assert_fuzz_match('Geoff', 'Geoffrey Garen', 3)
self._assert_fuzz_match('Geoff Garen', 'Geoffrey Garen', 3)
self._assert_fuzz_match('ggaren', 'Geoffrey Garen', 0)
# self._assert_fuzz_match('geoff', 'Geoffrey Garen', 0)
self._assert_fuzz_match('Geoffrey', 'Geoffrey Garen', 0)
self._assert_fuzz_match('GGaren', 'Geoffrey Garen', 0)
def integration_test_contributors_greg_bolsinga(self):
pass # self._assert_fuzz_match('Greg', 'Greg Bolsinga', 0)
def integration_test_contributors_holger_freyther(self):
self._assert_fuzz_match('Holger', 'Holger Freyther', 0)
self._assert_fuzz_match('Holger Hans Peter Freyther', 'Holger Freyther', 11)
def integration_test_contributors_jon_sullivan(self):
# self._assert_fuzz_match('john', 'John Sullivan', 0)
self._assert_fuzz_match('sullivan', 'John Sullivan', 0)
def integration_test_contributors_jon_honeycutt(self):
self._assert_fuzz_match('John Honeycutt', 'Jon Honeycutt', 1)
# self._assert_fuzz_match('Jon', 'Jon Honeycutt', 0)
    def integration_test_contributors_justin_garcia(self):
# self._assert_fuzz_match('justin', 'Justin Garcia', 0)
self._assert_fuzz_match('justing', 'Justin Garcia', 0)
def integration_test_contributors_joseph_pecoraro(self):
self._assert_fuzz_match('Joe Pecoraro', 'Joseph Pecoraro', 3)
def integration_test_contributors_ken_kocienda(self):
self._assert_fuzz_match('ken', 'Ken Kocienda', 0)
self._assert_fuzz_match('kocienda', 'Ken Kocienda', 0)
def integration_test_contributors_kenneth_russell(self):
self._assert_fuzz_match('Ken Russell', 'Kenneth Russell', 4)
def integration_test_contributors_kevin_decker(self):
self._assert_fuzz_match('kdecker', 'Kevin Decker', 0)
def integration_test_contributors_kevin_mccullough(self):
self._assert_fuzz_match('Kevin M', 'Kevin McCullough', 0)
self._assert_fuzz_match('Kevin McCulough', 'Kevin McCullough', 1)
self._assert_fuzz_match('mccullough', 'Kevin McCullough', 0)
def integration_test_contributors_lars_knoll(self):
self._assert_fuzz_match('lars', 'Lars Knoll', 0)
    def integration_test_contributors_levi_weintraub(self):
self._assert_fuzz_match('levi', 'Levi Weintraub', 0)
def integration_test_contributors_maciej_stachowiak(self):
self._assert_fuzz_match('Maciej', 'Maciej Stachowiak', 0)
# self._assert_fuzz_match('mjs', 'Maciej Stachowiak', 0)
self._assert_fuzz_match('Maciej S', 'Maciej Stachowiak', 0)
def integration_test_contributors_mark_rowe(self):
# self._assert_fuzz_match('Mark', 'Mark Rowe', 0)
self._assert_fuzz_match('bdash', 'Mark Rowe', 0)
self._assert_fuzz_match('mrowe', 'Mark Rowe', 0)
# self._assert_fuzz_match('Brian Dash', 'Mark Rowe', 0)
def integration_test_contributors_nikolas_zimmermann(self):
# self._assert_fuzz_match('Niko', 'Nikolas Zimmermann', 1)
self._assert_fuzz_match('Niko Zimmermann', 'Nikolas Zimmermann', 3)
self._assert_fuzz_match('Nikolas', 'Nikolas Zimmermann', 0)
def integration_test_contributors_oliver_hunt(self):
# self._assert_fuzz_match('Oliver', 'Oliver Hunt', 0)
self._assert_fuzz_match('Ollie', 'Oliver Hunt', 1)
self._assert_fuzz_match('Olliej', 'Oliver Hunt', 0)
self._assert_fuzz_match('Olliej Hunt', 'Oliver Hunt', 3)
self._assert_fuzz_match('olliej', 'Oliver Hunt', 0)
self._assert_fuzz_match('ollie', 'Oliver Hunt', 1)
self._assert_fuzz_match('ollliej', 'Oliver Hunt', 1)
    def integration_test_contributors_richard_williamson(self):
self._assert_fuzz_match('Richard', 'Richard Williamson', 0)
self._assert_fuzz_match('rjw', 'Richard Williamson', 0)
    def integration_test_contributors_rob_buis(self):
self._assert_fuzz_match('Rob', 'Rob Buis', 0)
self._assert_fuzz_match('rwlbuis', 'Rob Buis', 0)
def integration_test_contributors_rniwa(self):
self._assert_fuzz_match('rniwa@webkit.org', 'Ryosuke Niwa', 0)
def disabled_integration_test_contributors_simon_fraser(self):
pass # self._assert_fuzz_match('Simon', 'Simon Fraser', 0)
def integration_test_contributors_steve_falkenburg(self):
self._assert_fuzz_match('Sfalken', 'Steve Falkenburg', 0)
# self._assert_fuzz_match('Steve', 'Steve Falkenburg', 0)
def integration_test_contributors_sam_weinig(self):
self._assert_fuzz_match('Sam', 'Sam Weinig', 0)
# self._assert_fuzz_match('Weinig Sam', 'weinig', 0)
self._assert_fuzz_match('Weinig', 'Sam Weinig', 0)
self._assert_fuzz_match('Sam W', 'Sam Weinig', 0)
self._assert_fuzz_match('Sammy Weinig', 'Sam Weinig', 2)
def integration_test_contributors_tim_omernick(self):
# self._assert_fuzz_match('timo', 'Tim Omernick', 0)
self._assert_fuzz_match('TimO', 'Tim Omernick', 0)
# self._assert_fuzz_match('Timo O', 'Tim Omernick', 0)
# self._assert_fuzz_match('Tim O.', 'Tim Omernick', 0)
self._assert_fuzz_match('Tim O', 'Tim Omernick', 0)
def integration_test_contributors_timothy_hatcher(self):
# self._assert_fuzz_match('Tim', 'Timothy Hatcher', 0)
# self._assert_fuzz_match('Tim H', 'Timothy Hatcher', 0)
self._assert_fuzz_match('Tim Hatcher', 'Timothy Hatcher', 4)
self._assert_fuzz_match('Tim Hatcheri', 'Timothy Hatcher', 5)
self._assert_fuzz_match('timothy', 'Timothy Hatcher', 0)
self._assert_fuzz_match('thatcher', 'Timothy Hatcher', 1)
self._assert_fuzz_match('xenon', 'Timothy Hatcher', 0)
self._assert_fuzz_match('Hatcher', 'Timothy Hatcher', 0)
# self._assert_fuzz_match('TimH', 'Timothy Hatcher', 0)
def integration_test_contributors_tor_arne_vestbo(self):
self._assert_fuzz_match('Tor Arne', u"Tor Arne Vestb\u00f8", 1) # Matches IRC nickname
def integration_test_contributors_vicki_murley(self):
self._assert_fuzz_match('Vicki', u"Vicki Murley", 0)
def integration_test_contributors_zack_rusin(self):
self._assert_fuzz_match('Zack', 'Zack Rusin', 0)
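# ----------------------------------------------------------------------
# Illustrative sketch (not part of webkitpy): the expected distances asserted
# above are consistent with plain Levenshtein edit distance between the query
# string and the matched name or nickname. A minimal reference implementation,
# for reading the numbers in the assertions:
def _levenshtein(a, b):
    """Return the Levenshtein edit distance between strings a and b."""
    previous = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        current = [i]
        for j, cb in enumerate(b, 1):
            current.append(min(previous[j] + 1,                # deletion
                               current[j - 1] + 1,             # insertion
                               previous[j - 1] + (ca != cb)))  # substitution
        previous = current
    return previous[-1]
# For example, _levenshtein('Geoff Garen', 'Geoffrey Garen') == 3, matching
# the expectation in test_contributors_by_fuzzy_match() above.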
|
windyuuy/opera
|
chromium/src/third_party/WebKit/Tools/Scripts/webkitpy/common/config/committers_unittest.py
|
Python
|
bsd-3-clause
| 20,632
|
[
"Brian"
] |
2687431fc9e226bfdf4ecfcc5489d86c25966ea1a52b1ebc9a98cf9c6aa436fc
|
import cPickle as pickle
import copy
import os
import menpo.io as mio
import numpy as np
from menpo.feature import no_op
from menpo.landmark import labeller, face_ibug_68_to_face_ibug_66_trimesh
from menpo.math import as_matrix
from menpo.model import PCAModel
from menpo.transform import PiecewiseAffine
from menpofit.aam import HolisticAAM, LucasKanadeAAMFitter
from menpofit.builder import (build_reference_frame, warp_images, align_shapes, rescale_images_to_reference_shape)
from menpofit.transform import DifferentiableAlignmentSimilarity
from pathlib import Path
from scipy.stats import multivariate_normal
#=======================================================================================================================
### Function for Reading {LFPW-AFW-Helen-Ibug} Images ###
#=======================================================================================================================
def load_image(i):
i = i.crop_to_landmarks_proportion(0.5)
if i.n_channels == 3:
i = i.as_greyscale()
labeller(i, 'PTS', face_ibug_68_to_face_ibug_66_trimesh)
del i.landmarks['PTS']
return i
#=======================================================================================================================
### Function for Reading Extended-Cohn-Kanade Images ###
#=======================================================================================================================
def Read_CK(land_tmp,image_path):
img = mio.import_image(image_path)
land_path = image_path[:-4] + "_landmarks.txt"
with open(land_path) as file:
tmp = np.array([[float(x) for x in line.split()] for line in file])
# Removing Extra Points - Lips inner corner
tmp = np.delete(tmp, (60), axis=0)
tmp = np.delete(tmp, (63), axis=0)
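    # Note: np.delete is applied sequentially, so after the first call shifts
    # indices down by one, deleting index 63 removes what was originally point
    # 64 of the 68-point annotation (leaving 66 points).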
# Swapping Columns (Y,X) -> (X,Y)
    result = np.zeros((66, 2))
    result[:, 0] = tmp[:, 1]
    result[:, 1] = tmp[:, 0]
    # Adding Landmarks
    land_tmp.lms.points = result
img.landmarks['face_ibug_66_trimesh'] = land_tmp
# Gray_Scaling
img = img.crop_to_landmarks_proportion(0.5)
if img.n_channels == 3:
img = img.as_greyscale()
return img
#=======================================================================================================================
### Function for Reading UNBC-McMaster Images ###
#=======================================================================================================================
def Read_UNBC(land_tmp,image_path):
img = mio.import_image(image_path)
land_path = image_path[:-4] + "_landmarks.txt"
with open(land_path) as file:
tmp = np.array([[float(x) for x in line.split()] for line in file])
# Swapping Columns (Y,X) -> (X,Y)
    result = np.zeros((66, 2))
    result[:, 0] = tmp[:, 1]
    result[:, 1] = tmp[:, 0]
    # Adding Landmarks
    land_tmp.lms.points = result
img.landmarks['face_ibug_66_trimesh'] = land_tmp
# Gray_Scaling
img = img.crop_to_landmarks_proportion(0.5)
if img.n_channels == 3:
img = img.as_greyscale()
return img
#=======================================================================================================================
### Function for Computing Shape Weights ###
#=======================================================================================================================
def Compute_Shape_Gauss_Weight(S_Data,S_mean,T_mean,S_Comp,T_Comp,S_landas,T_landas):
    # Computing the number of target eigenvectors needed to capture more than 95% of the variance
T_var = np.cumsum(T_landas)
T_var_ratio = T_var/np.sum(T_landas)
T_indices = np.argwhere((T_var_ratio-0.95)>0)
T_index = T_indices[0][0]
    # Computing the number of source eigenvectors needed to capture more than 95% of the variance
S_var = np.cumsum(S_landas)
S_var_ratio = S_var/np.sum(S_landas)
S_indices = np.argwhere((S_var_ratio-0.95)>0)
S_index = S_indices[0][0]
# Computing data args for gaussian distributions
Nom = np.dot((S_Data-T_mean),T_Comp.T)
Nom = Nom[:,:T_index]
Denom = np.dot((S_Data-S_mean),S_Comp.T)
Denom = Denom[:,:S_index]
# Defining Multivariate Gaussian Dists
MG_t = multivariate_normal(cov=np.diag(T_landas[:T_index]))
MG_s = multivariate_normal(cov=np.diag(S_landas[:S_index]))
# Computing Weights
res = MG_t.logpdf(Nom) - MG_s.logpdf(Denom)
res = np.exp(res)
# Normalizing Weights
f_res = (res - np.min(res)) / (np.max(res) - np.min(res))
return np.array(f_res)
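# Note on the weighting above: each source sample x is scored by the ratio of
# Gaussian densities of its projections onto the target and source PCA bases,
#     w(x) = N(C_T(x - mu_T); 0, diag(lambda_T)) / N(C_S(x - mu_S); 0, diag(lambda_S)),
# with each basis truncated at 95% captured variance. The ratio is computed in
# log space (a logpdf difference) for numerical stability, exponentiated, and
# min-max normalized to [0, 1].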
#=======================================================================================================================
### Function for Computing Appearance Weights ###
#=======================================================================================================================
def Compute_App_Gauss_Weight(S_Data,S_mean,T_mean,S_Comp,T_Comp,S_landas,T_landas,pwa_sut_t,pwa_sut_s,SUT_tmp,T_ref,S_ref):
    # Computing the number of target eigenvectors needed to capture more than 95% of the variance
T_var = np.cumsum(T_landas)
T_var_ratio = T_var/np.sum(T_landas)
T_indices = np.argwhere((T_var_ratio-0.95)>0)
T_index = T_indices[0][0]
    # Computing the number of source eigenvectors needed to capture more than 95% of the variance
S_var = np.cumsum(S_landas)
S_var_ratio = S_var/np.sum(S_landas)
S_indices = np.argwhere((S_var_ratio-0.95)>0)
S_index = S_indices[0][0]
Warped_S_Data = []
for i in range(0,S_Data.shape[0]):
img = SUT_tmp.from_vector(S_Data[i])
warped = img.as_unmasked(copy=False).warp_to_mask(T_ref.mask, pwa_sut_t)
Warped_S_Data.append(warped)
Warped_SUT_S = []
for i in range(0,S_Data.shape[0]):
img = SUT_tmp.from_vector(S_Data[i])
warped = img.as_unmasked(copy=False).warp_to_mask(S_ref.mask, pwa_sut_s)
Warped_SUT_S.append(warped)
# Computing data args for gaussian distributions
Warped_S_Data_new = as_matrix(Warped_S_Data, return_template=False, verbose=True)
S_Data = as_matrix(Warped_SUT_S, return_template=False, verbose=True)
Nom = np.dot((Warped_S_Data_new - T_mean),T_Comp.T)
Nom = Nom[:,:T_index]
Denom = np.dot((S_Data - S_mean),S_Comp.T)
Denom = Denom [:,:S_index]
# Defining Multivariate Gaussian Dists
MG_t = multivariate_normal(cov=np.diag(T_landas[:T_index]))
MG_s = multivariate_normal(cov=np.diag(S_landas[:S_index]))
# Computing Weights
res = MG_t.logpdf(Nom) - MG_s.logpdf(Denom)
res = np.exp(res)
# Normalizing Weights
f_res = (res - np.min(res)) / (np.max(res) - np.min(res))
return np.array(f_res)
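# The appearance weighting mirrors Compute_Shape_Gauss_Weight, except that each
# source appearance vector is first warped (via PiecewiseAffine) into the target
# and source reference frames before being projected onto the respective bases.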
#=======================================================================================================================
### Loading Data ###
#=======================================================================================================================
# Loading the face_ibug_66_trimesh template
with open('/u/azinasg/Code/face_ibug_66_trimesh_temp.pkl', 'rb') as input:
land_tmp = pickle.load(input)
# Loading (LFPW-AFW-Helen-Ibug) Images (Source Images)
source_path = Path('/u/azinasg/Research/Source_Small')
source_images = [load_image(i) for i in mio.import_images(source_path, verbose=True)]
# Loading CK Images (Source Images)
CK_root = "/u/azinasg/Research/Sample_CK+_Small"
for root, dirs, filenames in os.walk(CK_root):
for filename in filenames:
if (".png" in filename) and (".DS_Store" not in filename) :
tmp_image = Read_CK(land_tmp,root+"/"+filename)
source_images.append(tmp_image)
# Loading UNBC Images - Target
UNBC_root = "/u/azinasg/Research/Sample_UNBC_Small_Target"
target_images = []
for root, dirs, filenames in os.walk(UNBC_root):
for filename in filenames:
if (".png" in filename) and (".DS_Store" not in filename) :
target_images.append(Read_UNBC(land_tmp,root+"/"+filename))
# Loading UNBC Images - Test
UNBC_root = "/u/azinasg/Research/Sample_UNBC_Small_Test_2"
test_images = []
for root, dirs, filenames in os.walk(UNBC_root):
for filename in filenames:
if (".png" in filename) and (".DS_Store" not in filename):
test_images.append(Read_UNBC(land_tmp, root + "/" + filename))
#=======================================================================================================================
### Data Split Into Test, Target ###
#=======================================================================================================================
all_images = copy.deepcopy(target_images+source_images)
#=======================================================================================================================
### Pre-Computation ###
#=======================================================================================================================
# Building Source_AAM
source_aam = HolisticAAM(
source_images,
group='face_ibug_66_trimesh',
holistic_features=no_op,
scales=1,
diagonal=150,
max_appearance_components=200,
max_shape_components=100,
verbose=True
)
# Building Target AAM
target_aam = HolisticAAM(
target_images,
group='face_ibug_66_trimesh',
holistic_features=no_op,
scales=1,
diagonal=150,
max_appearance_components=200,
max_shape_components=100,
verbose=True
)
# Building SUT AAM
SUT_aam = HolisticAAM(
all_images,
group='face_ibug_66_trimesh',
holistic_features=no_op,
scales=1,
diagonal=150,
max_appearance_components=200,
max_shape_components=100,
verbose=True
)
#=======================================================================================================================
### Main Body ###
#=======================================================================================================================
for x in range(0,11,1):
#===================================================================================================================
### Setting Hyper-Parameters ###
#===================================================================================================================
alpha = x/10.0
beta = 0.5
#===================================================================================================================
### Rescaling Images to the reference shape of SUT Model (Mean Shape of Model with Diagonal = 150) ###
#===================================================================================================================
    ST = rescale_images_to_reference_shape(target_images, 'face_ibug_66_trimesh',
                                           SUT_aam.reference_shape, verbose=True)
    SS = rescale_images_to_reference_shape(source_images, 'face_ibug_66_trimesh',
                                           SUT_aam.reference_shape, verbose=True)
#===================================================================================================================
### Building Shape Model ###
#===================================================================================================================
# Mean-Centering Target Sample Shapes
ST_scaled_shapes = [i.landmarks['face_ibug_66_trimesh'].lms for i in ST]
ST_aligned_shapes = align_shapes(ST_scaled_shapes)
ST_data, ST_template = as_matrix(ST_aligned_shapes, return_template=True, verbose=True)
ST_N = ST_data.shape[0]
ST_mean = np.mean(ST_data, axis=0)
# Mean-Centering Source Sample Shapes
SS_scaled_shapes = [i.landmarks['face_ibug_66_trimesh'].lms for i in SS]
SS_aligned_shapes = align_shapes(SS_scaled_shapes)
SS_data, SS_template = as_matrix(SS_aligned_shapes, return_template=True, verbose=True)
SS_N = SS_data.shape[0]
SS_mean = np.mean(SS_data, axis=0)
# Defining Source Weight Vector
Source_weights = Compute_Shape_Gauss_Weight(SS_data,source_aam.shape_models[0].model._mean,
target_aam.shape_models[0].model._mean,
source_aam.shape_models[0].model._components,
target_aam.shape_models[0].model._components,
source_aam.shape_models[0].model._eigenvalues,
target_aam.shape_models[0].model._eigenvalues)
SS_weights = np.sqrt(((1 - alpha) / float(SS_N)) * Source_weights)
WSS = np.diag(SS_weights)
SS_data -= SS_mean
SS_data = WSS.dot(SS_data)
# Defining Target Weight Vector
ST_weights = np.sqrt((alpha / float(ST_N)) * np.ones(ST_N))
WST = np.diag(ST_weights)
ST_data -= ST_mean
ST_data = WST.dot(ST_data)
    # Building Data for PCA
S_Star = np.vstack((ST_data,SS_data))
n_new_samples = ST_N + SS_N
Shape_e_mean = ( beta*ST_N*ST_mean + (1-beta)*SS_N*SS_mean ) / (beta*ST_N + (1-beta)*SS_N)
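    # Shape_e_mean is a beta-weighted blend of the two sample means: beta=1
    # keeps the target mean, beta=0 keeps the source mean, and the sample
    # counts ST_N and SS_N weight each population's contribution in between.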
S_Star += Shape_e_mean
# Computing PCA Model
    Shape_tmp = PCAModel(S_Star, centre=True, n_samples=n_new_samples,
                         max_n_components=SUT_aam.max_shape_components[0],
                         inplace=True, verbose=True, azin_run=False, azin_temp=ST_template)
# Setting Models info
SUT_aam.shape_models[0].model=Shape_tmp
SUT_aam.shape_models[0]._target = None
SUT_aam.shape_models[0]._weights = np.zeros(SUT_aam.shape_models[0].model.n_active_components)
SUT_aam.shape_models[0]._target = SUT_aam.shape_models[0].model.mean()
mean = SUT_aam.shape_models[0].model.mean()
SUT_aam.shape_models[0].global_transform = DifferentiableAlignmentSimilarity(mean, mean)
# Re-orthonormalize
SUT_aam.shape_models[0]._construct_similarity_model()
# Set the target to the new mean
SUT_aam.shape_models[0]._sync_target_from_state()
#===================================================================================================================
#### Shape Model Finished ###
#===================================================================================================================
### Building Appearance Model ###
#===================================================================================================================
# Building SUT reference frame
SUT_reference_frame = build_reference_frame(SUT_aam.reference_shape)
# Obtain warped target samples
ST_warped = warp_images(ST, ST_scaled_shapes, SUT_reference_frame, SUT_aam.transform,verbose=True)
# Obtain warped source samples
SS_warped = warp_images(SS, SS_scaled_shapes, SUT_reference_frame, SUT_aam.transform,verbose=True)
# Building Data Matrix
ST_App_data, SUT_App_template = as_matrix(ST_warped, return_template=True, verbose=True)
ST_App_N = ST_App_data.shape[0]
SS_App_data, SUT_App_template = as_matrix(SS_warped, return_template=True, verbose=True)
SS_App_N = SS_App_data.shape[0]
# Defining Appearance Target Weight Vector
ST_App_weights = np.sqrt((alpha / float(ST_App_N)) * np.ones(ST_App_N))
App_WST = np.diag(ST_App_weights)
# Defining the warping from Samples Images to Mean of the Target Images
T_reference_frame = build_reference_frame(target_aam.reference_shape)
S_reference_frame = build_reference_frame(source_aam.reference_shape)
pwa_sut_t = PiecewiseAffine(T_reference_frame.landmarks['source'].lms, SUT_reference_frame.landmarks['source'].lms)
pwa_sut_s = PiecewiseAffine(S_reference_frame.landmarks['source'].lms, SUT_reference_frame.landmarks['source'].lms)
# Defining Source Weight Vector
Source_App_weights = Compute_App_Gauss_Weight(SS_App_data,source_aam.appearance_models[0]._mean,
target_aam.appearance_models[0]._mean,
source_aam.appearance_models[0]._components,
target_aam.appearance_models[0]._components,
source_aam.appearance_models[0]._eigenvalues,
target_aam.appearance_models[0]._eigenvalues,
pwa_sut_t,pwa_sut_s,SUT_App_template, T_reference_frame,S_reference_frame)
SS_App_weights = np.sqrt(((1 - alpha) / float(SS_App_N)) * Source_App_weights)
App_WSS = np.diag(SS_App_weights)
# Mean Centering the data
SS_App_mean = np.mean(SS_App_data, axis=0)
ST_App_mean = np.mean(ST_App_data, axis=0)
SS_App_data -= SS_App_mean
ST_App_data -= ST_App_mean
    # Building S* and applying PCA on it
SS_App_data = App_WSS.dot(SS_App_data)
ST_App_data = App_WST.dot(ST_App_data)
S_Star_App = np.vstack((ST_App_data,SS_App_data))
App_n_new_samples = ST_App_N + SS_App_N
# Calculating the Mean Appearance
App_e_mean = ( beta*ST_App_N*ST_App_mean + (1-beta)*SS_App_N*SS_App_mean ) / ( beta*ST_App_N + (1-beta)*SS_App_N )
S_Star_App += App_e_mean
    App_tmp = PCAModel(S_Star_App, centre=True, n_samples=App_n_new_samples,
max_n_components=SUT_aam.max_appearance_components[0],
inplace=True, verbose=True, azin_run=False, azin_temp=SUT_App_template)
del SUT_aam.appearance_models[0]
SUT_aam.appearance_models.append(App_tmp)
#===================================================================================================================
#### Appearance Model Finished ###
#===================================================================================================================
    ### Building the Fitter ###
#===================================================================================================================
# Building the Fitter
fitter = LucasKanadeAAMFitter(
SUT_aam,
n_shape=15,
n_appearance=100
)
#===================================================================================================================
errors = []
n_iters = []
final_errors = []
# fitting
for k,i in enumerate(test_images):
gt_s = i.landmarks['face_ibug_66_trimesh'].lms
# Loading the perturbations
with open('/u/azinasg/Research/Sample_UNBC_Small_Init_2/' + i.path.name[:-4] + '.pkl',
'rb') as input:
perturbations = pickle.load(input)
for j in range(0, 10):
initial_s = perturbations[j]
# fit image
fr = fitter.fit_from_shape(i, initial_s, gt_shape=gt_s, max_iters=300)
errors.append(fr.errors())
n_iters.append(fr.n_iters)
final_errors.append(fr.final_error())
print "Ins_Gauss : alpha=" + str(alpha) + " beta=" + str(beta)+" k=" + str(k) + " j=" + str(j) + \
" initial err: " + str(fr.initial_error()) + " final err: " + str(fr.final_error())
with open(r'/u/azinasg/res16/Instance_Gauss_a='+str(alpha)+'.pkl', 'wb') as f:
pickle.dump(errors, f)
pickle.dump(n_iters, f)
pickle.dump(final_errors, f)
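#=======================================================================================================================
### Reading the results back (illustrative sketch, not part of the experiment) ###
#=======================================================================================================================
# The dump above writes three objects sequentially into each pickle file, so
# they must be unpickled in the same order, e.g. (path shown for alpha=0.5):
# with open(r'/u/azinasg/res16/Instance_Gauss_a=0.5.pkl', 'rb') as f:
#     errors = pickle.load(f)
#     n_iters = pickle.load(f)
#     final_errors = pickle.load(f)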
|
azinasg/AAM_TL
|
Instance_Gaussian_WithoutLog.py
|
Python
|
bsd-3-clause
| 19,070
|
[
"Gaussian"
] |
61eadc95e246d8e279cde355680cd667eabbb407b19630458722911fd02732fa
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The basic photometry class for the TASOC Photometry pipeline.
All other specific photometric algorithms will inherit from BasePhotometry.
.. codeauthor:: Rasmus Handberg <rasmush@phys.au.dk>
"""
import numpy as np
import h5py
import sqlite3
import logging
import datetime
import os.path
import glob
import contextlib
import warnings
import enum
from copy import deepcopy
from astropy._erfa.core import ErfaWarning
from astropy.io import fits
from astropy.table import Table, Column
from astropy import units
import astropy.coordinates as coord
from astropy.time import Time
from astropy.wcs import WCS, FITSFixedWarning
from bottleneck import nanmedian, nanvar, nanstd, allnan
from .image_motion import ImageMovementKernel
from .quality import TESSQualityFlags, PixelQualityFlags, CorrectorQualityFlags
from .utilities import (find_tpf_files, find_hdf5_files, find_catalog_files, rms_timescale,
find_nearest, ListHandler, load_settings, load_sector_settings)
from .catalog import catalog_sqlite_search_footprint
from .psf import PSF
from .plots import plot_image, plt, save_figure
from .spice import TESS_SPICE
from .version import get_version
from . import fixes
# Filter out annoying warnings:
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=ErfaWarning, module="astropy")
__version__ = get_version()
hdf5_cache = {}
#--------------------------------------------------------------------------------------------------
@enum.unique
class STATUS(enum.Enum):
"""
Status indicator of the status of the photometry.
"""
UNKNOWN = 0 #: The status is unknown. The actual calculation has not started yet.
STARTED = 6 #: The calculation has started, but not yet finished.
OK = 1 #: Everything has gone well.
ERROR = 2 #: Encountered a catastrophic error that I could not recover from.
WARNING = 3 #: Something is a bit fishy. Maybe we should try again with a different algorithm?
ABORT = 4 #: The calculation was aborted.
SKIPPED = 5 #: The target was skipped because the algorithm found that to be the best solution.
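# Illustrative usage (a sketch, not pipeline code): downstream tasks typically
# branch on the status of a finished photometry object, e.g.
#     if phot.status in (STATUS.OK, STATUS.WARNING):
#         save_lightcurve(phot)  # save_lightcurve is a hypothetical consumer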
#--------------------------------------------------------------------------------------------------
class BasePhotometry(object):
"""
The basic photometry class for the TASOC Photometry pipeline.
All other specific photometric algorithms will inherit from this.
Attributes:
starid (int): TIC number of star being processed.
input_folder (str): Root directory where files are loaded from.
output_folder (str): Root directory where output files are saved.
		plot (bool): Indicates whether plots should be created as part of the output.
plot_folder (str): Directory where plots are saved to.
		method (str): String indicating the method of photometry.
sector (int): TESS observing sector.
camera (int): TESS camera (1-4).
ccd (int): TESS CCD (1-4).
data_rel (int): Data release number.
n_readout (int): Number of frames co-added in each timestamp.
header (dict-like): Primary header, either from TPF or HDF5 files.
target (dict): Catalog information about the main target.
target_pos_column (float): Main target CCD column position.
target_pos_row (float): Main target CCD row position.
target_pos_column_stamp (float): Main target CCD column position in stamp.
target_pos_row_stamp (float): Main target CCD row position in stamp.
wcs (:class:`astropy.wcs.WCS`): World Coordinate system solution.
lightcurve (:class:`astropy.table.Table`): Table to be filled with an extracted lightcurve.
final_phot_mask (numpy.ndarray): Mask indicating which pixels were used in extraction of
lightcurve. ``True`` if used, ``False`` otherwise.
final_position_mask (numpy.ndarray): Mask indicating which pixels were used in extraction
of positions. ``True`` if used, ``False`` otherwise.
additional_headers (dict): Additional headers to be included in FITS files.
.. codeauthor:: Rasmus Handberg <rasmush@phys.au.dk>
"""
#----------------------------------------------------------------------------------------------
def __init__(self, starid, input_folder, output_folder, datasource='ffi',
sector=None, camera=None, ccd=None, cadence=None, plot=False, cache='basic', version=6):
"""
Initialize the photometry object.
Parameters:
starid (int): TIC number of star to be processed.
input_folder (str): Root directory where files are loaded from.
output_folder (str): Root directory where output files are saved.
datasource (str): Source of the data. Options are ``'ffi'`` or ``'tpf'``.
Default is ``'ffi'``.
plot (bool): Create plots as part of the output. Default is ``False``.
camera (int): TESS camera (1-4) to load target from (Only used for FFIs).
ccd (int): TESS CCD (1-4) to load target from (Only used for FFIs).
cadence (int, optional): Not used for ``datasource='ffi'``.
cache (str): Optional values are ``'none'``, ``'full'``
or ``'basic'`` (Default).
version (int): Data release number to be added to headers. Default=6.
Raises:
Exception: If starid could not be found in catalog.
FileNotFoundError: If input file (HDF5, TPF, Catalog) could not be found.
ValueError: On invalid datasource.
ValueError: If ``camera`` and ``ccd`` is not provided together with ``datasource='ffi'``.
"""
logger = logging.getLogger(__name__)
if datasource != 'ffi' and not datasource.startswith('tpf'):
raise ValueError(f"Invalid datasource: '{datasource:s}'")
if cache not in ('basic', 'none', 'full'):
raise ValueError(f"Invalid cache: '{cache:s}'")
# Store the input:
self.starid = starid
self.input_folder = os.path.abspath(input_folder)
self.output_folder_base = os.path.abspath(output_folder)
self.plot = plot
self.datasource = datasource
self.version = version
# Further checks of inputs:
if os.path.isfile(self.input_folder):
self.input_folder = os.path.dirname(self.input_folder)
if not os.path.isdir(self.input_folder):
raise FileNotFoundError(f"Not a valid input directory: '{self.input_folder:s}'")
# Extract which photometric method that is being used by checking the
# name of the class that is running:
self.method = {
'BasePhotometry': 'base',
'AperturePhotometry': 'aperture',
'PSFPhotometry': 'psf',
'LinPSFPhotometry': 'linpsf',
'HaloPhotometry': 'halo'
}.get(self.__class__.__name__, None)
logger.info('STARID = %d, DATASOURCE = %s, METHOD = %s',
self.starid, self.datasource, self.method)
self._status = STATUS.UNKNOWN
self._details = {}
self.tpf = None
self.hdf = None
self._MovementKernel = None
self._images_cube_full = None
self._images_err_cube_full = None
self._backgrounds_cube_full = None
self._pixelflags_cube_full = None
self._sumimage_full = None
# Add a ListHandler to the logging of the photometry module.
# This is needed to catch any errors and warnings made by the photometries
# for ultimately storing them in the TODO-file.
# https://stackoverflow.com/questions/36408496/python-logging-handler-to-append-to-list
self.message_queue = []
handler = ListHandler(message_queue=self.message_queue, level=logging.WARNING)
formatter = logging.Formatter('%(levelname)s: %(message)s')
handler.setFormatter(formatter)
logging.getLogger('photometry').addHandler(handler)
# Init table that will be filled with lightcurve stuff:
self.lightcurve = Table()
if self.datasource == 'ffi':
# The camera and CCD should also come as input
# They will be needed to find the correct input files
if sector is None or camera is None or ccd is None:
raise ValueError("SECTOR, CAMERA and CCD keywords must be provided for FFI targets.")
self.sector = sector # TESS observing sector.
self.camera = camera # TESS camera.
self.ccd = ccd # TESS CCD.
logger.debug('SECTOR = %s', self.sector)
logger.debug('CAMERA = %s', self.camera)
logger.debug('CCD = %s', self.ccd)
# Load stuff from the common HDF5 file:
filepath_hdf5 = find_hdf5_files(self.input_folder, sector=self.sector, camera=self.camera, ccd=self.ccd)
if len(filepath_hdf5) != 1:
raise FileNotFoundError(f"HDF5 File not found. SECTOR={self.sector:d}, CAMERA={self.camera:d}, CCD={self.ccd:d}")
filepath_hdf5 = filepath_hdf5[0]
self.filepath_hdf5 = filepath_hdf5
logger.debug("CACHE = %s", cache)
load_into_cache = False
if cache == 'none':
load_into_cache = True
else:
global hdf5_cache
if filepath_hdf5 not in hdf5_cache:
hdf5_cache[filepath_hdf5] = {}
load_into_cache = True
elif cache == 'full' and hdf5_cache[filepath_hdf5].get('_images_cube_full') is None:
load_into_cache = True
# Open the HDF5 file for reading if we are not holding everything in memory:
if load_into_cache or cache != 'full':
self.hdf = h5py.File(filepath_hdf5, 'r')
if load_into_cache:
logger.debug('Loading basic data into cache...')
attrs = {}
# Just a shorthand for the attributes we use as "headers":
hdr = dict(self.hdf['images'].attrs)
attrs['header'] = hdr
attrs['data_rel'] = hdr['DATA_REL'] # Data release number
attrs['cadence'] = hdr.get('CADENCE')
if attrs['cadence'] is None:
attrs['cadence'] = load_sector_settings(self.sector)['ffi_cadence']
# Start filling out the basic vectors:
self.lightcurve['time'] = Column(self.hdf['time'], description='Time', dtype='float64', unit='TBJD')
N = len(self.lightcurve['time'])
self.lightcurve['cadenceno'] = Column(self.hdf['cadenceno'], description='Cadence number', dtype='int32')
self.lightcurve['quality'] = Column(self.hdf['quality'], description='Quality flags', dtype='int32')
if 'timecorr' in self.hdf:
self.lightcurve['timecorr'] = Column(self.hdf['timecorr'], description='Barycentric time correction', unit='days', dtype='float32')
else:
self.lightcurve['timecorr'] = Column(np.zeros(N, dtype='float32'), description='Barycentric time correction', unit='days', dtype='float32')
# Correct timestamp offset that was in early data releases:
self.lightcurve['time'] = fixes.time_offset(self.lightcurve['time'], hdr, datatype='ffi')
attrs['lightcurve'] = self.lightcurve
# World Coordinate System solution:
if isinstance(self.hdf['wcs'], h5py.Group):
refindx = self.hdf['wcs'].attrs['ref_frame']
hdr_string = self.hdf['wcs'][f'{refindx:04d}'][0]
else:
hdr_string = self.hdf['wcs'][0]
if not hdr_string:
raise ValueError("Invalid WCS header string.")
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=FITSFixedWarning)
self.wcs = WCS(header=fits.Header.fromstring(hdr_string), relax=True)
attrs['wcs'] = self.wcs
# Get shape of sumimage from hdf5 file:
attrs['_max_stamp'] = (0, self.hdf['sumimage'].shape[0], 0, self.hdf['sumimage'].shape[1])
attrs['pixel_offset_row'] = hdr.get('PIXEL_OFFSET_ROW', 0)
attrs['pixel_offset_col'] = hdr.get('PIXEL_OFFSET_COLUMN', 44) # Default for TESS data
# Get info for psf fit Gaussian statistic:
attrs['readnoise'] = hdr.get('READNOIS', 10)
attrs['gain'] = hdr.get('GAIN', 100)
attrs['num_frm'] = hdr.get('NUM_FRM', 900) # Number of frames co-added in each timestamp (Default=TESS).
attrs['n_readout'] = hdr.get('NREADOUT', int(attrs['num_frm']*(1-2/hdr.get('CRBLKSZ', np.inf)))) # Number of readouts
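				# If NREADOUT is absent, it is estimated from NUM_FRM and CRBLKSZ; when
				# CRBLKSZ is also missing, 2/np.inf evaluates to 0.0 and the estimate
				# reduces to NUM_FRM itself.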
# Load MovementKernel into memory:
attrs['_MovementKernel'] = self.MovementKernel
# The full sum-image:
attrs['_sumimage_full'] = np.asarray(self.hdf['sumimage'])
# Store attr in global variable:
hdf5_cache[filepath_hdf5] = deepcopy(attrs)
# If we are doing a full cache (everything in memory) load the image cubes as well.
# Note that this will take up A LOT of memory!
if cache == 'full':
logger.warning('Loading full image cubes into cache...')
hdf5_cache[filepath_hdf5]['_images_cube_full'] = np.empty((attrs['_max_stamp'][1], attrs['_max_stamp'][3], N), dtype='float32')
hdf5_cache[filepath_hdf5]['_images_err_cube_full'] = np.empty((attrs['_max_stamp'][1], attrs['_max_stamp'][3], N), dtype='float32')
hdf5_cache[filepath_hdf5]['_backgrounds_cube_full'] = np.empty((attrs['_max_stamp'][1], attrs['_max_stamp'][3], N), dtype='float32')
hdf5_cache[filepath_hdf5]['_pixelflags_cube_full'] = np.empty((attrs['_max_stamp'][1], attrs['_max_stamp'][3], N), dtype='uint8')
for k in range(N):
hdf5_cache[filepath_hdf5]['_images_cube_full'][:, :, k] = self.hdf['images/%04d' % k]
hdf5_cache[filepath_hdf5]['_images_err_cube_full'][:, :, k] = self.hdf['images_err/%04d' % k]
hdf5_cache[filepath_hdf5]['_backgrounds_cube_full'][:, :, k] = self.hdf['backgrounds/%04d' % k]
hdf5_cache[filepath_hdf5]['_pixelflags_cube_full'][:, :, k] = self.hdf['pixelflags/%04d' % k]
					# We don't need the file anymore.
self.hdf.close()
self.hdf = None
else:
logger.debug('Loaded data from cache!')
attrs = hdf5_cache[filepath_hdf5] # Pointer to global variable
# Set all the attributes from the cache:
			# TODO: Does this create copies of data? If so, we should maybe delete "attrs" again.
for key, value in attrs.items():
setattr(self, key, value)
else:
# If the datasource was specified as 'tpf:starid' it means
# that we should load from the specified starid instead of
# the starid of the current main target.
if self.datasource.startswith('tpf:'):
starid_to_load = int(self.datasource[4:])
self.datasource = 'tpf'
else:
starid_to_load = self.starid
# Find the target pixel file for this star:
fname = find_tpf_files(self.input_folder, starid=starid_to_load, sector=sector, cadence=cadence)
if len(fname) == 1:
fname = fname[0]
elif len(fname) == 0:
raise FileNotFoundError("Target Pixel File not found")
elif len(fname) > 1:
raise FileNotFoundError("Multiple Target Pixel Files found matching pattern")
# Open the FITS file:
self.tpf = fits.open(fname, mode='readonly', memmap=True)
# Load sector, camera and CCD from the FITS header:
self.header = self.tpf[0].header
self.sector = self.tpf[0].header['SECTOR']
self.camera = self.tpf[0].header['CAMERA']
self.ccd = self.tpf[0].header['CCD']
self.data_rel = self.tpf[0].header['DATA_REL'] # Data release number
self.cadence = cadence if cadence is not None else int(np.round(self.tpf[1].header['TIMEDEL']*86400))
# Fix for timestamps that are not defined. Simply remove them from the table:
# This is seen in some file from sector 1.
indx_good_times = np.isfinite(self.tpf['PIXELS'].data['TIME'])
self.tpf['PIXELS'].data = self.tpf['PIXELS'].data[indx_good_times]
# Extract the relevant information from the FITS file:
self.lightcurve['time'] = Column(self.tpf['PIXELS'].data['TIME'], description='Time', dtype='float64', unit='TBJD')
self.lightcurve['timecorr'] = Column(self.tpf['PIXELS'].data['TIMECORR'], description='Barycentric time correction', unit='days', dtype='float32')
self.lightcurve['cadenceno'] = Column(self.tpf['PIXELS'].data['CADENCENO'], description='Cadence number', dtype='int32')
self.lightcurve['quality'] = Column(self.tpf['PIXELS'].data['QUALITY'], description='Quality flags', dtype='int32')
# World Coordinate System solution:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=FITSFixedWarning)
self.wcs = WCS(header=self.tpf['APERTURE'].header, relax=True)
# Get the positions of the stamp from the FITS header:
self._max_stamp = (
self.tpf['APERTURE'].header['CRVAL2P'] - 1,
self.tpf['APERTURE'].header['CRVAL2P'] - 1 + self.tpf[2].header['NAXIS2'],
self.tpf['APERTURE'].header['CRVAL1P'] - 1,
self.tpf['APERTURE'].header['CRVAL1P'] - 1 + self.tpf[2].header['NAXIS1']
)
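			# By the usual TPF convention, CRVAL1P/CRVAL2P hold the one-based physical
			# CCD coordinates of the stamp's first pixel, so _max_stamp ends up as
			# (row_min, row_max, col_min, col_max) in zero-based full-frame coordinates.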
self.pixel_offset_row = self.tpf['APERTURE'].header['CRVAL2P'] - 1
self.pixel_offset_col = self.tpf['APERTURE'].header['CRVAL1P'] - 1
logger.debug(
'Max stamp size: (%d, %d)',
self._max_stamp[1] - self._max_stamp[0],
self._max_stamp[3] - self._max_stamp[2]
)
# Get info for psf fit Gaussian statistic:
self.readnoise = self.tpf['PIXELS'].header.get('READNOIA', 10) # FIXME: This only loads readnoise from channel A!
self.gain = self.tpf['PIXELS'].header.get('GAINA', 100) # FIXME: This only loads gain from channel A!
self.num_frm = self.tpf['PIXELS'].header.get('NUM_FRM', 60) # Number of frames co-added in each timestamp.
			self.n_readout = self.tpf['PIXELS'].header.get('NREADOUT', 48) # Number of readouts.
# Load stuff from the common HDF5 file:
filepath_hdf5 = find_hdf5_files(self.input_folder, sector=self.sector, camera=self.camera, ccd=self.ccd)
if len(filepath_hdf5) != 1:
raise FileNotFoundError(f"HDF5 File not found. SECTOR={self.sector:d}, CAMERA={self.camera:d}, CCD={self.ccd:d}")
filepath_hdf5 = filepath_hdf5[0]
self.filepath_hdf5 = filepath_hdf5
self.hdf = h5py.File(filepath_hdf5, 'r')
# Correct timestamp offset that was in early data releases:
self.lightcurve['time'] = fixes.time_offset(self.lightcurve['time'], self.header, datatype='tpf')
# Directory where output files will be saved:
self.output_folder = os.path.join(
self.output_folder_base,
f'c{self.cadence:04d}',
f'{self.starid:011d}'[:5]
)
# Set directory where diagnostics plots should be saved to:
self.plot_folder = None
if self.plot:
self.plot_folder = os.path.join(self.output_folder, 'plots', f'{self.starid:011d}')
os.makedirs(self.plot_folder, exist_ok=True)
# The file to load the star catalog from:
self.catalog_file = find_catalog_files(self.input_folder, sector=self.sector, camera=self.camera, ccd=self.ccd)
self._catalog = None
logger.debug('Catalog file: %s', self.catalog_file)
if len(self.catalog_file) != 1:
raise FileNotFoundError(f"Catalog file not found: SECTOR={self.sector:d}, CAMERA={self.camera:d}, CCD={self.ccd:d}")
self.catalog_file = self.catalog_file[0]
# Load information about main target:
with contextlib.closing(sqlite3.connect(self.catalog_file)) as conn:
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
cursor.execute("SELECT ra,decl,ra_J2000,decl_J2000,pm_ra,pm_decl,tmag,teff FROM catalog WHERE starid=?;", [self.starid])
target = cursor.fetchone()
if target is None:
raise RuntimeError(f"Star could not be found in catalog: {self.starid:d}")
self.target = dict(target) # Dictionary of all main target properties.
cursor.execute("SELECT sector,reference_time,ticver FROM settings LIMIT 1;")
target = cursor.fetchone()
if target is not None:
self._catalog_reference_time = target['reference_time']
self.ticver = target['ticver']
cursor.close()
# Define the columns that have to be filled by the do_photometry method:
self.Ntimes = len(self.lightcurve['time'])
self.lightcurve['flux'] = Column(length=self.Ntimes, description='Flux', dtype='float64')
self.lightcurve['flux_err'] = Column(length=self.Ntimes, description='Flux Error', dtype='float64')
self.lightcurve['flux_background'] = Column(length=self.Ntimes, description='Background flux', dtype='float64')
self.lightcurve['pos_centroid'] = Column(length=self.Ntimes, shape=(2,), description='Centroid position', unit='pixels', dtype='float64')
self.lightcurve['pos_corr'] = Column(length=self.Ntimes, shape=(2,), description='Position correction', unit='pixels', dtype='float64')
# Correct timestamps for light-travel time in FFIs:
# http://docs.astropy.org/en/stable/time/#barycentric-and-heliocentric-light-travel-time-corrections
if self.datasource == 'ffi':
# Coordinates of the target as astropy SkyCoord object:
star_coord = coord.SkyCoord(
ra=self.target['ra'],
dec=self.target['decl'],
unit=units.deg,
frame='icrs'
)
# Use the SPICE kernels to get accurate positions of TESS, to be used in calculating
# the light-travel-time corrections:
with TESS_SPICE() as knl:
# Change the timestamps back to uncorrected JD (TDB) in the TESS frame:
time_nocorr = np.asarray(self.lightcurve['time'] - self.lightcurve['timecorr'])
# Use SPICE kernels to get new barycentric time correction for the stars coordinates:
tm, tc = knl.barycorr(time_nocorr + 2457000, star_coord)
self.lightcurve['time'] = tm - 2457000
self.lightcurve['timecorr'] = tc
# Init arrays that will be filled with lightcurve stuff:
self.final_phot_mask = None # Mask indicating which pixels were used in extraction of lightcurve.
self.final_position_mask = None # Mask indicating which pixels were used in extraction of position.
self.additional_headers = {} # Additional headers to be included in FITS files.
# Project target position onto the pixel plane:
self.target_pos_column, self.target_pos_row = self.wcs.all_world2pix(self.target['ra'], self.target['decl'], 0, ra_dec_order=True)
if self.datasource.startswith('tpf'):
self.target_pos_column += self.pixel_offset_col
self.target_pos_row += self.pixel_offset_row
logger.info("Target column: %f", self.target_pos_column)
logger.info("Target row: %f", self.target_pos_row)
# Store the jitter at the target position:
# TODO: TPF and FFI may end up with slightly different zero-points.
if self.datasource.startswith('tpf'):
self.lightcurve['pos_corr'][:] = np.column_stack((self.tpf[1].data['POS_CORR1'], self.tpf[1].data['POS_CORR2']))
else:
self.lightcurve['pos_corr'][:] = self.MovementKernel.jitter(self.lightcurve['time'] - self.lightcurve['timecorr'], self.target_pos_column, self.target_pos_row)
# Init the stamp:
self._stamp = None
self.target_pos_column_stamp = None # Main target CCD column position in stamp.
self.target_pos_row_stamp = None # Main target CCD row position in stamp.
self._set_stamp()
self._sumimage = None
self._images_cube = None
self._images_err_cube = None
self._backgrounds_cube = None
self._pixelflags_cube = None
self._aperture = None
self._psf = None
#----------------------------------------------------------------------------------------------
def __enter__(self):
return self
#----------------------------------------------------------------------------------------------
def __exit__(self, *args):
self.close()
#----------------------------------------------------------------------------------------------
def __del__(self):
self.close()
#----------------------------------------------------------------------------------------------
def close(self):
"""Close photometry object and close all associated open file handles."""
if hasattr(self, 'hdf') and self.hdf:
self.hdf.close()
if hasattr(self, 'tpf') and self.tpf:
self.tpf.close()
#----------------------------------------------------------------------------------------------
def clear_cache(self):
"""Clear internal cache"""
global hdf5_cache
hdf5_cache = {}
#----------------------------------------------------------------------------------------------
@property
def status(self):
"""The status of the photometry. From :py:class:`STATUS`."""
return self._status
#----------------------------------------------------------------------------------------------
def default_stamp(self):
"""
The default size of the stamp to use.
The stamp will be centered on the target star position, with
a width and height specified by this function. The stamp can
later be resized using :py:func:`resize_stamp`.
Returns:
int: Number of rows
int: Number of columns
Note:
This function is only used for FFIs. For postage stamps
the default stamp is the entire available postage stamp.
See Also:
:py:func:`resize_stamp`
"""
# Decide how many pixels to use based on lookup tables as a function of Tmag:
tmag = np.array([0.0, 0.52631579, 1.05263158, 1.57894737, 2.10526316,
2.63157895, 3.15789474, 3.68421053, 4.21052632, 4.73684211,
5.26315789, 5.78947368, 6.31578947, 6.84210526, 7.36842105,
7.89473684, 8.42105263, 8.94736842, 9.47368421, 10.0, 13.0])
height = np.array([831.98319063, 533.58494422, 344.0840884, 223.73963332,
147.31365728, 98.77856016, 67.95585074, 48.38157414,
35.95072974, 28.05639497, 23.043017, 19.85922009,
17.83731732, 16.5532873, 15.73785092, 15.21999971,
14.89113301, 14.68228285, 14.54965042, 14.46542084, 14.0])
width = np.array([157.71602062, 125.1238281, 99.99440209, 80.61896267,
65.6799962, 54.16166547, 45.28073365, 38.4333048,
33.15375951, 28.05639497, 23.043017, 19.85922009,
17.83731732, 16.5532873, 15.73785092, 15.21999971,
14.89113301, 14.68228285, 14.54965042, 14.46542084, 14.0])
Ncolumns = np.interp(self.target['tmag'], tmag, width)
Nrows = np.interp(self.target['tmag'], tmag, height)
# Round off and make sure we have minimum 15 pixels:
Nrows = np.maximum(np.ceil(Nrows), 15)
Ncolumns = np.maximum(np.ceil(Ncolumns), 15)
return Nrows, Ncolumns
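# Hedged sketch (illustration only): the stamp size is a plain linear
# interpolation in the tables above, rounded up and clipped at a minimum
# of 15 pixels. For a hypothetical Tmag = 6.0 target:
#
#     import numpy as np
#     Nrows = max(int(np.ceil(np.interp(6.0, tmag, height))), 15)
#     Ncolumns = max(int(np.ceil(np.interp(6.0, tmag, width))), 15)
#
# Brighter stars spread light over more pixels, which is why the tables
# grow steeply towards low Tmag.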
#----------------------------------------------------------------------------------------------
def resize_stamp(self, down=None, up=None, left=None, right=None, width=None, height=None):
"""
Resize the stamp in a given direction.
Parameters:
down (int, optional): Number of pixels to extend downwards.
up (int, optional): Number of pixels to extend upwards.
left (int, optional): Number of pixels to extend left.
right (int, optional): Number of pixels to extend right.
width (int, optional): Set the width of the stamp to this number of pixels.
This takes precedence over ``left`` and ``right`` if they are also provided.
height (int, optional): Set the height of the stamp to this number of pixels.
This takes precedence over ``up`` and ``down`` if they are also provided.
Returns:
bool: `True` if the stamp could be resized, `False` otherwise.
"""
old_stamp = self._stamp
self._stamp = list(self._stamp)
if up:
self._stamp[1] += up
if down:
self._stamp[0] -= down
if left:
self._stamp[2] -= left
if right:
self._stamp[3] += right
if height:
self._stamp[0] = int(np.round(self.target_pos_row)) - height//2
self._stamp[1] = int(np.round(self.target_pos_row)) + height//2 + 1
if width:
self._stamp[2] = int(np.round(self.target_pos_column)) - width//2
self._stamp[3] = int(np.round(self.target_pos_column)) + width//2 + 1
self._stamp = tuple(self._stamp)
# Set stamp and check if the stamp actually changed:
stamp_changed = self._set_stamp(compare_stamp=old_stamp)
# Count the number of times that we are resizing the stamp:
if stamp_changed:
self._details['stamp_resizes'] = self._details.get('stamp_resizes', 0) + 1
# Return if the stamp actually changed:
return stamp_changed
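# Usage sketch (assumes ``pho`` is an instance of this class with a stamp
# already defined): grow the stamp by two pixels in every direction and
# detect whether anything actually changed:
#
#     if not pho.resize_stamp(up=2, down=2, left=2, right=2):
#         print("Stamp unchanged; it may already touch the image edge.")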
#----------------------------------------------------------------------------------------------
def _set_stamp(self, compare_stamp=None):
"""
Set or update the current stamp.
If no stamp has been defined yet, a default one is created,
centered on the target star position. The stamp can later be
resized using :py:func:`resize_stamp`.
Parameters:
compare_stamp (tuple): Stamp to compare against, to check whether anything changed.
Returns:
bool: `True` if ``compare_stamp`` is set and has changed. If ``compare_stamp``
is not provided, always returns `True`.
See Also:
:py:func:`resize_stamp`
Note:
Stamp is zero-based, counted from the TOP of the image.
"""
logger = logging.getLogger(__name__)
if not self._stamp:
if self.datasource == 'ffi':
Nrows, Ncolumns = self.default_stamp()
logger.info("Setting default stamp with sizes (%d,%d)", Nrows, Ncolumns)
self._stamp = (
int(np.round(self.target_pos_row)) - Nrows//2,
int(np.round(self.target_pos_row)) + Nrows//2 + 1,
int(np.round(self.target_pos_column)) - Ncolumns//2,
int(np.round(self.target_pos_column)) + Ncolumns//2 + 1
)
else:
Nrows = self._max_stamp[1] - self._max_stamp[0]
Ncolumns = self._max_stamp[3] - self._max_stamp[2]
logger.info("Setting default stamp with sizes (%d,%d)", Nrows, Ncolumns)
self._stamp = self._max_stamp
# Limit the stamp to not go outside the limits of the images:
# TODO: We really should have a thorough cleanup of the self._stamp, self._max_stamp and self.pixel_offset_* mess!
self._stamp = list(self._stamp)
if self.datasource == 'ffi':
self._stamp[0] = int(np.maximum(self._stamp[0], self._max_stamp[0] + self.pixel_offset_row))
self._stamp[1] = int(np.minimum(self._stamp[1], self._max_stamp[1] + self.pixel_offset_row))
self._stamp[2] = int(np.maximum(self._stamp[2], self._max_stamp[2] + self.pixel_offset_col))
self._stamp[3] = int(np.minimum(self._stamp[3], self._max_stamp[3] + self.pixel_offset_col))
else:
self._stamp[0] = int(np.maximum(self._stamp[0], self._max_stamp[0]))
self._stamp[1] = int(np.minimum(self._stamp[1], self._max_stamp[1]))
self._stamp[2] = int(np.maximum(self._stamp[2], self._max_stamp[2]))
self._stamp[3] = int(np.minimum(self._stamp[3], self._max_stamp[3]))
self._stamp = tuple(self._stamp)
# Sanity checks:
if self._stamp[0] > self._stamp[1] or self._stamp[2] > self._stamp[3]:
raise ValueError("Invalid stamp selected")
# Store the stamp in details:
self._details['stamp'] = self._stamp
# Check if the stamp actually changed:
if self._stamp == compare_stamp:
return False
# Calculate main target position in stamp:
self.target_pos_row_stamp = self.target_pos_row - self._stamp[0]
self.target_pos_column_stamp = self.target_pos_column - self._stamp[2]
# Force sum-image and catalog to be recalculated next time:
self._sumimage = None
self._catalog = None
self._images_cube = None
self._backgrounds_cube = None
self._pixelflags_cube = None
self._aperture = None
self._psf = None
return True
#----------------------------------------------------------------------------------------------
def get_pixel_grid(self):
"""
Returns mesh-grid of the pixels (1-based) in the stamp.
Returns:
tuple(cols, rows): Meshgrid of pixel coordinates in the current stamp.
"""
return np.meshgrid(
np.arange(self._stamp[2]+1, self._stamp[3]+1, 1, dtype='int32'),
np.arange(self._stamp[0]+1, self._stamp[1]+1, 1, dtype='int32')
)
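# Usage sketch: the returned meshgrids have the same shape as the stamp
# images, so pixel-wise coordinate maths is straightforward, e.g. the
# distance of every pixel to the main target (grids are 1-based):
#
#     cols, rows = pho.get_pixel_grid()
#     dist = np.sqrt((cols - (pho.target_pos_column + 1))**2
#         + (rows - (pho.target_pos_row + 1))**2)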
#----------------------------------------------------------------------------------------------
@property
def stamp(self):
"""
Tuple indicating the stamp's position within the larger image.
Returns:
tuple: Tuple of (row_min, row_max, col_min, col_max).
"""
return self._stamp
#----------------------------------------------------------------------------------------------
def _load_cube(self, tpf_field='FLUX', hdf_group='images', full_cube=None):
"""
Load data cube into memory from TPF and HDF5 files depending on datasource.
"""
if self.datasource == 'ffi':
ir1 = self._stamp[0] - self.pixel_offset_row
ir2 = self._stamp[1] - self.pixel_offset_row
ic1 = self._stamp[2] - self.pixel_offset_col
ic2 = self._stamp[3] - self.pixel_offset_col
if full_cube is None:
# We don't have an in-memory version of the full cube, so let us
# create the cube by loading the cutouts of each image:
cube = np.empty((ir2-ir1, ic2-ic1, self.Ntimes), dtype='float32')
if hdf_group in self.hdf:
for k in range(self.Ntimes):
cube[:, :, k] = self.hdf[hdf_group + '/%04d' % k][ir1:ir2, ic1:ic2]
else:
cube[:, :, :] = np.NaN
else:
# We have an in-memory version of the full cube.
# TODO: Will this create a copy of the data in memory?
cube = full_cube[ir1:ir2, ic1:ic2, :]
else:
ir1 = self._stamp[0] - self._max_stamp[0]
ir2 = self._stamp[1] - self._max_stamp[0]
ic1 = self._stamp[2] - self._max_stamp[2]
ic2 = self._stamp[3] - self._max_stamp[2]
cube = np.empty((ir2-ir1, ic2-ic1, self.Ntimes), dtype='float32')
for k in range(self.Ntimes):
cube[:, :, k] = self.tpf['PIXELS'].data[tpf_field][k][ir1:ir2, ic1:ic2]
return cube
#----------------------------------------------------------------------------------------------
@property
def images_cube(self):
"""
Image cube containing all the images as a function of time.
Returns:
ndarray: Three-dimensional array with shape ``(rows, cols, times)``, where
``rows`` is the number of rows in the image, ``cols`` is the number
of columns and ``times`` is the number of timestamps.
Note:
The images have had the large-scale background subtracted. If needed,
the backgrounds can be added again from :py:meth:`backgrounds`
or :py:meth:`backgrounds_cube`.
Example:
>>> pho = BasePhotometry(starid)
>>> print(pho.images_cube.shape)
(10, 10, 1399)
See Also:
:py:meth:`images`, :py:meth:`backgrounds`, :py:meth:`backgrounds_cube`
"""
if self._images_cube is None:
self._images_cube = self._load_cube(tpf_field='FLUX', hdf_group='images', full_cube=self._images_cube_full)
return self._images_cube
#----------------------------------------------------------------------------------------------
@property
def images_err_cube(self):
"""
Image cube containing all the uncertainty images as a function of time.
Returns:
ndarray: Three-dimensional array with shape ``(rows, cols, times)``, where
``rows`` is the number of rows in the image, ``cols`` is the number
of columns and ``times`` is the number of timestamps.
Example:
>>> pho = BasePhotometry(starid)
>>> print(pho.images_err_cube.shape)
(10, 10, 1399)
See Also:
:py:meth:`images`, :py:meth:`backgrounds`, :py:meth:`backgrounds_cube`
"""
if self._images_err_cube is None:
self._images_err_cube = self._load_cube(tpf_field='FLUX_ERR', hdf_group='images_err', full_cube=self._images_err_cube_full)
return self._images_err_cube
#----------------------------------------------------------------------------------------------
@property
def backgrounds_cube(self):
"""
Image cube containing all the background images as a function of time.
Returns:
ndarray: Three-dimensional array with shape ``(rows, cols, times)``, where
``rows`` is the number of rows in the image, ``cols`` is the number
of columns and ``times`` is the number of timestamps.
Example:
>>> pho = BasePhotometry(starid)
>>> print(pho.backgrounds_cube.shape)
(10, 10, 1399)
See Also:
:py:meth:`backgrounds`, :py:meth:`images_cube`, :py:meth:`images`
"""
if self._backgrounds_cube is None:
self._backgrounds_cube = self._load_cube(tpf_field='FLUX_BKG', hdf_group='backgrounds', full_cube=self._backgrounds_cube_full)
return self._backgrounds_cube
#----------------------------------------------------------------------------------------------
@property
def pixelflags_cube(self):
"""
Cube containing all pixel flag images as a function of time.
Returns:
ndarray: Three-dimensional array with shape ``(rows, cols, ffi_times)``, where
``rows`` is the number of rows in the image, ``cols`` is the number
of columns and ``ffi_times`` is the number of timestamps in the FFIs.
Note:
This function will only return flags on the timestamps of the FFIs, even when
a TPF is being processed.
Example:
>>> pho = BasePhotometry(starid)
>>> print(pho.pixelflags_cube.shape)
(10, 10, 1399)
See Also:
:py:meth:`pixelflags`, :py:meth:`backgrounds_cube`, :py:meth:`images_cube`.
"""
if self._pixelflags_cube is None:
# We can't use the _load_cube function here, since we always have
# to load from the HDF5 file, even when we are processing a TPF.
ir1 = self._stamp[0] - self.hdf['images'].attrs.get('PIXEL_OFFSET_ROW', 0)
ir2 = self._stamp[1] - self.hdf['images'].attrs.get('PIXEL_OFFSET_ROW', 0)
ic1 = self._stamp[2] - self.hdf['images'].attrs.get('PIXEL_OFFSET_COLUMN', 44)
ic2 = self._stamp[3] - self.hdf['images'].attrs.get('PIXEL_OFFSET_COLUMN', 44)
if self._pixelflags_cube_full is None:
# We don't have an in-memory version of the full cube, so let us
# create the cube by loading the cutouts of each image:
cube = np.empty((ir2-ir1, ic2-ic1, len(self.hdf['time'])), dtype='uint8')
if 'pixel_flags' in self.hdf:
for k in range(len(self.hdf['time'])):
cube[:, :, k] = self.hdf['pixel_flags/%04d' % k][ir1:ir2, ic1:ic2]
else:
cube[:, :, :] = 0
else:
# We have an in-memory version of the full cube.
# TODO: Will this create a copy of the data in memory?
cube = self._pixelflags_cube_full[ir1:ir2, ic1:ic2, :]
self._pixelflags_cube = cube
return self._pixelflags_cube
#----------------------------------------------------------------------------------------------
@property
def pixelflags(self):
"""
Iterator that will loop through the pixel flag images.
Returns:
iterator: Iterator which can be used to loop through the pixel flags images.
Example:
>>> pho = BasePhotometry(starid)
>>> for img in pho.pixelflags:
...     print(img)
See Also:
:py:meth:`pixelflags_cube`, :py:meth:`images`, :py:meth:`backgrounds`
"""
# Yield slices from the data-cube as an iterator:
if self.datasource == 'ffi':
for k in range(self.Ntimes):
yield self.pixelflags_cube[:, :, k]
else:
hdf_times = np.asarray(self.hdf['time']) - np.asarray(self.hdf['timecorr'])
for k in range(self.Ntimes):
indx = find_nearest(hdf_times, self.lightcurve['time'][k] - self.lightcurve['timecorr'][k])
yield self.pixelflags_cube[:, :, indx]
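# ``find_nearest`` is assumed to return the index of the array element
# closest to a given value. A minimal sketch of such a helper (the real
# implementation lives elsewhere in the package):
#
#     def find_nearest_sketch(array, value):
#         # Index of the entry in ``array`` closest to ``value``:
#         return int(np.argmin(np.abs(np.asarray(array) - value)))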
#----------------------------------------------------------------------------------------------
@property
def images(self):
"""
Iterator that will loop through the image stamps.
Returns:
iterator: Iterator which can be used to loop through the image stamps.
Note:
The images have had the large-scale background subtracted. If needed,
the backgrounds can be added again from :py:meth:`backgrounds`.
Note:
For each image, this function will actually load the necessary
data from disk, so don't loop through it more than you absolutely
have to, in order to save I/O.
Example:
>>> pho = BasePhotometry(starid)
>>> for img in pho.images:
...     print(img)
See Also:
:py:meth:`images_cube`, :py:meth:`images_err`, :py:meth:`backgrounds`
"""
# Yield slices from the data-cube as an iterator:
for k in range(self.Ntimes):
yield self.images_cube[:, :, k]
#----------------------------------------------------------------------------------------------
@property
def images_err(self):
"""
Iterator that will loop through the uncertainty image stamps.
Returns:
iterator: Iterator which can be used to loop through the uncertainty image stamps.
Example:
>>> pho = BasePhotometry(starid)
>>> for imgerr in pho.images_err:
...     print(imgerr)
See Also:
:py:meth:`images_err_cube`, :py:meth:`images`, :py:meth:`images_cube`, :py:meth:`backgrounds`
"""
# Yield slices from the data-cube as an iterator:
for k in range(self.Ntimes):
yield self.images_err_cube[:, :, k]
#----------------------------------------------------------------------------------------------
@property
def backgrounds(self):
"""
Iterator that will loop through the background-image stamps.
Returns:
iterator: Iterator which can be used to loop through the background-image stamps.
Note:
For each image, this function will actually load the necessary
data from disk, so don't loop through it more than you absolutely
have to, in order to save I/O.
Example:
>>> pho = BasePhotometry(starid)
>>> for img in pho.backgrounds:
...     print(img)
See Also:
:py:meth:`backgrounds_cube`, :py:meth:`images`
"""
# Yield slices from the data-cube as an iterator:
for k in range(self.Ntimes):
yield self.backgrounds_cube[:, :, k]
#----------------------------------------------------------------------------------------------
@property
def sumimage(self):
"""
Average image.
Calculated as the mean over time of all good images (quality=0).
For FFIs this has been pre-calculated and for postage-stamps it is calculated
on-the-fly when needed.
Returns:
numpy.array: Average image across all valid timestamps.
"""
if self._sumimage is None:
if self.datasource == 'ffi':
ir1 = self._stamp[0] - self.pixel_offset_row
ir2 = self._stamp[1] - self.pixel_offset_row
ic1 = self._stamp[2] - self.pixel_offset_col
ic2 = self._stamp[3] - self.pixel_offset_col
self._sumimage = self._sumimage_full[ir1:ir2, ic1:ic2]
else:
self._sumimage = np.zeros((self._stamp[1]-self._stamp[0], self._stamp[3]-self._stamp[2]), dtype='float64')
Nimg = np.zeros_like(self._sumimage, dtype='int32')
for k, img in enumerate(self.images):
if TESSQualityFlags.filter(self.lightcurve['quality'][k]):
isgood = np.isfinite(img)
img[~isgood] = 0
Nimg += np.asarray(isgood, dtype='int32')
self._sumimage += img
isgood = (Nimg > 0)
self._sumimage[isgood] /= Nimg[isgood]
self._sumimage[~isgood] = np.NaN
if self.plot:
fig, ax = plt.subplots()
plot_image(self._sumimage, ax=ax, offset_axes=(self._stamp[2]+1, self._stamp[0]+1),
xlabel='Pixel Column Number', ylabel='Pixel Row Number', cbar='right')
ax.plot(self.target_pos_column + 1, self.target_pos_row + 1, 'r+')
save_figure(os.path.join(self.plot_folder, 'sumimage'), fig=fig)
plt.close(fig)
return self._sumimage
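# Hedged standalone sketch of the averaging performed above for TPFs: a
# NaN-aware mean over the time axis, restricted to good-quality cadences.
# Shapes and the quality mask below are made up for illustration:
#
#     import numpy as np
#     cube = np.random.randn(10, 10, 100).astype('float32')  # (rows, cols, times)
#     good = np.ones(100, dtype=bool)  # e.g. from TESSQualityFlags.filter(quality)
#     avg = np.nanmean(cube[:, :, good], axis=2)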
#----------------------------------------------------------------------------------------------
@property
def aperture(self):
"""
Flags for each pixel, as defined by the TESS data product manual.
Returns:
numpy.array: 2D array of flags for each pixel.
"""
if self._aperture is None:
if self.datasource == 'ffi':
# Make aperture image:
cols, rows = self.get_pixel_grid()
self._aperture = np.asarray(np.isfinite(self.sumimage), dtype='int32')
# Add mapping onto TESS output channels:
self._aperture[(45 <= cols) & (cols <= 556)] |= 32 # CCD output A
self._aperture[(557 <= cols) & (cols <= 1068)] |= 64 # CCD output B
self._aperture[(1069 <= cols) & (cols <= 1580)] |= 128 # CCD output C
self._aperture[(1581 <= cols) & (cols <= 2092)] |= 256 # CCD output D
# Add information about which pixels were used for background calculation:
if 'backgrounds_pixels_used' in self.hdf:
# Coordinates in the FFI of image:
ir1 = self._stamp[0] - self.pixel_offset_row
ir2 = self._stamp[1] - self.pixel_offset_row
ic1 = self._stamp[2] - self.pixel_offset_col
ic2 = self._stamp[3] - self.pixel_offset_col
# Extract the subimage of which pixels were used in background:
bpu = self.hdf['backgrounds_pixels_used'][ir1:ir2, ic1:ic2]
self._aperture[bpu] |= 4
else:
# Load the aperture from the TPF:
ir1 = self._stamp[0] - self._max_stamp[0]
ir2 = self._stamp[1] - self._max_stamp[0]
ic1 = self._stamp[2] - self._max_stamp[2]
ic2 = self._stamp[3] - self._max_stamp[2]
self._aperture = np.asarray(self.tpf['APERTURE'].data[ir1:ir2, ic1:ic2], dtype='int32')
# Remove the flags for SPOC mask and centroids:
self._aperture[(self._aperture & 2) != 0] -= 2
self._aperture[(self._aperture & 8) != 0] -= 8
return self._aperture
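# Hedged sketch of decoding the bitmask: each pixel value is a bitwise OR
# of flags, so membership tests use "&". E.g. pixels flagged as used for
# the background (bit 4) that also sit on CCD output B (bit 64, see above):
#
#     aper = pho.aperture
#     bkg_on_B = (aper & 4 != 0) & (aper & 64 != 0)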
#----------------------------------------------------------------------------------------------
@property
def settings(self):
"""
Pipeline settings and constants.
Returns:
:class:`configparser.ConfigParser`: Pipeline settings, loaded from settings file.
See also:
:func:`load_settings`.
"""
if not hasattr(self, '_settings') or self._settings is None:
self._settings = load_settings()
return self._settings
#----------------------------------------------------------------------------------------------
@property
def catalog(self):
"""
Catalog of stars in the current stamp.
The table contains the following columns:
* ``starid``: TIC identifier.
* ``tmag``: TESS magnitude.
* ``ra``: Right ascension in degrees at time of observation.
* ``dec``: Declination in degrees at time of observation.
* ``row``: Pixel row on CCD.
* ``column``: Pixel column on CCD.
* ``row_stamp``: Pixel row relative to the stamp.
* ``column_stamp``: Pixel column relative to the stamp.
Returns:
``astropy.table.Table``: Table with all known stars falling within the current stamp.
Example:
If ``pho`` is an instance of :py:class:`BasePhotometry`:
>>> pho.catalog['tmag']
>>> pho.catalog[('starid', 'tmag', 'row', 'column')]
See Also:
:py:meth:`catalog_attime`
"""
if not self._catalog:
# Pixel-positions of the corners of the current stamp:
corners = np.array([
[self._stamp[2]-0.5, self._stamp[0]-0.5],
[self._stamp[2]-0.5, self._stamp[1]-0.5],
[self._stamp[3]-0.5, self._stamp[0]-0.5],
[self._stamp[3]-0.5, self._stamp[1]-0.5]
], dtype='float64')
# Because the TPF world coordinate solution is relative to the stamp,
# subtract the pixel offset from these:
if self.datasource.startswith('tpf'):
corners[:, 0] -= self.pixel_offset_col
corners[:, 1] -= self.pixel_offset_row
corners_radec = self.wcs.all_pix2world(corners, 0, ra_dec_order=True)
# Select only the stars within the current stamp:
# TODO: Change to opening in read-only mode: sqlite3.connect("file:" + self.catalog_file + "?mode=ro", uri=True). Requires Python 3.4
with contextlib.closing(sqlite3.connect(self.catalog_file)) as conn:
cursor = conn.cursor()
cat = catalog_sqlite_search_footprint(cursor, corners_radec, columns='starid,ra,decl,tmag', buffer_size=5)
cursor.close()
if not cat:
# Nothing was found. Return an empty table with the correct format:
self._catalog = Table(
names=('starid', 'ra', 'dec', 'tmag', 'column', 'row', 'column_stamp', 'row_stamp'),
dtype=('int64', 'float64', 'float64', 'float32', 'float32', 'float32', 'float32', 'float32')
)
else:
# Convert data to astropy table for further use:
self._catalog = Table(
rows=cat,
names=('starid', 'ra', 'dec', 'tmag'),
dtype=('int64', 'float64', 'float64', 'float32')
)
# Use the WCS to find pixel coordinates of stars in mask:
pixel_coords = self.wcs.all_world2pix(np.column_stack((self._catalog['ra'], self._catalog['dec'])), 0, ra_dec_order=True)
# Because the TPF world coordinate solution is relative to the stamp,
# add the pixel offset to these:
if self.datasource.startswith('tpf'):
pixel_coords[:,0] += self.pixel_offset_col
pixel_coords[:,1] += self.pixel_offset_row
# Create columns with pixel coordinates:
col_x = Column(data=pixel_coords[:,0], name='column', dtype='float32')
col_y = Column(data=pixel_coords[:,1], name='row', dtype='float32')
# Subtract the positions of the edge of the current stamp:
pixel_coords[:,0] -= self._stamp[2]
pixel_coords[:,1] -= self._stamp[0]
# Add the pixel positions to the catalog table:
col_x_stamp = Column(data=pixel_coords[:,0], name='column_stamp', dtype='float32')
col_y_stamp = Column(data=pixel_coords[:,1], name='row_stamp', dtype='float32')
self._catalog.add_columns([col_x, col_y, col_x_stamp, col_y_stamp])
return self._catalog
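# Usage sketch: the catalog is a plain astropy Table, so standard boolean
# masking applies, e.g. neighbours brighter than Tmag = 12 with
# non-negative stamp-relative coordinates:
#
#     cat = pho.catalog
#     bright = cat[(cat['tmag'] < 12)
#         & (cat['row_stamp'] >= 0) & (cat['column_stamp'] >= 0)]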
#----------------------------------------------------------------------------------------------
@property
def MovementKernel(self):
"""
Movement Kernel which allows calculation of positions on the focal plane as a function of time.
Instance of :py:class:`image_motion.ImageMovementKernel`.
"""
if self._MovementKernel is None:
default_movement_kernel = 'wcs' # The default kernel to use - set to 'hdf5' if we should use the one from prepare instead
if self.datasource == 'ffi' and default_movement_kernel == 'wcs' and isinstance(self.hdf['wcs'], h5py.Group):
self._MovementKernel = ImageMovementKernel(warpmode='wcs', wcs_ref=self.wcs)
self._MovementKernel.load_series(self.lightcurve['time'] - self.lightcurve['timecorr'], [self.hdf['wcs'][dset][0] for dset in self.hdf['wcs']])
elif self.datasource == 'ffi' and 'movement_kernel' in self.hdf:
self._MovementKernel = ImageMovementKernel(warpmode=self.hdf['movement_kernel'].attrs.get('warpmode'))
self._MovementKernel.load_series(self.lightcurve['time'] - self.lightcurve['timecorr'], self.hdf['movement_kernel'])
elif self.datasource.startswith('tpf'):
# Create translation kernel from the positions provided in the
# target pixel file.
# Load kernels from FITS file:
kernels = np.column_stack((self.tpf[1].data['POS_CORR1'], self.tpf[1].data['POS_CORR2']))
indx = np.isfinite(self.lightcurve['time']) & np.all(np.isfinite(kernels), axis=1)
times = self.lightcurve['time'][indx] - self.lightcurve['timecorr'][indx]
kernels = kernels[indx]
# Find the timestamp closest to the reference time:
refindx = find_nearest(times, self._catalog_reference_time)
# Rescale kernels to the reference point:
kernels = np.column_stack((self.tpf[1].data['POS_CORR1'][indx], self.tpf[1].data['POS_CORR2'][indx]))
kernels[:, 0] -= kernels[refindx, 0]
kernels[:, 1] -= kernels[refindx, 1]
# Create kernel object:
self._MovementKernel = ImageMovementKernel(warpmode='translation')
self._MovementKernel.load_series(times, kernels)
else:
# If we reached this point, we don't have enough information to
# define the ImageMovementKernel, so fall back to a kernel that
# leaves positions unaltered:
self._MovementKernel = ImageMovementKernel(warpmode='unchanged')
return self._MovementKernel
#----------------------------------------------------------------------------------------------
def catalog_attime(self, time):
"""
Catalog of stars, calculated at a given time-stamp, so CCD positions are
modified according to the measured spacecraft jitter.
Parameters:
time (float): Time in MJD when to calculate catalog.
Returns:
`astropy.table.Table`: Table with the same columns as :py:meth:`catalog`,
but with ``column``, ``row``, ``column_stamp`` and ``row_stamp`` calculated
at the given timestamp.
See Also:
:py:meth:`catalog`
"""
# If we didn't have enough information, just return the unchanged catalog:
if self.MovementKernel.warpmode == 'unchanged':
return self.catalog
# Get the reference catalog:
xy = np.column_stack((self.catalog['column'], self.catalog['row']))
# Lookup the position corrections in CCD coordinates:
jitter = self.MovementKernel.interpolate(time, xy)
# Modify the reference catalog:
cat = deepcopy(self.catalog)
cat['column'] += jitter[:, 0]
cat['row'] += jitter[:, 1]
cat['column_stamp'] += jitter[:, 0]
cat['row_stamp'] += jitter[:, 1]
return cat
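# Usage sketch: evaluate the catalog at the timestamp of each cadence,
# e.g. to follow a neighbour star through the spacecraft jitter:
#
#     for t in pho.lightcurve['time']:
#         cat_t = pho.catalog_attime(t)
#         # cat_t['column'] and cat_t['row'] are now jitter-corrected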
#----------------------------------------------------------------------------------------------
@property
def psf(self):
"""
Point Spread Function.
Returns:
:class:`psf.PSF`: PSF object for the given target position.
See Also:
:class:`psf.PSF`
"""
if self._psf is None:
self._psf = PSF(self.sector, self.camera, self.ccd, self.stamp)
return self._psf
#----------------------------------------------------------------------------------------------
def delete_plots(self):
"""
Delete all files in :py:attr:`plot_folder`.
If plotting is not enabled, this method does nothing and will therefore
leave any existing files in the plot folder, should it already exist.
"""
logger = logging.getLogger(__name__)
if self.plot and self.plot_folder is not None:
for f in glob.iglob(os.path.join(self.plot_folder, '*')):
logger.debug("Deleting plot '%s'", f)
os.unlink(f)
#----------------------------------------------------------------------------------------------
def report_details(self, error=None, skip_targets=None):
"""
Report details of the processing back to the overlying scheduler system.
Parameters:
error (string): Error message to be logged with the results.
skip_targets (list): List of starids that can be safely skipped.
"""
if skip_targets is not None:
self._details['skip_targets'] = skip_targets
if error is not None:
if 'errors' not in self._details: self._details['errors'] = []
self._details['errors'].append(error)
#----------------------------------------------------------------------------------------------
def do_photometry(self):
"""
Run photometry algorithm.
This should fill the :py:attr:`lightcurve` table with all relevant parameters.
Returns:
The status of the photometry.
Raises:
NotImplementedError
"""
raise NotImplementedError("You have to implement the actual lightcurve extraction yourself... Sorry!")
#----------------------------------------------------------------------------------------------
def photometry(self, *args, **kwargs):
"""
Run photometry.
Will run the :py:meth:`do_photometry` method, check
some of the output, and calculate various performance
metrics.
See Also:
:py:meth:`do_photometry`
"""
logger = logging.getLogger(__name__)
# Run the photometry:
self._status = self.do_photometry(*args, **kwargs)
# Check that the status has been changed:
if self._status == STATUS.UNKNOWN:
raise ValueError("STATUS was not set by do_photometry")
# Calculate performance metrics if status was not an error:
if self._status in (STATUS.OK, STATUS.WARNING):
# Simple check that entire lightcurve is not NaN:
if allnan(self.lightcurve['flux']):
raise ValueError("Final lightcurve fluxes are all NaNs")
if allnan(self.lightcurve['flux_err']):
raise ValueError("Final lightcurve errors are all NaNs")
# Pick out the part of the lightcurve that has a good quality
# and only use this subset to calculate the diagnostic metrics:
indx_good = TESSQualityFlags.filter(self.lightcurve['quality'])
goodlc = self.lightcurve[indx_good]
# Calculate the mean flux level:
self._details['mean_flux'] = nanmedian(goodlc['flux'])
# Convert to relative flux:
flux = (goodlc['flux'] / self._details['mean_flux']) - 1
flux_err = np.abs(1/self._details['mean_flux']) * goodlc['flux_err']
# Calculate noise metrics of the relative flux:
self._details['variance'] = nanvar(flux, ddof=1)
self._details['rms_hour'] = rms_timescale(goodlc['time'], flux, timescale=3600/86400)
self._details['ptp'] = nanmedian(np.abs(np.diff(flux)))
# Calculate the median centroid position in pixel coordinates:
self._details['pos_centroid'] = nanmedian(goodlc['pos_centroid'], axis=0)
# Calculate variability used e.g. in CBV selection of stars:
indx = np.isfinite(goodlc['time']) & np.isfinite(flux) & np.isfinite(flux_err)
# Do a more robust fitting with a third-order polynomial,
# where we are catching cases where the fitting goes bad.
# This happens in the test-data because there are so few points.
if np.any(indx):
mintime = np.nanmin(goodlc['time'][indx])
with warnings.catch_warnings():
warnings.filterwarnings('error', category=np.RankWarning)
try:
p = np.polyfit(goodlc['time'][indx] - mintime, flux[indx], 3, w=1/flux_err[indx])
detrend = np.polyval(p, goodlc['time'] - mintime)
except np.RankWarning: # pragma: no cover
logger.warning("Could not detrend lightcurve for variability calculation.")
detrend = 0
else:
logger.warning("Could not detrend lightcurve for variability calculation.")
detrend = 0
# Calculate the variability as the standard deviation of the
# polynomial-subtracted lightcurve divided by the median error:
self._details['variability'] = nanstd(flux - detrend) / nanmedian(flux_err)
if self.final_phot_mask is not None:
# Calculate the total number of pixels in the mask:
self._details['mask_size'] = int(np.sum(self.final_phot_mask))
# Measure the total flux on the edge of the stamp,
# if the mask is touching the edge of the stamp:
# The np.nansum here returns zero on an empty selection.
edge = np.zeros_like(self.sumimage, dtype='bool')
edge[:, (0,-1)] = True
edge[(0,-1), 1:-1] = True
self._details['edge_flux'] = np.nansum(self.sumimage[self.final_phot_mask & edge])
if self.additional_headers and 'AP_CONT' in self.additional_headers:
self._details['contamination'] = self.additional_headers['AP_CONT'][0]
# Unpack any errors or warnings that were sent to the logger during the photometry:
if self.message_queue:
if not self._details.get('errors'):
self._details['errors'] = []
self._details['errors'] += self.message_queue
self.message_queue.clear()
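# Hedged sketch of the two simplest noise metrics above, computed on a
# made-up flux series (``raw_flux`` is illustrative only):
#
#     import numpy as np
#     raw_flux = 1000 + np.random.randn(500)  # e-/s, fake data
#     mean_flux = np.nanmedian(raw_flux)
#     rel_flux = raw_flux / mean_flux - 1
#     ptp = np.nanmedian(np.abs(np.diff(rel_flux)))  # point-to-point scatter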
#----------------------------------------------------------------------------------------------
def save_lightcurve(self, output_folder=None, version=None):
"""
Save generated lightcurve to file.
Parameters:
output_folder (string, optional): Path to directory where to save lightcurve.
If ``None`` the directory specified in the attribute ``output_folder`` is used.
version (integer, optional): Version number to add to the FITS header and file name.
If not set, the :py:attr:`version` is used.
Returns:
string: Path to the generated file.
"""
# Check if another output folder was provided:
if output_folder is None:
output_folder = self.output_folder
if version is None:
if self.version is None:
raise ValueError("VERSION has not been set")
else:
version = self.version
# Make sure that the directory exists:
os.makedirs(output_folder, exist_ok=True)
# Create sumimage before changing the self.lightcurve object:
SumImage = self.sumimage
# Propagate the Background Shenanigans flags into the quality flags if
# one was detected somewhere in the final stamp at the given timestamp:
quality = np.zeros_like(self.lightcurve['time'], dtype='int32')
for k, flg in enumerate(self.pixelflags):
if np.any(flg & PixelQualityFlags.BackgroundShenanigans != 0):
quality[k] |= CorrectorQualityFlags.BackgroundShenanigans
# Remove timestamps that have no defined time:
# This is a problem in the Sector 1 alert data.
indx = np.isfinite(self.lightcurve['time'])
self.lightcurve = self.lightcurve[indx]
# Get the current date for the files:
now = datetime.datetime.now()
# Primary FITS header:
hdu = fits.PrimaryHDU()
hdu.header['NEXTEND'] = (3 + int(hasattr(self, 'halo_weightmap')), 'number of standard extensions')
hdu.header['EXTNAME'] = ('PRIMARY', 'name of extension')
hdu.header['ORIGIN'] = ('TASOC/Aarhus', 'institution responsible for creating this file')
hdu.header['DATE'] = (now.strftime("%Y-%m-%d"), 'date the file was created')
hdu.header['TELESCOP'] = ('TESS', 'telescope')
hdu.header['INSTRUME'] = ('TESS Photometer', 'detector type')
hdu.header['FILTER'] = ('TESS', 'Photometric bandpass filter')
hdu.header['OBJECT'] = (f"TIC {self.starid:d}", 'string version of TICID')
hdu.header['TICID'] = (self.starid, 'unique TESS target identifier')
hdu.header['CAMERA'] = (self.camera, 'Camera number')
hdu.header['CCD'] = (self.ccd, 'CCD number')
hdu.header['SECTOR'] = (self.sector, 'Observing sector')
# Versions:
hdu.header['PROCVER'] = (__version__, 'Version of photometry pipeline')
hdu.header['FILEVER'] = ('1.5', 'File format version')
hdu.header['DATA_REL'] = (self.data_rel, 'Data release number')
hdu.header['VERSION'] = (version, 'Version of the processing')
hdu.header['PHOTMET'] = (self.method, 'Photometric method used')
# Object properties:
if self.target['pm_ra'] is None or self.target['pm_decl'] is None:
pmtotal = fits.card.Undefined()
else:
pmtotal = np.sqrt(self.target['pm_ra']**2 + self.target['pm_decl']**2)
hdu.header['RADESYS'] = ('ICRS', 'reference frame of celestial coordinates')
hdu.header['EQUINOX'] = (2000.0, 'equinox of celestial coordinate system')
hdu.header['RA_OBJ'] = (self.target['ra_J2000'], '[deg] Right ascension')
hdu.header['DEC_OBJ'] = (self.target['decl_J2000'], '[deg] Declination')
hdu.header['PMRA'] = (fits.card.Undefined() if not self.target['pm_ra'] else self.target['pm_ra'], '[mas/yr] RA proper motion')
hdu.header['PMDEC'] = (fits.card.Undefined() if not self.target['pm_decl'] else self.target['pm_decl'], '[mas/yr] Dec proper motion')
hdu.header['PMTOTAL'] = (pmtotal, '[mas/yr] total proper motion')
hdu.header['TESSMAG'] = (self.target['tmag'], '[mag] TESS magnitude')
hdu.header['TEFF'] = (fits.card.Undefined() if not self.target['teff'] else self.target['teff'], '[K] Effective temperature')
hdu.header['TICVER'] = (self.ticver, 'TESS Input Catalog version')
# Cosmic ray headers:
hdu.header['CRMITEN'] = (self.header['CRMITEN'], 'spacecraft cosmic ray mitigation enabled')
hdu.header['CRBLKSZ'] = (self.header['CRBLKSZ'], '[exposures] s/c cosmic ray mitigation block siz')
hdu.header['CRSPOC'] = (self.header['CRSPOC'], 'SPOC cosmic ray cleaning enabled')
# Add K2P2 Settings to the header of the file:
if self.additional_headers:
for key, value in self.additional_headers.items():
hdu.header[key] = value
# Add Data Validation header, which will be filled later on:
hdu.header['DATAVAL'] = (0, 'Data validation flags')
# Make binary table:
# Define table columns:
c1 = fits.Column(name='TIME', format='D', disp='D14.7', unit='BJD - 2457000, days', array=self.lightcurve['time'])
c2 = fits.Column(name='TIMECORR', format='E', disp='E13.6', unit='d', array=self.lightcurve['timecorr'])
c3 = fits.Column(name='CADENCENO', format='J', disp='I10', array=self.lightcurve['cadenceno'])
c4 = fits.Column(name='FLUX_RAW', format='D', disp='E26.17', unit='e-/s', array=self.lightcurve['flux'])
c5 = fits.Column(name='FLUX_RAW_ERR', format='D', disp='E26.17', unit='e-/s', array=self.lightcurve['flux_err'])
c6 = fits.Column(name='FLUX_BKG', format='D', disp='E26.17', unit='e-/s', array=self.lightcurve['flux_background'])
c7 = fits.Column(name='FLUX_CORR', format='D', disp='E26.17', unit='ppm', array=np.full_like(self.lightcurve['time'], np.nan))
c8 = fits.Column(name='FLUX_CORR_ERR', format='D', disp='E26.17', unit='ppm', array=np.full_like(self.lightcurve['time'], np.nan))
c9 = fits.Column(name='QUALITY', format='J', disp='B16.16', array=quality)
c10 = fits.Column(name='PIXEL_QUALITY', format='J', disp='B16.16', array=self.lightcurve['quality'])
c11 = fits.Column(name='MOM_CENTR1', format='D', disp='F10.5', unit='pixels', array=self.lightcurve['pos_centroid'][:, 0]) # column
c12 = fits.Column(name='MOM_CENTR2', format='D', disp='F10.5', unit='pixels', array=self.lightcurve['pos_centroid'][:, 1]) # row
c13 = fits.Column(name='POS_CORR1', format='D', disp='F14.7', unit='pixels', array=self.lightcurve['pos_corr'][:, 0]) # column
c14 = fits.Column(name='POS_CORR2', format='D', disp='F14.7', unit='pixels', array=self.lightcurve['pos_corr'][:, 1]) # row
tbhdu = fits.BinTableHDU.from_columns([c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14], name='LIGHTCURVE')
# Add proper comments on all the table headers:
tbhdu.header.comments['TTYPE1'] = 'column title: data time stamps'
tbhdu.header.comments['TFORM1'] = 'column format: 64-bit floating point'
tbhdu.header.comments['TUNIT1'] = 'column units: Barycenter corrected TESS Julian'
tbhdu.header.comments['TDISP1'] = 'column display format'
tbhdu.header.comments['TTYPE2'] = 'column title: barycenter - timeslice correction'
tbhdu.header.comments['TFORM2'] = 'column format: 32-bit floating point'
tbhdu.header.comments['TUNIT2'] = 'column units: day'
tbhdu.header.comments['TDISP2'] = 'column display format'
tbhdu.header.comments['TTYPE3'] = 'column title: unique cadence number'
tbhdu.header.comments['TFORM3'] = 'column format: signed 32-bit integer'
tbhdu.header.comments['TDISP3'] = 'column display format'
tbhdu.header.comments['TTYPE4'] = 'column title: photometric flux'
tbhdu.header.comments['TFORM4'] = 'column format: 64-bit floating point'
tbhdu.header.comments['TUNIT4'] = 'column units: electrons per second'
tbhdu.header.comments['TDISP4'] = 'column display format'
tbhdu.header.comments['TTYPE5'] = 'column title: photometric flux error'
tbhdu.header.comments['TFORM5'] = 'column format: 64-bit floating point'
tbhdu.header.comments['TUNIT5'] = 'column units: electrons per second'
tbhdu.header.comments['TDISP5'] = 'column display format'
tbhdu.header.comments['TTYPE6'] = 'column title: photometric background flux'
tbhdu.header.comments['TFORM6'] = 'column format: 64-bit floating point'
tbhdu.header.comments['TUNIT6'] = 'column units: electrons per second'
tbhdu.header.comments['TDISP6'] = 'column display format'
tbhdu.header.comments['TTYPE7'] = 'column title: corrected photometric flux'
tbhdu.header.comments['TFORM7'] = 'column format: 64-bit floating point'
tbhdu.header.comments['TUNIT7'] = 'column units: rel. flux in parts-per-million'
tbhdu.header.comments['TDISP7'] = 'column display format'
tbhdu.header.comments['TTYPE8'] = 'column title: corrected photometric flux error'
tbhdu.header.comments['TFORM8'] = 'column format: 64-bit floating point'
tbhdu.header.comments['TUNIT8'] = 'column units: parts-per-million'
tbhdu.header.comments['TDISP8'] = 'column display format'
tbhdu.header.comments['TTYPE9'] = 'column title: photometry quality flags'
tbhdu.header.comments['TFORM9'] = 'column format: signed 32-bit integer'
tbhdu.header.comments['TDISP9'] = 'column display format'
tbhdu.header.comments['TTYPE10'] = 'column title: pixel quality flags'
tbhdu.header.comments['TFORM10'] = 'column format: signed 32-bit integer'
tbhdu.header.comments['TDISP10'] = 'column display format'
tbhdu.header.comments['TTYPE11'] = 'column title: moment-derived column centroid'
tbhdu.header.comments['TFORM11'] = 'column format: 64-bit floating point'
tbhdu.header.comments['TUNIT11'] = 'column units: pixels'
tbhdu.header.comments['TDISP11'] = 'column display format'
tbhdu.header.comments['TTYPE12'] = 'column title: moment-derived row centroid'
tbhdu.header.comments['TFORM12'] = 'column format: 64-bit floating point'
tbhdu.header.comments['TUNIT12'] = 'column units: pixels'
tbhdu.header.comments['TDISP12'] = 'column display format'
tbhdu.header.comments['TTYPE13'] = 'column title: column position correction'
tbhdu.header.comments['TFORM13'] = 'column format: 64-bit floating point'
tbhdu.header.comments['TUNIT13'] = 'column units: pixels'
tbhdu.header.comments['TDISP13'] = 'column display format'
tbhdu.header.comments['TTYPE14'] = 'column title: row position correction'
tbhdu.header.comments['TFORM14'] = 'column format: 64-bit floating point'
tbhdu.header.comments['TUNIT14'] = 'column units: pixels'
tbhdu.header.comments['TDISP14'] = 'column display format'
tbhdu.header.set('INHERIT', True, 'inherit the primary header', after='TFIELDS')
# Timestamps of start and end of timeseries:
tdel = self.cadence/86400
tstart = self.lightcurve['time'][0] - tdel/2
tstop = self.lightcurve['time'][-1] + tdel/2
tstart_tm = Time(tstart, 2457000, format='jd', scale='tdb')
tstop_tm = Time(tstop, 2457000, format='jd', scale='tdb')
telapse = tstop - tstart
frametime = 2.0
int_time = 1.98
readtime = 0.02
if self.header['CRMITEN']:
crblocksize = self.header['CRBLKSZ']
deadc = (int_time * (crblocksize-2)/crblocksize) / frametime
else:
deadc = int_time / frametime
# Headers related to time to be added to LIGHTCURVE extension:
tbhdu.header['TIMEREF'] = ('SOLARSYSTEM', 'barycentric correction applied to times')
tbhdu.header['TIMESYS'] = ('TDB', 'time system is Barycentric Dynamical Time (TDB)')
tbhdu.header['BJDREFI'] = (2457000, 'integer part of BTJD reference date')
tbhdu.header['BJDREFF'] = (0.0, 'fraction of the day in BTJD reference date')
tbhdu.header['TIMEUNIT'] = ('d', 'time unit for TIME, TSTART and TSTOP')
tbhdu.header['TSTART'] = (tstart, 'observation start time in BTJD')
tbhdu.header['TSTOP'] = (tstop, 'observation stop time in BTJD')
tbhdu.header['DATE-OBS'] = (tstart_tm.utc.isot, 'TSTART as UTC calendar date')
tbhdu.header['DATE-END'] = (tstop_tm.utc.isot, 'TSTOP as UTC calendar date')
tbhdu.header['MJD-BEG'] = (tstart_tm.mjd, 'observation start time in MJD')
tbhdu.header['MJD-END'] = (tstop_tm.mjd, 'observation stop time in MJD')
tbhdu.header['TELAPSE'] = (telapse, '[d] TSTOP - TSTART')
tbhdu.header['LIVETIME'] = (telapse*deadc, '[d] TELAPSE multiplied by DEADC')
tbhdu.header['DEADC'] = (deadc, 'deadtime correction')
tbhdu.header['EXPOSURE'] = (telapse*deadc, '[d] time on source')
tbhdu.header['XPOSURE'] = (frametime*deadc*self.num_frm, '[s] Duration of exposure')
tbhdu.header['TIMEPIXR'] = (0.5, 'bin time beginning=0 middle=0.5 end=1')
tbhdu.header['TIMEDEL'] = (tdel, '[d] time resolution of data')
tbhdu.header['INT_TIME'] = (int_time, '[s] photon accumulation time per frame')
tbhdu.header['READTIME'] = (readtime, '[s] readout time per frame')
tbhdu.header['FRAMETIM'] = (frametime, '[s] frame time (INT_TIME + READTIME)')
tbhdu.header['NUM_FRM'] = (self.num_frm, 'number of frames per time stamp')
tbhdu.header['NREADOUT'] = (self.n_readout, 'number of reads per cadence')
# Make aperture image:
mask = self.aperture
if self.final_phot_mask is not None:
mask[self.final_phot_mask] |= 2
if self.final_position_mask is not None:
mask[self.final_position_mask] |= 8
# Construct FITS header for image extensions:
if self.datasource == 'ffi':
ir1, ir2, ic1, ic2 = self._stamp
else:
ir1 = self._stamp[0] - self._max_stamp[0]
ir2 = self._stamp[1] - self._max_stamp[0]
ic1 = self._stamp[2] - self._max_stamp[2]
ic2 = self._stamp[3] - self._max_stamp[2]
wcs = self.wcs[ir1:ir2, ic1:ic2]
header = wcs.to_header(relax=True)
header.set('INHERIT', True, 'inherit the primary header', before=0) # Add inherit header
# Create aperture image extension:
img_aperture = fits.ImageHDU(data=mask, header=header, name='APERTURE')
# Make sumimage image:
img_sumimage = fits.ImageHDU(data=SumImage, header=header, name="SUMIMAGE")
# List of the HDUs that will be put into the FITS file:
hdus = [hdu, tbhdu, img_sumimage, img_aperture]
# For Halo photometry, also add the weightmap to the FITS file:
if hasattr(self, 'halo_weightmap'):
# Create binary table to hold the list of weightmaps for halo photometry:
c1 = fits.Column(name='CADENCENO1', format='J', array=self.halo_weightmap['initial_cadence'])
c2 = fits.Column(name='CADENCENO2', format='J', array=self.halo_weightmap['final_cadence'])
c3 = fits.Column(name='SAT_PIXELS', format='J', array=self.halo_weightmap['sat_pixels'])
c4 = fits.Column(
name='WEIGHTMAP',
format='%dE' % np.prod(SumImage.shape),
dim='(%d,%d)' % (SumImage.shape[1], SumImage.shape[0]),
array=self.halo_weightmap['weightmap']
)
wm = fits.BinTableHDU.from_columns([c1, c2, c3, c4], header=header, name='WEIGHTMAP')
wm.header['TTYPE1'] = ('CADENCENO1', 'column title: first cadence number')
wm.header['TFORM1'] = ('J', 'column format: signed 32-bit integer')
wm.header['TDISP1'] = ('I10', 'column display format')
wm.header['TTYPE2'] = ('CADENCENO2', 'column title: last cadence number')
wm.header['TFORM2'] = ('J', 'column format: signed 32-bit integer')
wm.header['TDISP2'] = ('I10', 'column display format')
wm.header['TTYPE3'] = ('SAT_PIXELS', 'column title: Saturated pixels')
wm.header['TFORM3'] = ('J', 'column format: signed 32-bit integer')
wm.header['TDISP3'] = ('I10', 'column display format')
wm.header['TTYPE4'] = ('WEIGHTMAP', 'column title: Weightmap')
wm.header.comments['TFORM4'] = 'column format: image of 32-bit floating point'
wm.header['TDISP4'] = ('E14.7', 'column display format')
wm.header.comments['TDIM4'] = 'column dimensions: pixel aperture array'
# Add the new table to the list of HDUs:
hdus.append(wm)
# File name to save the lightcurve under:
filename = 'tess{starid:011d}-s{sector:03d}-{camera:d}-{ccd:d}-c{cadence:04d}-dr{datarel:02d}-v{version:02d}-tasoc_lc.fits.gz'.format(
starid=self.starid,
sector=self.sector,
camera=self.camera,
ccd=self.ccd,
cadence=self.cadence,
datarel=self.data_rel,
version=version
)
# Write to file:
filepath = os.path.join(output_folder, filename)
with fits.HDUList(hdus) as hdulist:
hdulist.writeto(filepath, checksum=True, overwrite=True)
# Store the output file in the details object for future reference:
if os.path.realpath(output_folder).startswith(os.path.realpath(self.input_folder)):
self._details['filepath_lightcurve'] = os.path.relpath(filepath, os.path.abspath(self.input_folder)).replace('\\', '/')
else:
self._details['filepath_lightcurve'] = os.path.relpath(filepath, self.output_folder_base).replace('\\', '/')
return filepath
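# Hedged sketch of reading the generated file back with astropy; the
# extension and column names match those written above:
#
#     from astropy.io import fits
#     with fits.open(filepath) as hdul:
#         lc = hdul['LIGHTCURVE'].data
#         time, flux = lc['TIME'], lc['FLUX_RAW']
#         sumimage = hdul['SUMIMAGE'].data
#         aperture = hdul['APERTURE'].data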
| tasoc/photometry | photometry/BasePhotometry.py | Python | gpl-3.0 | 72,934 | ["Gaussian"] | b2b39baa52c65a0f6850223347e0c0409f76e2dfb43443389d1c661e69428336 |
#!/usr/bin/python
#
# Counts the bases for each sequence in a FASTA file and spits them out along with the sequence record id, tab separated.
#
# Requires: BioPython
import sys
from Bio import SeqIO
def main():
if len(sys.argv) != 2:
print("countbases.py <fasta-file>")
sys.exit(0)
for seq_record in SeqIO.parse(sys.argv[1], "fasta"):
print(seq_record.id + "\t" + str(len(seq_record)))
if __name__ == '__main__':
main()
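# A hedged variant, in case per-nucleotide counts are wanted instead of
# total sequence lengths (same Biopython API; illustration only):
#
#     from collections import Counter
#     for seq_record in SeqIO.parse(sys.argv[1], "fasta"):
#         counts = Counter(str(seq_record.seq).upper())
#         print(seq_record.id + "\t" + "\t".join(
#             "%s=%d" % (b, counts.get(b, 0)) for b in "ACGT"))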
| carnegie-dpb/biotools | countbases.py | Python | gpl-2.0 | 453 | ["Biopython"] | 34345028e5d405dccba48f2e8e0ad1771b0640fc69ef4318273953a4dfacc3fa |
##
# Copyright 2009-2018 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing TINKER, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import fileinput
import glob
import os
import re
import shutil
import sys
import tempfile
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import mkdir
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import DARWIN, LINUX, get_os_type
class EB_TINKER(EasyBlock):
"""Support for building/installing TINKER."""
def __init__(self, *args, **kwargs):
"""Custom easyblock constructor for TINKER: initialise class variables."""
super(EB_TINKER, self).__init__(*args, **kwargs)
self.build_subdir = None
self.build_in_installdir = True
def configure_step(self):
"""Custom configuration procedure for TINKER."""
# make sure FFTW is available
if get_software_root('FFTW') is None:
raise EasyBuildError("FFTW dependency is not available.")
os_dirs = {
LINUX: 'linux',
DARWIN: 'macosx',
}
os_type = get_os_type()
os_dir = os_dirs.get(os_type)
if os_dir is None:
raise EasyBuildError("Failed to determine OS directory for %s (known: %s)", os_type, os_dirs)
comp_dirs = {
toolchain.INTELCOMP: 'intel',
toolchain.GCC: 'gfortran',
}
comp_fam = self.toolchain.comp_family()
comp_dir = comp_dirs.get(comp_fam)
if comp_dir is None:
raise EasyBuildError("Failed to determine compiler directory for %s (known: %s)", comp_fam, comp_dirs)
self.build_subdir = os.path.join(os_dir, comp_dir)
self.log.info("Using build scripts from %s subdirectory" % self.build_subdir)
# patch 'link.make' script to use FFTW provided via EasyBuild
link_make_fp = os.path.join(self.cfg['start_dir'], self.build_subdir, 'link.make')
for line in fileinput.input(link_make_fp, inplace=1, backup='.orig'):
line = re.sub(r"libfftw3_threads.a libfftw3.a", r"-L$EBROOTFFTW/lib -lfftw3_threads -lfftw3", line)
sys.stdout.write(line)
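# The fileinput idiom above rewrites a file in place: with inplace=1,
# stdout is temporarily redirected into the file, so every line must be
# written back. A hedged standalone sketch of the same pattern (file name
# and the flags being replaced are illustrative):
#
#     import fileinput, re, sys
#     for line in fileinput.input('link.make', inplace=1, backup='.orig'):
#         sys.stdout.write(re.sub(r'old-libs', 'new-libs', line))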
def build_step(self):
"""Custom build procedure for TINKER."""
source_dir = os.path.join(self.cfg['start_dir'], 'source')
try:
os.chdir(source_dir)
except OSError as err:
raise EasyBuildError("Failed to move to %s: %s", source_dir, err)
run_cmd(os.path.join(self.cfg['start_dir'], self.build_subdir, 'compile.make'))
run_cmd(os.path.join(self.cfg['start_dir'], self.build_subdir, 'library.make'))
run_cmd(os.path.join(self.cfg['start_dir'], self.build_subdir, 'link.make'))
def test_step(self):
"""Custom built-in test procedure for TINKER."""
if self.cfg['runtest']:
# copy tests, params and built binaries to temporary directory for testing
tmpdir = tempfile.mkdtemp()
testdir = os.path.join(tmpdir, 'test')
mkdir(os.path.join(tmpdir, 'bin'))
binaries = glob.glob(os.path.join(self.cfg['start_dir'], 'source', '*.x'))
try:
for binary in binaries:
shutil.copy2(binary, os.path.join(tmpdir, 'bin', os.path.basename(binary)[:-2]))
shutil.copytree(os.path.join(self.cfg['start_dir'], 'test'), testdir)
shutil.copytree(os.path.join(self.cfg['start_dir'], 'params'), os.path.join(tmpdir, 'params'))
except OSError as err:
raise EasyBuildError("Failed to copy binaries and tests to %s: %s", tmpdir, err)
try:
os.chdir(testdir)
except OSError as err:
raise EasyBuildError("Failed to move to %s to run tests: %s", testdir, err)
# run all tests via the provided 'run' scripts
tests = glob.glob(os.path.join(testdir, '*.run'))
# gpcr takes too long (~1h), ifabp fails due to input issues (?)
tests = [t for t in tests if not (t.endswith('gpcr.run') or t.endswith('ifabp.run'))]
for test in tests:
run_cmd(test)
def install_step(self):
"""Custom install procedure for TINKER."""
source_dir = os.path.join(self.cfg['start_dir'], 'source')
try:
os.chdir(source_dir)
except OSError as err:
raise EasyBuildError("Failed to move to %s: %s", source_dir, err)
mkdir(os.path.join(self.cfg['start_dir'], 'bin'))
run_cmd(os.path.join(self.cfg['start_dir'], self.build_subdir, 'rename.make'))
def sanity_check_step(self):
"""Custom sanity check for TINKER."""
custom_paths = {
'files': ['tinker/source/libtinker.a'],
'dirs': ['tinker/bin'],
}
super(EB_TINKER, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Custom guesses for module file prepend-path statements."""
guesses = super(EB_TINKER, self).make_module_req_guess()
guesses['PATH'].append(os.path.join('tinker', 'bin'))
guesses['LIBRARY_PATH'].append(os.path.join('tinker', 'source'))
return guesses
| bartoldeman/easybuild-easyblocks | easybuild/easyblocks/t/tinker.py | Python | gpl-2.0 | 6,443 | ["TINKER"] | e0c6627d20d0de69b9a13df787ceaf6d91ec04ac4b543e52c54846fe44a07b81 |
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# Parts of the code are based on eventghost plugins code
# noinspection PyUnresolvedReferences
import eg
eg.RegisterPlugin(
name = "ORCA",
description = "Receives events from the Orca over Network.",
version = "1.0.1",
author = "miljbee, adjustments from thica",
canMultiLoad = True,
createMacrosOnAdd = True,
icon = (
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QAAAAAAAD5Q7t/"
"AAAACXBIWXMAAAsSAAALEgHS3X78AAAAB3RJTUUH1gIQFgQb1MiCRwAAAVVJREFUOMud"
"kjFLw2AQhp8vif0fUlPoIgVx6+AgopNI3fwBViiIoOAgFaugIDhUtP4BxWDs4CI4d3MR"
"cSyIQ1tDbcHWtjFI4tAWG5pE8ca7997vnrtP4BOZvW0dSBAcZ0pAMTEzPUs4GvMsVkvP"
"6HktGWRAOBpjIXVNKOSWWdYXN7lFAAINhBCEQgqxyTHAAQQAD/dFbLurUYJYT7P7TI2C"
"VavwIiZodyyaH6ZLo/RZVTXiOYVhGOh5jcpbq5eRAXAc5wdBVSPMLR16GtxdbgJgN95d"
"OxicACG6bPH4uIu1UHjE7sFqR/NDVxhaoixLvFYbtDufNFtu1tzxgdeAaZfBU7ECTvd1"
"WRlxsa4sp1ydkiRxkstmlEFRrWT4nrRer3vmlf6mb883fK8AoF1d+Bqc6Xkt+cufT6e3"
"dnb9DJJrq+uYpunZ2WcFfA0ol8v8N5Qgvr/EN8Lzfbs+L0goAAAAAElFTkSuQmCC"
),
)
# noinspection PyUnresolvedReferences
import wx
import asynchat
import asyncore
import socket
import win32api
import win32con
class Text:
"""
    Helper class for receiving remote ghost commands
"""
def __init__(self):
pass
port = "TCP/IP Port:"
eventPrefix = "Event Prefix:"
tcpBox = "TCP/IP Settings"
eventGenerationBox = "Event generation"
DEBUG = False
if DEBUG:
log = eg.Print
else:
# noinspection PyUnusedLocal
def log(dummyMesg):
pass
class ServerHandler(asynchat.async_chat):
"""Telnet engine class. Implements command line user interface."""
# noinspection PyUnusedLocal
def __init__(self, sock, addr, plugin, server):
log("Server Handler inited")
self.plugin = plugin
# Call constructor of the parent class
asynchat.async_chat.__init__(self, sock)
# Set up input line terminator
self.set_terminator('\n')
# Initialize input data buffer
self.data = ''
self.state = self.state1
self.ip = addr[0]
self.aTreeMacros={}
self.aRawMacros={}
def handle_close(self):
"""
Closes the connection
"""
self.plugin.EndLastEvent()
asynchat.async_chat.handle_close(self)
def collect_incoming_data(self, data):
"""Put data read from socket to a buffer
"""
# Collect data in input buffer
log("<<" + repr(data))
self.data = self.data + data
if DEBUG:
def push(self, data):
log(">>" + repr(data))
asynchat.async_chat.push(self, data)
def found_terminator(self):
"""
        This method is called by the asynchronous engine when it finds
        the command terminator in the input stream
"""
# Take the complete line
line = self.data
# Reset input buffer
self.data = ''
#call state handler
self.state(line)
def initiate_close(self):
"""
Closes the connection
"""
self.state = self.state1
self.close()
def respond_ok(self):
"""
        Give an OK response
"""
self.respond(u'RemoteGhost.OK')
def respond_error(self):
"""
        Give an ERROR response
"""
self.respond(u'RemoteGhost.ERROR')
def respond(self, sMsg):
"""
        Send sMsg as a response
:param sMsg:
"""
try:
sMsg += "[EOL]"
#print u"ORCA:",type(sMsg),u':',sMsg
#print "ORCA:",eg.systemEncoding
#asynchat.async_chat.push(self,sMsg.encode(eg.systemEncoding))
asynchat.async_chat.push(self,sMsg.encode("utf-8","replace"))
#synchat.async_chat.push(self,sMsg.decode("utf-8","replace"))
# asynchat.async_chat.push(self,u"\xc3\x84\xc3\xa4")
#asynchat.async_chat.push(self,sMsg)
except Exception as inst:
print (u"ORCA:error send to Orca:"+str(inst))
pass
def state1(self, line):
if line.startswith('c'):
self.respond(u"RemoteGhost.Pong")
return
if line.startswith('e'):
self.plugin.TriggerEvent(line[1:].strip())
self.respond_ok()
#self.initiate_close()
return
if line.startswith('a'):
sRet=self.ExecuteMacro(line[1:].strip())
self.respond(u"RemoteGhost."+sRet)
return
if line.startswith('k'):
hwnds = eg.lastFoundWindows
if not hwnds:
hwnd = None
else:
hwnd = hwnds[0]
sCmd=line[1:]
print (u'ORCA: Sending Keystroke:'+sCmd)
eg.SendKeys(hwnd, sCmd, False)
self.respond_ok()
return
if line.startswith('m'):
sCmd=line[1:]
if sCmd=='{Mouse_Left_Click}':
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)
self.respond_ok()
return
if sCmd=='{Mouse_Right_Click}':
win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTDOWN, 0, 0, 0, 0)
win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP, 0, 0, 0, 0)
self.respond_ok()
return
self.respond_error()
print (u'ORCA:Received invalid statement:'+line)
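    # Illustrative protocol summary (added for clarity; inferred from state1
    # above, not part of the original file). Each newline-terminated line
    # starts with a one-letter command:
    #   'c'            -> connection check, answered with "RemoteGhost.Pong"
    #   'e<event>'     -> trigger an EventGhost event, answered with "RemoteGhost.OK"
    #   'a<macro>'     -> execute a macro/action from the tree; the answer carries the result
    #   'k<keystroke>' -> send keystrokes to the last found window
    #   'm{Mouse_Left_Click}' / 'm{Mouse_Right_Click}' -> synthesize a mouse click
    # Every response string is suffixed with "[EOL]" by respond().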
# noinspection PyUnusedLocal
def ProcessMsg(self, sMsg, icon):
self.respond(sMsg)
def ExecuteMacro(self,sMacro):
sRet="Command not found...:"+sMacro
if len(self.aTreeMacros)==0:
self.BrowseTree()
oMacro=self.aTreeMacros.get(sMacro)
if oMacro is None:
oMacro=self.aRawMacros.get(sMacro)
if oMacro is not None:
if isinstance(oMacro, eg.MacroItem):
eg.programCounter = (oMacro, 0)
sRet=eg.RunProgram()
elif isinstance(oMacro, eg.ActionItem):
sRet=oMacro.Execute()
#def ExecuteTreeItem(obj, event):
if sRet is None:
sRet=''
if not isinstance(sRet, str):
print ("ORCA: ErrorExecute:",type(sRet))
sRet=str(sRet)
return sRet
return "Error"
def BrowseTree(self,oRoot=None, sContext=''):
filterClassesNodes=(eg.FolderItem, eg.MacroItem)
filterClassesTargets=(eg.MacroItem,eg.ActionItem)
def filterFuncNodes(oObj):
return isinstance(oObj, filterClassesNodes)
def filterFuncTargets(oObj):
return isinstance(oObj, filterClassesTargets)
if oRoot is None:
srcTree = eg.document.frame.treeCtrl
srcRoot = srcTree.GetRootItem()
obj = srcTree.GetPyData(srcRoot)
oRoot=obj
for child in oRoot.childs:
sText=child.GetLabel()
#print "ORCA:5:",type(child)
#print "ORCA:6:",child.GetLabel()
if filterFuncTargets(child):
sLabel=child.GetLabel()
if sContext=='':
sQlName=sLabel
else:
sQlName=sContext+"."+sLabel
#print "ORCA:6:",sQlName
self.aTreeMacros[sQlName]=child
self.aRawMacros[sLabel]=child
if filterFuncNodes(child):
self.BrowseTree(child,sContext+"."+sText)
class Server(asyncore.dispatcher):
def __init__ (self, port, handler):
self.handler = handler
self.oServerHandler = None
# Call parent class constructor explicitly
asyncore.dispatcher.__init__(self)
# Create socket of requested type
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
# restart the asyncore loop, so it notices the new socket
eg.RestartAsyncore()
# Set it to re-use address
#self.set_reuse_addr()
# Bind to all interfaces of this host at specified port
self.bind(('', port))
# Start listening for incoming requests
#self.listen (1024)
self.listen(5)
def handle_accept (self):
"""Called by asyncore engine when new connection arrives"""
# Accept new connection
log("handle_accept")
#self.handler.TriggerEvent(u'ORCA Connecting...')
(sock, addr) = self.accept()
self.oServerHandler=ServerHandler(sock, addr, self.handler, self)
self.handler.logWrapper.AddLogListener(self.oServerHandler)
def SendMsg(self, sMsg):
if self.oServerHandler:
self.oServerHandler.respond(sMsg)
class NetworkReceiver(eg.PluginBase):
text = Text
def __init__(self):
pass
def __start__(self, port, prefix):
self.port = port
self.info.eventPrefix = prefix
self.logWrapper = LogWrapper(self)
self.server = Server(self.port, self)
def __stop__(self):
if self.server:
self.server.close()
self.server = None
self.logWrapper.StopAllLogListeners()
def Configure(self, port=1024, prefix="ORCA"):
text = self.text
panel = eg.ConfigPanel()
portCtrl = panel.SpinIntCtrl(port, max=65535)
eventPrefixCtrl = panel.TextCtrl(prefix)
st1 = panel.StaticText(text.port)
st3 = panel.StaticText(text.eventPrefix)
eg.EqualizeWidths((st1, st3))
box1 = panel.BoxedGroup(text.tcpBox, (st1, portCtrl))
box3 = panel.BoxedGroup(text.eventGenerationBox, (st3, eventPrefixCtrl))
panel.sizer.AddMany([
(box1, 0, wx.EXPAND),
(box3, 0, wx.EXPAND|wx.TOP, 10),
])
while panel.Affirmed():
panel.SetResult(portCtrl.GetValue(), eventPrefixCtrl.GetValue())
class LogWrapper:
# noinspection PyUnusedLocal
def __init__(self, oPlugin):
self.aLogListener = []
@eg.LogIt
def AddLogListener(self, oListener):
if len(self.aLogListener) == 0:
eg.log.AddLogListener(self)
if oListener not in self.aLogListener:
self.aLogListener.append(oListener)
# noinspection PyUnusedLocal
def WriteLine(self, sLine, oIcon, wRef, when, indent):
for oListener in self.aLogListener:
if not sLine.startswith("ORCA:"):
oListener.ProcessMsg(sLine, oIcon)
@eg.LogIt
def StopAllLogListeners(self):
if len(self.aLogListener) > 0:
eg.log.RemoveLogListener(self)
self.aLogListener = []
|
thica/ORCA-Remote
|
src/interfaces/remoteghost/Evenghost_Plugin/ORCA/__init__.py
|
Python
|
gpl-3.0
| 11,936
|
[
"ORCA"
] |
3ae4bf25ecd6723742c0be26a20d46c3445cd7db473144c10c328c240f2ed300
|
#!/usr/bin/env python
#TODO: the above hashbang should work on any Unix/Linux system. Anyway, check
#your path to env or your Python interpreter
#PPM from RC transmitters Calibrate Version 2.0
#
#PPMRC - PPM interpreter, calibrator and remapper
#Copyright (C) 2017 Francesco Antonetti Lamorgese Passeri
#https://github.com/antlampas/ppmrc
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#PPMRC is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import serial
import time
#Global variables
licenseText = "\n\nPPMRC Copyright (C) 2017 Francesco Antonetti Lamorgese \
Passeri\nThis program comes with ABSOLUTELY NO WARRANTY; for details visit \
http://fsf.org/.\nThis is free software, and you are welcome to redistribute \
it under certain conditions; visit https://github.com/antlampas/ppmrc for \
details.\n\n"
serialIn = serial.Serial()
serialOut = serial.Serial()
outputType = ""
f = ""
userInterfaceType = ""
#End global variables
#Functions definition
#Remap function
def remap(x,in_min,in_max,out_min,out_max):
# Maps the x value from a range [in_min,in_max] to another [out_min,out_max]
    # It is the same as the map() function in Arduino's language
    return float((x-in_min)*(out_max-out_min)/(in_max-in_min)+out_min) #Returns the remapped value, converted to floating point
#End remap
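#Illustrative examples (added for clarity; not part of the original script),
#assuming the usual 1000-2000 us PPM pulse range:
#   remap(1500, 1000, 2000, -1.0, 1.0) -> 0.0   (stick centered)
#   remap(2000, 1000, 2000,  0.0, 1.0) -> 1.0   (full throttle)
#   remap(1250, 1000, 2000, -1.0, 1.0) -> -0.5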
#Read values function
def readValues():
    # Reads the values from the serial port (the global serialIn) and returns
    # an array of the values converted to floating point.
    # The input comes in the form of a string of numbers separated by spaces
while True:
        serialInput = serialIn.readline().decode("ascii", "ignore") #Reads the input line (decoded from bytes so it also works on Python 3)
channelsArray = serialInput.split() #Splits the string in an array
        channelsValue = [] #Declares an array where to put the values converted to floating point variables
good = 1 #This is needed to check if substrings are numbers or not
for i in channelsArray: #For each number in the input string
if i.isdigit(): #Checks if the variable is a number
channelsValue += [float(i)] #And, if it is, puts it in the array
else: #Otherwise
good = 0 #Report "bad input"
break #and stop putting values in the array
if good == 1: #If all values are correctly numbers
            return channelsValue #Returns the array of values converted to floating point
#End readValues
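#Illustrative example (added; not in the original): a serial line such as
#"1040 1500 1980 1500 1000 1000 1000 1000\n" yields
#[1040.0, 1500.0, 1980.0, 1500.0, 1000.0, 1000.0, 1000.0, 1000.0].
#Note that isdigit() rejects signs and decimal points, so any token that is
#not a plain unsigned integer discards the whole line and triggers a re-read.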
def minMax(ord,lowerBound,upperBound,tests,numberOfChannels,wait):
#ord: "minimums" find minimums; "maximums" find maximums
#arr: array with initial minimum and maximum values
#wait: give some time to move axes in required positions
boundaries = [] #Vector for the minimum and maximum input values
for i in range(numberOfChannels): #Initialize the vector to host the minimum and maximum values of each channel
boundaries.extend([float(lowerBound)]) #First n elements are for the minimum values of the n channels
    for i in range(numberOfChannels): #The next n elements (from n+1 to 2n) are for the maximum values of the n channels
boundaries.extend([float(upperBound)])
    i = 1 #Counter of valid readings (replaces the old range(1,tests)-based loop variable)
if ord == "minimum": #Advise to position the axes
messageOutput(userInterfaceType,"Move all axes to minimum")
elif ord == "maximum":
messageOutput(userInterfaceType,"Move all axes to maximum")
    time.sleep(wait) #Ignore all the garbage that Arduino sends at the beginning
    while i < tests:                        #Keep reading until enough valid samples are collected
        chVal = readValues()                #Read the string from the serial port
        if len(chVal) != numberOfChannels:  #Check that the number of values meets the number of channels
            continue                        #Retry without counting: the original 'i -= 1' on a for-loop variable was a no-op in Python
        i += 1
j = 0
for ch in chVal: #For each channel
if ord == "minimum": #If minimums chosen, search and store the minimums
leftSide = float(ch)
rightSide = lowerBound
index = j
elif ord == "maximum": #Else, if maximums chosen, search and store maximums
leftSide = upperBound
rightSide = float(ch)
index = j+numberOfChannels
if leftSide > rightSide: #Checks that the value isn't below the lower or above the upper bound
if ord == "minimum":
leftSide = float(ch)
rightSide = boundaries[index]
elif ord == "maximum":
leftSide = boundaries[index]
rightSide = float(ch)
if leftSide < rightSide: #If the input value is smaller than minimum value or bigger than maximum value in the array
boundaries[index] = leftSide #Overwrite the value
j += 1
return boundaries
#Calibrate function
def calibrate(tests,wait,lowerBound,upperBound,numberOfChannels):
# Calibrates the serial input to be included in a range correctly readable
# from FlightGear ([-1.0,1.0] for roll, pitch and yaw; [0,1.0] for throttle)
    minimums = \
    minMax("minimum",lowerBound,upperBound,tests,numberOfChannels,wait) #Find and store minimums
    maximums = \
    minMax("maximum",lowerBound,upperBound,tests,numberOfChannels,wait) #Find and store maximums
    #Merge the two passes: the original code overwrote the minimums with the
    #result of the second minMax() call, losing the minimum calibration
    boundaries = minimums[:numberOfChannels] + maximums[numberOfChannels:]
    return boundaries
#End calibrate function
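#Illustrative layout sketch (added; not in the original): with numberOfChannels
#= 8, calibrate() returns a 16-element vector where boundaries[0:8] hold the
#per-channel minimums and boundaries[8:16] the maximums; the main loop below
#pairs them as (boundaries[ch], boundaries[ch + 8]) when calling remap().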
#User interface functions
#Output to user
def messageOutput(outputType,message): #Function needed to unbind the user interface from the application logic
    if outputType.lower() in ("command line", "cli"): #Case-insensitive match (covers the spelling variants the original listed explicitly)
        print(message)
    elif outputType.lower() in ("graphical", "gui"):
        #TODO: Write code related to the creation of the GUI and print the
        #message on it
        raise NotImplementedError("GUI output is not implemented yet")
#End output to user
#Read the user input
def userInput(inputType,message): #Function needed to unbind the user interface from the application logic
    if inputType.lower() in ("command line", "cli"): #Case-insensitive match (covers the spelling variants the original listed explicitly)
        return input(message)
    elif inputType.lower() in ("graphical", "gui"):
        #TODO: Write code related to the creation of the GUI and read the
        #user input from it
        raise NotImplementedError("GUI input is not implemented yet")
#End read the user input
#End user interface functions
#Initialize function
def initialize():
global outputType
global f
global serialIn
global serialOut
global userInterfaceType
userInterfaceType = "cli"
messageOutput(userInterfaceType,licenseText)
serialInPort = userInput(userInterfaceType,"Input serial port: ")
serialInBaud = userInput(userInterfaceType,"Baudrate: ")
serialIn.port = serialInPort
serialIn.baudrate = serialInBaud
serialIn.timeout = 0
serialIn.open()
while True:
outputType = \
userInput(userInterfaceType, \
"Where sould i put the output data? (serial or file) ")
if outputType == "Serial" or outputType == "serial":
serialOutPort = userInput(userInterfaceType, \
"Output serial port: ")
serialOutBaud = userInput(userInterfaceType,"Baudrate: ")
serialOut.port = serialOutPort
serialOut.baudrate = serialOutBaud
serialOut.timeout = 0
serialOut.open()
break
elif outputType == "File" or outputType == "file":
fileName = userInput(userInterfaceType,"Full file path: ")
f = open(fileName,"w")
f.close()
f = open(fileName,"a")
break
else:
messageOutput(userInterfaceType,"I don't understand...")
global boundaries
boundaries = calibrate(500,3,1040,2000,channels)
#End initialize
#End functions definition
#Main program
boundaries = []
channels = 8
initialize()
while True:
s = readValues()
if len(s) != channels:
continue
roll = remap(s[0],boundaries[0],boundaries[8],-1.0,1.0)
pitch = remap(s[1],boundaries[1],boundaries[9],-1.0,1.0)
throttle = remap(s[2],boundaries[2],boundaries[10],0.0,1.0)
yaw = remap(s[3],boundaries[3],boundaries[11],-1.0,1.0)
o = '{:+1.2f}'.format(roll) + " " + '{:+1.2f}'.format(pitch) + " " + \
'{:+1.2f}'.format(throttle) + " " + '{:+1.2f}'.format(yaw) + '\n'
print(o)
if (outputType == "Serial" or outputType == "serial"):
serialOut.write(o.encode('ascii'))
elif (outputType == "File" or outputType == "file"):
f.write(o)
#End main program
|
antlampas/ppmrc
|
Example/FlightGear/Linux/ppmrc.py
|
Python
|
gpl-3.0
| 10,863
|
[
"VisIt"
] |
d3ad2154892f8abda675f8e33846fa06c69202157233363a546a2dd374749c81
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#
# Impressive, a fancy presentation tool
# Copyright (C) 2005-2014 Martin J. Fiedler <martin.fiedler@gmx.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
__title__ = "Impressive"
__version__ = "0.11.0b"
__rev__ = 180
__author__ = "Martin J. Fiedler"
__email__ = "martin.fiedler@gmx.net"
__website__ = "http://impressive.sourceforge.net/"
import sys
if __rev__ and (("WIP" in __version__) or ("rc" in __version__) or ("alpha" in __version__) or ("beta" in __version__)):
__version__ += " (SVN r%s)" % __rev__
def greet():
print >>sys.stderr, "Welcome to", __title__, "version", __version__
if __name__ == "__main__":
greet()
TopLeft, BottomLeft, TopRight, BottomRight, TopCenter, BottomCenter = range(6)
NoCache, MemCache, CompressedCache, FileCache, PersistentCache = range(5) # for CacheMode
Off, First, Last = range(3) # for AutoOverview
# You may change the following lines to modify the default settings
Verbose = False
Fullscreen = True
FakeFullscreen = False
Scaling = False
Supersample = None
BackgroundRendering = True
PDFRendererPath = None
UseAutoScreenSize = True
ScreenWidth = 1024
ScreenHeight = 768
WindowPos = None
TransitionDuration = 1000
MouseHideDelay = 3000
BoxFadeDuration = 100
ZoomDuration = 250
BlankFadeDuration = 250
BoxFadeBlur = 1.5
BoxFadeDarkness = 0.25
BoxFadeDarknessStep = 0.05
MarkColor = (1.0, 0.0, 0.0, 0.1)
BoxEdgeSize = 4
SpotRadius = 64
MinSpotDetail = 13
SpotDetail = 12
CacheMode = FileCache
HighQualityOverview = True
OverviewBorder = 3
OverviewLogoBorder = 24
AutoOverview = Off
InitialPage = None
Wrap = False
AutoAdvance = None
AutoAutoAdvance = False
RenderToDirectory = None
Rotation = 0
DAR = None
PAR = 1.0
Overscan = 3
PollInterval = 0
PageRangeStart = 0
PageRangeEnd = 999999
FontSize = 14
FontTextureWidth = 512
FontTextureHeight = 256
Gamma = 1.0
BlackLevel = 0
GammaStep = 1.1
BlackLevelStep = 8
EstimatedDuration = None
PageProgress = False
AutoAdvanceProgress = False
ProgressBarSizeFactor = 0.02
ProgressBarAlpha = 0.5
ProgressBarColorNormal = (0.0, 1.0, 0.0)
ProgressBarColorWarning = (1.0, 1.0, 0.0)
ProgressBarColorCritical = (1.0, 0.0, 0.0)
ProgressBarColorPage = (0.0, 0.5, 1.0)
ProgressBarWarningFactor = 1.25
ProgressBarCriticalFactor = 1.5
CursorImage = None
CursorHotspot = (0, 0)
MinutesOnly = False
OSDMargin = 16
OSDAlpha = 1.0
OSDTimePos = TopRight
OSDTitlePos = BottomLeft
OSDPagePos = BottomRight
OSDStatusPos = TopLeft
ZoomFactor = 2
FadeInOut = False
ShowLogo = True
Shuffle = False
QuitAtEnd = False
ShowClock = False
HalfScreen = False
InvertPages = False
MinBoxSize = 20
UseBlurShader = True
TimeTracking = False
EventTestMode = False
# import basic modules
import random, getopt, os, types, re, codecs, tempfile, glob, cStringIO
import traceback, subprocess, time, itertools, ctypes.util, zlib
from math import *
from ctypes import *
# import hashlib for MD5 generation, but fall back to old md5 lib if unavailable
# (this is the case for Python versions older than 2.5)
try:
import hashlib
md5obj = hashlib.md5
except ImportError:
import md5
md5obj = md5.new
# initialize some platform-specific settings
if os.name == "nt":
root = os.path.split(sys.argv[0])[0] or "."
_find_paths = [root, os.path.join(root, "win32"), os.path.join(root, "gs")] + filter(None, os.getenv("PATH").split(';'))
def FindBinary(binary):
if not binary.lower().endswith(".exe"):
binary += ".exe"
for p in _find_paths:
path = os.path.join(p, binary)
if os.path.isfile(path):
return path
return binary # fall-back if not found
pdftkPath = FindBinary("pdftk.exe")
GhostScriptPlatformOptions = ["-I" + os.path.join(root, "gs")]
try:
import win32api
HaveWin32API = True
MPlayerPath = FindBinary("mplayer.exe")
def RunURL(url):
win32api.ShellExecute(0, "open", url, "", "", 0)
except ImportError:
HaveWin32API = False
MPlayerPath = ""
def RunURL(url): print "Error: cannot run URL `%s'" % url
MPlayerPlatformOptions = [ "-colorkey", "0x000000" ]
MPlayerColorKey = True
if getattr(sys, "frozen", False):
sys.path.append(root)
FontPath = []
FontList = ["Verdana.ttf", "Arial.ttf"]
Nice = []
else:
def FindBinary(x): return x
GhostScriptPlatformOptions = []
MPlayerPath = "mplayer"
MPlayerPlatformOptions = [ "-vo", "gl" ]
MPlayerColorKey = False
pdftkPath = "pdftk"
FontPath = ["/usr/share/fonts", "/usr/local/share/fonts", "/usr/X11R6/lib/X11/fonts/TTF"]
FontList = ["DejaVuSans.ttf", "Vera.ttf", "Verdana.ttf"]
Nice = ["nice", "-n", "7"]
def RunURL(url):
try:
subprocess.Popen(["xdg-open", url])
except OSError:
print >>sys.stderr, "Error: cannot open URL `%s'" % url
# import special modules
try:
import pygame
from pygame.locals import *
from PIL import Image, ImageDraw, ImageFont, ImageFilter, ImageChops, ImageOps
from PIL import TiffImagePlugin, BmpImagePlugin, JpegImagePlugin, PngImagePlugin, PpmImagePlugin
except (ValueError, ImportError), err:
print >>sys.stderr, "Oops! Cannot load necessary modules:", err
print >>sys.stderr, """To use Impressive, you need to install the following Python modules:
- PyGame [python-pygame] http://www.pygame.org/
- PIL [python-imaging] http://www.pythonware.com/products/pil/
or Pillow http://pypi.python.org/pypi/Pillow/
- PyWin32 (OPTIONAL, Win32) http://sourceforge.net/projects/pywin32/
Additionally, please be sure to have pdftoppm or GhostScript installed if you
intend to use PDF input."""
sys.exit(1)
try:
import thread
HaveThreads = True
def create_lock(): return thread.allocate_lock()
def get_thread_id(): return thread.get_ident()
except ImportError:
HaveThreads = False
class pseudolock:
def __init__(self): self.state = False
def acquire(self, dummy=0): self.state = True
def release(self): self.state = False
def locked(self): return self.state
def create_lock(): return pseudolock()
def get_thread_id(): return 0xDEADC0DE
CleanExit = False
##### GLOBAL VARIABLES #########################################################
# initialize private variables
DocumentTitle = None
FileName = ""
FileList = []
InfoScriptPath = None
AvailableRenderers = []
PDFRenderer = None
BaseWorkingDir = '.'
Marking = False
Tracing = False
Panning = False
FileProps = {}
PageProps = {}
PageCache = {}
CacheFile = None
CacheFileName = None
CacheFilePos = 0
CacheMagic = ""
MPlayerProcess = None
VideoPlaying = False
MarkValid, MarkBaseX, MarkBaseY = False, 0, 0
PanValid, PanBaseX, PanBaseY = False, 0, 0
MarkUL = (0, 0)
MarkLR = (0, 0)
ZoomX0 = 0.0
ZoomY0 = 0.0
ZoomArea = 1.0
ZoomMode = False
IsZoomed = False
HighResZoomFailed = False
TransitionRunning = False
TransitionPhase = 0.0
CurrentCaption = 0
OverviewNeedUpdate = False
FileStats = None
OSDFont = None
CurrentOSDCaption = ""
CurrentOSDPage = ""
CurrentOSDStatus = ""
CurrentOSDComment = ""
Lrender = create_lock()
Lcache = create_lock()
Loverview = create_lock()
RTrunning = False
RTrestart = False
StartTime = 0
CurrentTime = 0
PageEnterTime = 0
PageLeaveTime = 0
PageTimeout = 0
TimeDisplay = False
FirstPage = True
ProgressBarPos = 0
CursorVisible = True
OverviewMode = False
LastPage = 0
WantStatus = False
GLVendor = ""
GLRenderer = ""
GLVersion = ""
RequiredShaders = []
DefaultScreenTransform = (-1.0, 1.0, 2.0, -2.0)
ScreenTransform = DefaultScreenTransform
SpotVertices = None
SpotIndices = None
# tool constants (used in info scripts)
FirstTimeOnly = 2
##### PLATFORM-SPECIFIC PYGAME INTERFACE CODE ##################################
class Platform_PyGame(object):
name = 'pygame'
allow_custom_fullscreen_res = True
has_hardware_cursor = True
_buttons = { 1: "lmb", 2: "mmb", 3: "rmb", 4: "wheelup", 5: "wheeldown" }
_keys = dict((getattr(pygame.locals, k), k[2:].lower()) for k in [k for k in dir(pygame.locals) if k.startswith('K_')])
def __init__(self):
self.next_event = None
self.schedule_map_ev2flag = {}
self.schedule_map_ev2name = {}
self.schedule_map_name2ev = {}
self.schedule_max = USEREVENT
def Init(self):
pygame.display.init()
def GetTicks(self):
return pygame.time.get_ticks()
def GetScreenSize(self):
return pygame.display.list_modes()[0]
def StartDisplay(self):
global ScreenWidth, ScreenHeight, Fullscreen, FakeFullscreen, WindowPos
pygame.display.set_caption(__title__)
flags = OPENGL | DOUBLEBUF
if Fullscreen:
if FakeFullscreen:
print >>sys.stderr, "Using \"fake-fullscreen\" mode."
flags |= NOFRAME
if not WindowPos:
WindowPos = (0,0)
else:
flags |= FULLSCREEN
if WindowPos:
os.environ["SDL_VIDEO_WINDOW_POS"] = ','.join(map(str, WindowPos))
pygame.display.set_mode((ScreenWidth, ScreenHeight), flags)
pygame.key.set_repeat(500, 30)
def LoadOpenGL(self):
try:
sdl = CDLL(ctypes.util.find_library("SDL") or ctypes.util.find_library("SDL-1.2") or "SDL", RTLD_GLOBAL)
get_proc_address = CFUNCTYPE(c_void_p, c_char_p)(('SDL_GL_GetProcAddress', sdl))
except OSError:
raise ImportError("failed to load the SDL library")
except AttributeError:
raise ImportError("failed to load SDL_GL_GetProcAddress from the SDL library")
def loadsym(name, prototype):
try:
addr = get_proc_address(name)
except EnvironmentError:
return None
if not addr:
return None
return prototype(addr)
return OpenGL(loadsym, desktop=True)
def SwapBuffers(self):
pygame.display.flip()
def Done(self):
pygame.display.quit()
def Quit(self):
pygame.quit()
def SetWindowTitle(self, text):
pygame.display.set_caption(text, __title__)
def GetWindowID(self):
return pygame.display.get_wm_info()['window']
def GetMousePos(self):
return pygame.mouse.get_pos()
def SetMousePos(self, coords):
pygame.mouse.set_pos(coords)
def SetMouseVisible(self, visible):
pygame.mouse.set_visible(visible)
def _translate_mods(self, key, mods):
if mods & KMOD_SHIFT:
key = "shift+" + key
if mods & KMOD_ALT:
key = "alt+" + key
if mods & KMOD_CTRL:
key = "ctrl+" + key
return key
def _translate_button(self, ev):
try:
return self._translate_mods(self._buttons[ev.button], pygame.key.get_mods())
except KeyError:
return 'unknown-button-' + str(ev.button)
def _translate_key(self, ev):
try:
return self._translate_mods(self._keys[ev.key], ev.mod)
except KeyError:
return 'unknown-key-' + str(ev.key)
def GetEvent(self, poll=False):
if self.next_event:
ev = self.next_event
self.next_event = None
return ev
if poll:
ev = pygame.event.poll()
else:
ev = pygame.event.wait()
if ev.type == NOEVENT:
return None
elif ev.type == QUIT:
return "$quit"
elif ev.type == VIDEOEXPOSE:
return "$expose"
elif ev.type == MOUSEBUTTONDOWN:
return '+' + self._translate_button(ev)
elif ev.type == MOUSEBUTTONUP:
ev = self._translate_button(ev)
self.next_event = '-' + ev
return '*' + ev
elif ev.type == MOUSEMOTION:
pygame.event.clear(MOUSEMOTION)
return "$move"
elif ev.type == KEYDOWN:
if ev.mod & KMOD_ALT:
if ev.key == K_F4:
return self.PostQuitEvent()
elif ev.key == K_TAB:
return "$alt-tab"
ev = self._translate_key(ev)
self.next_event = '*' + ev
return '+' + ev
elif ev.type == KEYUP:
return '-' + self._translate_key(ev)
elif (ev.type >= USEREVENT) and (ev.type < self.schedule_max):
if not(self.schedule_map_ev2flag.get(ev.type)):
pygame.time.set_timer(ev.type, 0)
return self.schedule_map_ev2name.get(ev.type)
else:
return "$?"
def CheckAnimationCancelEvent(self):
return bool(pygame.event.get([KEYDOWN, MOUSEBUTTONUP]))
def ScheduleEvent(self, name, msec=0, periodic=False):
try:
ev_code = self.schedule_map_name2ev[name]
except KeyError:
ev_code = self.schedule_max
self.schedule_map_name2ev[name] = ev_code
self.schedule_map_ev2name[ev_code] = name
self.schedule_max += 1
self.schedule_map_ev2flag[ev_code] = periodic
pygame.time.set_timer(ev_code, msec)
def PostQuitEvent(self):
pygame.event.post(pygame.event.Event(QUIT))
def ToggleFullscreen(self):
return pygame.display.toggle_fullscreen()
def Minimize(self):
pygame.display.iconify()
def SetGammaRamp(self, gamma, black_level):
scale = 1.0 / (255 - black_level)
power = 1.0 / gamma
ramp = [int(65535.0 * ((max(0, x - black_level) * scale) ** power)) for x in range(256)]
return pygame.display.set_gamma_ramp(ramp, ramp, ramp)
class Platform_Win32(Platform_PyGame):
name = 'pygame-win32'
def GetScreenSize(self):
if HaveWin32API:
dm = win32api.EnumDisplaySettings(None, -1) #ENUM_CURRENT_SETTINGS
return (int(dm.PelsWidth), int(dm.PelsHeight))
return Platform_PyGame.GetScreenSize(self)
def LoadOpenGL(self):
try:
opengl32 = WinDLL("opengl32")
get_proc_address = WINFUNCTYPE(c_void_p, c_char_p)(('wglGetProcAddress', opengl32))
except OSError:
raise ImportError("failed to load the OpenGL library")
except AttributeError:
raise ImportError("failed to load wglGetProcAddress from the OpenGL library")
def loadsym(name, prototype):
# try to load OpenGL 1.1 function from opengl32.dll first
try:
return prototype((name, opengl32))
except AttributeError:
pass
# if that fails, load the extension function via wglGetProcAddress
try:
addr = get_proc_address(name)
except EnvironmentError:
addr = None
if not addr:
return None
return prototype(addr)
return OpenGL(loadsym, desktop=True)
class Platform_Unix(Platform_PyGame):
name = 'pygame-unix'
def GetScreenSize(self):
re_res = re.compile(r'\s*(\d+)x(\d+)\s+\d+\.\d+\*')
res = None
try:
xrandr = subprocess.Popen(["xrandr"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in xrandr.stdout:
m = re_res.match(line)
if m:
res = tuple(map(int, m.groups()))
xrandr.wait()
except OSError:
pass
if res:
return res
return Platform_PyGame.GetScreenSize(self)
class Platform_EGL(Platform_Unix):
name = 'egl'
def StartDisplay(self, display=None, window=None, width=None, height=None):
global ScreenWidth, ScreenHeight
width = width or ScreenWidth
height = height or ScreenHeight
# load the GLESv2 library before the EGL library (required on the BCM2835)
try:
self.gles = ctypes.CDLL(ctypes.util.find_library("GLESv2"))
except OSError:
raise ImportError("failed to load the OpenGL ES 2.0 library")
# import all functions first
try:
egl = CDLL(ctypes.util.find_library("EGL"))
def loadfunc(func, ret, *args):
return CFUNCTYPE(ret, *args)((func, egl))
eglGetDisplay = loadfunc("eglGetDisplay", c_void_p, c_void_p)
eglInitialize = loadfunc("eglInitialize", c_uint, c_void_p, POINTER(c_int), POINTER(c_int))
eglChooseConfig = loadfunc("eglChooseConfig", c_uint, c_void_p, c_void_p, POINTER(c_void_p), c_int, POINTER(c_int))
eglCreateWindowSurface = loadfunc("eglCreateWindowSurface", c_void_p, c_void_p, c_void_p, c_void_p, c_void_p)
eglCreateContext = loadfunc("eglCreateContext", c_void_p, c_void_p, c_void_p, c_void_p, c_void_p)
eglMakeCurrent = loadfunc("eglMakeCurrent", c_uint, c_void_p, c_void_p, c_void_p, c_void_p)
self.eglSwapBuffers = loadfunc("eglSwapBuffers", c_int, c_void_p, c_void_p)
except OSError:
raise ImportError("failed to load the EGL library")
except AttributeError:
raise ImportError("failed to load required symbols from the EGL library")
# prepare parameters
config_attribs = [
0x3024, 8, # EGL_RED_SIZE >= 8
0x3023, 8, # EGL_GREEN_SIZE >= 8
0x3022, 8, # EGL_BLUE_SIZE >= 8
0x3021, 0, # EGL_ALPHA_SIZE >= 0
0x3025, 0, # EGL_DEPTH_SIZE >= 0
0x3040, 0x0004, # EGL_RENDERABLE_TYPE = EGL_OPENGL_ES2_BIT
0x3033, 0x0004, # EGL_SURFACE_TYPE = EGL_WINDOW_BIT
0x3038 # EGL_NONE
]
context_attribs = [
0x3098, 2, # EGL_CONTEXT_CLIENT_VERSION = 2
0x3038 # EGL_NONE
]
config_attribs = (c_int * len(config_attribs))(*config_attribs)
context_attribs = (c_int * len(context_attribs))(*context_attribs)
# perform actual initialization
eglMakeCurrent(None, None, None, None)
self.egl_display = eglGetDisplay(display)
if not self.egl_display:
raise RuntimeError("could not get EGL display")
if not eglInitialize(self.egl_display, None, None):
raise RuntimeError("could not initialize EGL")
config = c_void_p()
num_configs = c_int(0)
if not eglChooseConfig(self.egl_display, config_attribs, byref(config), 1, byref(num_configs)):
raise RuntimeError("failed to get a framebuffer configuration")
if not num_configs.value:
raise RuntimeError("no suitable framebuffer configuration found")
self.egl_surface = eglCreateWindowSurface(self.egl_display, config, window, None)
if not self.egl_surface:
raise RuntimeError("could not create EGL surface")
context = eglCreateContext(self.egl_display, config, None, context_attribs)
if not context:
raise RuntimeError("could not create OpenGL ES rendering context")
if not eglMakeCurrent(self.egl_display, self.egl_surface, self.egl_surface, context):
raise RuntimeError("could not activate OpenGL ES rendering context")
def LoadOpenGL(self):
def loadsym(name, prototype):
return prototype((name, self.gles))
return OpenGL(loadsym, desktop=False)
def SwapBuffers(self):
self.eglSwapBuffers(self.egl_display, self.egl_surface)
class Platform_BCM2835(Platform_EGL):
name = 'bcm2835'
allow_custom_fullscreen_res = False
has_hardware_cursor = False
DISPLAY_ID = 0
def __init__(self, libbcm_host):
Platform_EGL.__init__(self)
self.libbcm_host_path = libbcm_host
def Init(self):
try:
self.bcm_host = CDLL(self.libbcm_host_path)
def loadfunc(func, ret, *args):
return CFUNCTYPE(ret, *args)((func, self.bcm_host))
bcm_host_init = loadfunc("bcm_host_init", None)
graphics_get_display_size = loadfunc("graphics_get_display_size", c_int32, c_uint16, POINTER(c_uint32), POINTER(c_uint32))
except OSError:
raise ImportError("failed to load the bcm_host library")
except AttributeError:
raise ImportError("failed to load required symbols from the bcm_host library")
bcm_host_init()
x, y = c_uint32(0), c_uint32(0)
if graphics_get_display_size(self.DISPLAY_ID, byref(x), byref(y)) < 0:
raise RuntimeError("could not determine display size")
self.screen_size = (int(x.value), int(y.value))
def GetScreenSize(self):
return self.screen_size
def StartDisplay(self):
global ScreenWidth, ScreenHeight, Fullscreen, FakeFullscreen, WindowPos
class VC_DISPMANX_ALPHA_T(Structure):
_fields_ = [("flags", c_int), ("opacity", c_uint32), ("mask", c_void_p)]
class EGL_DISPMANX_WINDOW_T(Structure):
_fields_ = [("element", c_uint32), ("width", c_int), ("height", c_int)]
# first, import everything
try:
def loadfunc(func, ret, *args):
return CFUNCTYPE(ret, *args)((func, self.bcm_host))
vc_dispmanx_display_open = loadfunc("vc_dispmanx_display_open", c_uint32, c_uint32)
vc_dispmanx_update_start = loadfunc("vc_dispmanx_update_start", c_uint32, c_int32)
vc_dispmanx_element_add = loadfunc("vc_dispmanx_element_add", c_int32,
c_uint32, c_uint32, c_int32, # update, display, layer
c_void_p, c_uint32, c_void_p, c_uint32, # dest_rect, src, drc_rect, protection
POINTER(VC_DISPMANX_ALPHA_T), # alpha
c_void_p, c_uint32) # clamp, transform
vc_dispmanx_update_submit_sync = loadfunc("vc_dispmanx_update_submit_sync", c_int, c_uint32)
except AttributeError:
raise ImportError("failed to load required symbols from the bcm_host library")
# sanitize arguments
width = min(ScreenWidth, self.screen_size[0])
height = min(ScreenHeight, self.screen_size[1])
if WindowPos:
x0, y0 = WindowPos
else:
x0 = (self.screen_size[0] - width) / 2
y0 = (self.screen_size[1] - height) / 2
x0 = max(min(x0, self.screen_size[0] - width), 0)
y0 = max(min(y0, self.screen_size[1] - height), 0)
# prepare arguments
dst_rect = (c_int32 * 4)(x0, y0, width, height)
src_rect = (c_int32 * 4)(0, 0, width << 16, height << 16)
alpha = VC_DISPMANX_ALPHA_T(1, 255, None) # DISPMANX_FLAGS_ALPHA_FIXED_ALL_PIXELS
# perform initialization
display = vc_dispmanx_display_open(self.DISPLAY_ID)
update = vc_dispmanx_update_start(0)
layer = vc_dispmanx_element_add(update, display, 0, byref(dst_rect), 0, byref(src_rect), 0, byref(alpha), None, 0)
vc_dispmanx_update_submit_sync(update)
self.window = EGL_DISPMANX_WINDOW_T(layer, width, height)
Platform_EGL.StartDisplay(self, None, byref(self.window), width, height)
# finally, tell PyGame what just happened
pygame.display.set_mode((width, height), 0)
pygame.mouse.set_pos((width / 2, height / 2))
libbcm_host = ctypes.util.find_library("bcm_host")
if libbcm_host:
Platform = Platform_BCM2835(libbcm_host)
elif os.name == "nt":
Platform = Platform_Win32()
else:
Platform = Platform_Unix()
##### TOOL CODE ################################################################
# read and write the PageProps and FileProps meta-dictionaries
def GetProp(prop_dict, key, prop, default=None):
    if key not in prop_dict: return default
if type(prop) == types.StringType:
return prop_dict[key].get(prop, default)
for subprop in prop:
try:
return prop_dict[key][subprop]
except KeyError:
pass
return default
def SetProp(prop_dict, key, prop, value):
if not key in prop_dict:
prop_dict[key] = {prop: value}
else:
prop_dict[key][prop] = value
def DelProp(prop_dict, key, prop):
try:
del prop_dict[key][prop]
except KeyError:
pass
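# Illustrative examples (added for clarity; not part of the original file):
# 'prop' may be a single key or a list of fallback keys that are tried in order.
#   GetProp({3: {'title': 'Intro'}}, 3, 'title')             -> 'Intro'
#   GetProp({3: {'title': 'Intro'}}, 3, ['_title', 'title']) -> 'Intro'
#   GetProp({3: {'title': 'Intro'}}, 7, 'title', 'n/a')      -> 'n/a'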
def GetPageProp(page, prop, default=None):
global PageProps
return GetProp(PageProps, page, prop, default)
def SetPageProp(page, prop, value):
global PageProps
SetProp(PageProps, page, prop, value)
def DelPageProp(page, prop):
global PageProps
DelProp(PageProps, page, prop)
def GetTristatePageProp(page, prop, default=0):
res = GetPageProp(page, prop, default)
if res != FirstTimeOnly: return res
return (GetPageProp(page, '_shown', 0) == 1)
def GetFileProp(page, prop, default=None):
global FileProps
return GetProp(FileProps, page, prop, default)
def SetFileProp(page, prop, value):
global FileProps
SetProp(FileProps, page, prop, value)
# the Impressive logo (256x64 pixels grayscale PNG)
LOGO = """iVBORw0KGgoAAAANSUhEUgAAAQAAAABACAAAAADQNvZiAAAL8ElEQVR4Xu2Ze1hVVfrHv+cc7siAEiF4AW1QEkmD8pJUWlkaaSWWk9pk5ZT5szKvPydvoVhqKuWY9jhkmjZpmZmO9wwzLwhiCImAeEFEkJtyk/se17tZ66yz9zlp+IcPD3z++Z79ujxrne963/XupWjytNCCy5QtuXm/vueAxmBAk8dnWyhpWkhFszTA7VR7qMy
ajz+PEUS/RXO7omnyDP/9eBKNNuCdg1Pn/PYUmiQR4HRutAEeiwyA0yo0RVwGg1PYaAO6OQKAfys0Qbq6gHO60QacVQCgoAxNkPa4PQPsmOQumQIoU9BI5gYCyHy/CRuAqb8Pq4jZi0byakcA36MpG4Avv0SjcaQ1ZNxxA5S0xnWB26YTfccZ3Bl8wMmquEMG/BV3MgPcwTmJZmnAX8D55U4ZcA+T8hwArd3xJ3H0gnU8nGENVzfbGRCLW8Xe2
2BpQN/+NwgE0ZV9DgMRPGHp11Gj3SGwD5+8KubtMKM+AwrHLNmdU3S1Mml2F+0K+zPaAHAY/fH6mY+D4/X2ocLKK3nb5z4CS3quPphXXJaxZf6TkPH75KeLpSUXdix+wWQtA0pOMAljk3WChAvN30GMf3Xflarcor0LnobAWKncYAmIbexzOgDD6CMKkTOczzX1okLs84FEhmJB3edekImgaAjw6Dn24Te+rsU1CifaHmY8V9YpnKNmC5znVoh
w2kixBSYR/C8Yx9nDRkjMoEXdC8JuernC+aYVz4AOjtIxHsAkDfDf91UfED7fqg4MOL2oPYjHk7pBYOevKao3knvoj4h0dP1BHtgneYodOO8eaA+O76lxRnB67z74CAjnuDnO4HTZkCw2RVMBR+ivwYzbFCbfpKrpHf+RCzgj4oPIAFqiMMDUSTXgheTHIFh5N2CKlPbdaykEHe2gwTu2j9aAnDLP7R4wE7a3MyT6Jt4NFcOX9EkQ9imIRcGQ6
bbexhFwmIrFG4J3WfHVRarG/dwTEoFxQXoDOjowOT2W8iN71yUw7hoL47pZRqA2eUcOGE8NEhs+h+RE9Ai/Li8uOAWGxxZvjQFp9puZcvrupPSr3LXwn5tyyNF5UHlnIIjCUsgMmgCipNhWEyhNFBkgp4D7JCZfp9ELy37awrr90dO+OktH6lIQi1lFVJvAGKgwNrPIpgcNMMyl51h8dkOuR3sDppUUWcsL4GuF8Afh+HE9Pe6BgM6NlTEsys8
Ad4opv3alHN3CwrXBIBJp0L86whQ6cXO5ODPUWTYGwhD05vqCG+FKqDysNLADKrksEAXOHPpyMt8ujgam9KJGoP4M9SSkFaSDGM8XWt3geTw9LGMjAsBwukKLh8oqhagSdftYJQXC+bMTOXLhRihz6aB2Izf8BGAtDdlpBGHYw572qn5Wyuvv+D034HfaEai0/qxOGBDODZgGFbJzn+imV9njGu4FM5T319XsKZXqN1lycJmicomX8VQ+w0FPq
KxngVwQwxWV0xBEKbJBCOKOnhTlOoAC59uIA5Ge6VztTh99wRl8hgxwqmXhx8B54Bg3YCQ3gGf9NBa4xvcjkj3V0HnThbrO1XvA3a2iFDACBoqdkc9sFA08yjMYKhufKIRKFhNvmqLDauzN0NwEFmQz6ecHiy/ExcHX0MBkkneK+PPRFCbUqLzB6ATOzu6LmXiaLMMJfd7SdIGy41A5QtFAEG3eZbL2LM1Hmz07U1wd9tCsRsDXWdsFURF+Cg1
Ug9g9qopHFCbl9QDwgcf+59ppDCifR9LN0oDiQZfQQAAVXuZ2CGhRXcxGTjKAU7mBSQ7dcyY4glO/RtMFfq3l3tRIjXAy86dmPg18hQ7RNdpZjXyJmVIXrIng+8/35PSIOnDoFxeRW3//ZYiHi8YAxFszYKRwFC8bmCyvh+A89WjaFuoJw7a1hgXKMSY9D/nbvAoc4IHrSWYDPN9msoa+PoL6zhel2lntrHXB2bsgaEsy4hoE5BEt9M2T4RUPQ
GtAhhUDtkjfOIAkOhoS3ABlRRST8OPDEyGzvD+T0MTRO2xcBWLBOcJW1AeMqW4AqqPUdgHGxInaWXkG1J+TKiBOe9W5nqy9/WVQAT1XJtnHKcvRGVA1GQLnXrBKa5JVF1WTD42FzNZ4dcz2eUarGVCeAMiHQHcXAF7UyGKyJAP0s3IDsqjWNT9HRDIVCFx9xZAxWQ121J6HxCXpxHLoyOTzcxD0cIBVikmKnikldVq9xhlm6oZmkRpm7vaylgG
Hai0NMLE0mObKvF8Ahsc9NmalEtCcgZXZ+v0mtB7lg9tXC+2IYvmfixJgxoskpxQakkGcfGGzK8jdkOHStLnhe3zAeOLEiEP6DIiVSvsyG9j7F3iPp3afLc2aXwQNmdyATMmAs4qUIp62DSCEfYJ2lMy5mtECT5LXd8EGu3tvoVXgvoRRUqdICf22n/r1sRNXQOCuMwBHhqltYLoLgMoP5Vlnr4IWI9q2kl8D9BWgNSCAR2wZEEySK48+o6v1P
Njk9we3gfjLt31h5vKAFSDslr8EQcS9xDEQ8oWw7TgqvpybzGqnvwvq91sfKea55O2mM6A7yTFpdEk+zBSQFME21579YCa1Sqetvc9BUDPh+CpqUoY1WaIK+J9rDWjvO90ZwPWPbjarUdsFb54BmgrQGTCYZLetBEnnLxO2UWa/WA6G1yLIrOmfS+q40sBDvkNeDjLBguM1TIa9QRf5XM2stgxQztpIWIqU52gjGbYNiHiMSfYpqwYIMwPxh3z
X7zzpsC4gRI9PIA1+GoT/vks/rku5OBQylSeYLHQCULFQZFU+zWrTgMsVGgNslrirjz4D6s9C4LqMJAaEnZ/OgKKiWzAASQ/G0fKGwoJLD28mfR6MvsmPM/HZGqWvARcAWHFF8t2mAdozsDrrFrugeMyugmBmB6r6aBD+drzFaGpgoBFWcIOgYA5JoCZcOUURYee1raAy4xGtAUT5Ys2sYa42DZDS+1w9BO5eVpuA7S7YbxLJp1d1dglSmPQcC
ws69GDyQ6QDOPuoUdCKl8S4g3P+kAi/FsCDhiirBizP18zq8z4s8HwIxrvcb7UL6iN6A8L3OlAn+xC2DVhNsqANzDjNOn0X09BZieJFuc4o/runx2unhkAgwr0gCDWBQzcqovRjmFlfzWRyAMyYxqcHwWjRBTvfvAuS69cKuIUesgGey39wppkjKmQDKnIgc+wQjd0fBM7zqZEuaQD83BF0eLEziOGUfL8BMHaH748bPEGE9OZh3AuBsx8kDoP
4tBBm8jYxcdgTBs6jiSvapMMoX4b97G+jCzo8uTxzApV83atpljcJWPJeLW1rwiRvAE4PTYr93h9l2SwEwDQl+7txAfB4j27utYlsEhcAIy/smNzD4DpqO60xTvO91dn6GihZApmZJUz8DyzoAMA+9P9+jL0PSIedyADbV6HSPE1Ea8D86Wjl5cmz8PpLW/WjZeIjIynvlyzJO+nR097cp+8Do01EBMpagYjKE2HXwYNR7gpiI+1x/N/ASarWG
/BJMWQuTFjHxDhjRnGSXaiaZmWXGwzIL/mj14AMXRcUkQBx9xcUDaHViTdLvQGI8nsdhPdAHtrPZFMvXuqtQCTMZ3IwZowJhCuInPEkX0wSLzaRkEmsdgCuLYUlX/k3jGrdn4diAaOuC9Ze+LNdUKZ2VdBhCDo4WDWgfuxCBTJH+k+lNBjaPwESZ0ZTseSN7bkTEvmjikivjq2Fyr+3Q6YqEcCyq9Awb1w1ZFKHDwWMurvg+VoI3Lxv3gVlitY
FvZWrsysTOv6/z1EIkoc+dAAqB3qNPCfqen5wGu9hTz9xgoeVmMBYqOzqlUQl+uY/9NeB4mjo+DxoGwTnxwRvVgCDowFArWqlgxFAvWyTE5OaOghM9mQx38ACT/ZUCVQVFOSn7oyrgwVGBz5aT/CQMF/vwtTU06lJ9ZAwdA65PyQoJzllRzpk2oWEhPQoSkn5OR5mTPf39oiPuwYNfV/Bgf/AGp2eHdCubUXqDU7UqNPhdvAoZjIzCk0XIxqLn
OLN3IAzzduAFgMKrzZXA8R7cTPOgGZugNvdzdoA0QWbtQEtGdBiQEl+MzagqSdAiwEttPA/JcotzChXXBQAAAAASUVORK5CYII="""
# the default cursor (19x23 pixel RGBA PNG)
DEFAULT_CURSOR = """iVBORw0KGgoAAAANSUhEUgAAABMAAAAXCAYAAADpwXTaAAADCklEQVR42qWUXWwMURTH787MznbWbm1VtdWP0KBN+pFWlQRVQlJBQkR4lGqioY0IibSprAchHgQhoh76hAQPJB4IRdBobdFstbZ4oJLup9au3c5Md3fmjnPHdE2qZVsn+c3snDv3v/9zzt2lEcRbx90rnAk/d7x2xdF/BAWwFmv6jm1bal4db95Xp
uVmLcbEJfQ9Y0Fu8YZ1yzsvnTu6G3LG2YopPM+HbfMWohTObC0pWXLjWrv9DOS52YjJAi8EKJpBqbZMxNAMlZeXdeTOzdP36/duzYF1w4yciSI/gmUJxLIQw7CIomiUZrOu37m9puukvW51sn0kL2FBEN0Yy2qClGswUIiijYjjUvJXrijuaLt4uCGZPv7qmTAWIGIKMMeajliTGQQNqkOGYbiCxTmXr7e3XC0tXmT5mxhNLtVrq3KWLS3YQxw
RjCyHBD6IFPUVclUMHGeqWFVVWJuXm/Gku2cwNK0zr9fvJc5UdwqGqVoRZ56rOjMAFMWon1NTLZU11WXdZ0/Vb56qj2ri0eOXwzAAnBDEGKWl56oCk2FZNqOoMP9e24XG5sl9VMv0+0eM9XW7mhijkSXPpF+M0YRkOY7iMVFfbsKE1cJtrN1UXmrmUjr6XUMi0lmVYKKj5Hjo3dnSshENU9WXS75IxgoOhfmxWEwurSwvaIX96mCYCbFoNBrEW
MqnMK0JSurx6HcNhxwOR8TnHx33eALjXt+o4A8EBUVReNjnBgaALGBoQkwWRRGOB1ZFDJhSBV90OoIHmuxOWZZ98E4Q4HVEgDDgAUiZyoQYjsbiI2SSMpRKynrv+jR2sKmlF4TewLpD20RExrXNMY24dpcTYvBj94F1RHC7vdH9Dcf6eF5wwtpDwKk5wZMnoY/fzqIxH3EWiQhS46ETAz7/t3eQfwqQe2g6gT/OGYkfobBHisfkVvv5vg8fP/d
D6hnQq/Xqn0KJc0aiorxofq9zkL11+8FXeOwCOgGfVlpSof+vygTWAGagB/iiNTfp0IsRkWxA0hxFZyI0lbBRX/pM4ycZx2V6yAv08AAAAABJRU5ErkJggg=="""
# determine the next power of two
def npot(x):
res = 1
while res < x: res <<= 1
return res
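# Illustrative examples (added; not in the original): npot(1) -> 1,
# npot(3) -> 4, npot(512) -> 512, npot(513) -> 1024. Handy when sizing
# textures for hardware that only supports power-of-two dimensions.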
# convert boolean value to string
def b2s(b):
if b: return "Y"
return "N"
# extract a number at the beginning of a string
def num(s):
s = s.strip()
r = ""
    while s and s[0] in "0123456789":
r += s[0]
s = s[1:]
try:
return int(r)
except ValueError:
return -1
# linearly interpolate between two floating-point RGB colors represented as tuples
def lerpColor(a, b, t):
return tuple([min(1.0, max(0.0, x + t * (y - x))) for x, y in zip(a, b)])
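# Illustrative examples (added; not in the original):
#   lerpColor((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), 0.25) -> (0.25, 0.25, 0.25)
#   t values outside [0, 1] extrapolate, but each component is clamped to
#   the valid [0.0, 1.0] range.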
# get a representative subset of file statistics
def my_stat(filename):
try:
s = os.stat(filename)
except OSError:
return None
return (s.st_size, s.st_mtime, s.st_ctime, s.st_mode)
# determine (pagecount,width,height) of a PDF file
def analyze_pdf(filename):
f = file(filename,"rb")
pdf = f.read()
f.close()
box = map(float, pdf.split("/MediaBox",1)[1].split("]",1)[0].split("[",1)[1].strip().split())
return (max(map(num, pdf.split("/Count")[1:])), box[2]-box[0], box[3]-box[1])
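# Illustrative example (added; not in the original): for a PDF whose first
# /MediaBox is [0 0 595 842] (A4 in points), analyze_pdf() returns
# (pagecount, 595.0, 842.0), where pagecount is the largest /Count value found.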
# unescape &#NNN; numeric literals in PDF files
re_unescape = re.compile(r'&#[0-9]+;')
def decode_literal(m):
try:
code = int(m.group(0)[2:-1])
if code:
return chr(code)
else:
return ""
except ValueError:
return '?'
def unescape_pdf(s):
return re_unescape.sub(decode_literal, s)
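# Illustrative example (added; not in the original): pdftk escapes non-ASCII
# metadata as numeric entities, so unescape_pdf("Caf&#233;") -> "Caf\xe9",
# and "&#0;" decodes to an empty string.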
# parse pdftk output
def pdftkParse(filename, page_offset=0):
f = file(filename, "r")
InfoKey = None
BookmarkTitle = None
Title = None
Pages = 0
for line in f.xreadlines():
try:
key, value = [item.strip() for item in line.split(':', 1)]
except ValueError:
continue
key = key.lower()
if key == "numberofpages":
Pages = int(value)
elif key == "infokey":
InfoKey = value.lower()
elif (key == "infovalue") and (InfoKey == "title"):
Title = unescape_pdf(value)
InfoKey = None
elif key == "bookmarktitle":
BookmarkTitle = unescape_pdf(value)
elif key == "bookmarkpagenumber" and BookmarkTitle:
try:
page = int(value)
if not GetPageProp(page + page_offset, '_title'):
SetPageProp(page + page_offset, '_title', BookmarkTitle)
except ValueError:
pass
BookmarkTitle = None
f.close()
if AutoOverview:
SetPageProp(page_offset + 1, '_overview', True)
for page in xrange(page_offset + 2, page_offset + Pages):
SetPageProp(page, '_overview', \
not(not(GetPageProp(page + AutoOverview - 1, '_title'))))
SetPageProp(page_offset + Pages, '_overview', True)
return (Title, Pages)
# translate pixel coordinates to normalized screen coordinates
def MouseToScreen(mousepos):
return (ZoomX0 + mousepos[0] * ZoomArea / ScreenWidth,
ZoomY0 + mousepos[1] * ZoomArea / ScreenHeight)
# normalize rectangle coordinates so that the upper-left point comes first
def NormalizeRect(X0, Y0, X1, Y1):
return (min(X0, X1), min(Y0, Y1), max(X0, X1), max(Y0, Y1))
# check if a point is inside a box (or a list of boxes)
def InsideBox(x, y, box):
return (x >= box[0]) and (y >= box[1]) and (x < box[2]) and (y < box[3])
def FindBox(x, y, boxes):
for i in xrange(len(boxes)):
if InsideBox(x, y, boxes[i]):
return i
raise ValueError
# zoom an image size to a destination size, preserving the aspect ratio
def ZoomToFit(size, dest=None):
if not dest:
dest = (ScreenWidth + Overscan, ScreenHeight + Overscan)
newx = dest[0]
newy = size[1] * newx / size[0]
if newy > dest[1]:
newy = dest[1]
newx = size[0] * newy / size[1]
return (newx, newy)
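# Illustrative examples (added; not in the original):
#   ZoomToFit((400, 300), (1024, 768)) -> (1024, 768)   (same aspect ratio)
#   ZoomToFit((1000, 200), (800, 600)) -> (800, 160)    (fits the width, keeps the aspect ratio)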
# get the overlay grid screen coordinates for a specific page
def OverviewPos(page):
return ( \
int(page % OverviewGridSize) * OverviewCellX + OverviewOfsX, \
int(page / OverviewGridSize) * OverviewCellY + OverviewOfsY \
)
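# Illustrative example (added; not in the original): with OverviewGridSize = 4,
# OverviewCellX = OverviewCellY = 100 and zero offsets, page 5 sits in grid
# cell (column 1, row 1), i.e. OverviewPos(5) -> (100, 100).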
def StopMPlayer():
global MPlayerProcess, VideoPlaying
if not MPlayerProcess: return
# first, ask politely
try:
MPlayerProcess.stdin.write('quit\n')
for i in xrange(10):
if not(MPlayerProcess.poll() is None):
MPlayerProcess = None
VideoPlaying = False
return
time.sleep(0.1)
except:
pass
# if that didn't work, be rude
print >>sys.stderr, "MPlayer didn't exit properly, killing PID", MPlayerProcess.pid
try:
if os.name == 'nt':
win32api.TerminateProcess(win32api.OpenProcess(1, False, MPlayerProcess.pid), 0)
else:
os.kill(MPlayerProcess.pid, 2)
MPlayerProcess = None
except:
pass
VideoPlaying = False
def ClockTime(minutes):
if minutes:
return time.strftime("%H:%M")
else:
return time.strftime("%H:%M:%S")
def FormatTime(t, minutes=False):
if minutes and (t < 3600):
return "%d min" % (t / 60)
elif minutes:
return "%d:%02d" % (t / 3600, (t / 60) % 60)
elif t < 3600:
return "%d:%02d" % (t / 60, t % 60)
else:
ms = t % 3600
return "%d:%02d:%02d" % (t / 3600, ms / 60, ms % 60)
def SafeCall(func, args=[], kwargs={}):
if not func: return None
try:
return func(*args, **kwargs)
except:
print >>sys.stderr, "----- Unhandled Exception ----"
traceback.print_exc(file=sys.stderr)
print >>sys.stderr, "----- End of traceback -----"
def Quit(code=0):
global CleanExit
if not code:
CleanExit = True
StopMPlayer()
Platform.Done()
print >>sys.stderr, "Total presentation time: %s." % \
FormatTime((Platform.GetTicks() - StartTime) / 1000)
sys.exit(code)
##### OPENGL (ES) 2.0 LOADER AND TOOLKIT #######################################
if os.name == 'nt':
GLFUNCTYPE = WINFUNCTYPE
else:
GLFUNCTYPE = CFUNCTYPE
class GLFunction(object):
def __init__(self, required, name, ret, *args):
self.name = name
self.required = required
self.prototype = GLFUNCTYPE(ret, *args)
class OpenGL(object):
FALSE = 0
TRUE = 1
NO_ERROR = 0
INVALID_ENUM = 0x0500
INVALID_VALUE = 0x0501
INVALID_OPERATION = 0x0502
OUT_OF_MEMORY = 0x0505
INVALID_FRAMEBUFFER_OPERATION = 0x0506
VENDOR = 0x1F00
RENDERER = 0x1F01
VERSION = 0x1F02
EXTENSIONS = 0x1F03
POINTS = 0x0000
LINES = 0x0001
LINE_LOOP = 0x0002
LINE_STRIP = 0x0003
TRIANGLES = 0x0004
TRIANGLE_STRIP = 0x0005
TRIANGLE_FAN = 0x0006
BYTE = 0x1400
UNSIGNED_BYTE = 0x1401
SHORT = 0x1402
UNSIGNED_SHORT = 0x1403
INT = 0x1404
UNSIGNED_INT = 0x1405
FLOAT = 0x1406
DEPTH_TEST = 0x0B71
BLEND = 0x0BE2
ZERO = 0
ONE = 1
SRC_COLOR = 0x0300
ONE_MINUS_SRC_COLOR = 0x0301
SRC_ALPHA = 0x0302
ONE_MINUS_SRC_ALPHA = 0x0303
DST_ALPHA = 0x0304
ONE_MINUS_DST_ALPHA = 0x0305
DST_COLOR = 0x0306
ONE_MINUS_DST_COLOR = 0x0307
DEPTH_BUFFER_BIT = 0x00000100
COLOR_BUFFER_BIT = 0x00004000
TEXTURE0 = 0x84C0
TEXTURE_2D = 0x0DE1
TEXTURE_RECTANGLE = 0x84F5
TEXTURE_MAG_FILTER = 0x2800
TEXTURE_MIN_FILTER = 0x2801
TEXTURE_WRAP_S = 0x2802
TEXTURE_WRAP_T = 0x2803
NEAREST = 0x2600
LINEAR = 0x2601
NEAREST_MIPMAP_NEAREST = 0x2700
LINEAR_MIPMAP_NEAREST = 0x2701
NEAREST_MIPMAP_LINEAR = 0x2702
LINEAR_MIPMAP_LINEAR = 0x2703
CLAMP_TO_EDGE = 0x812F
REPEAT = 0x2901
ALPHA = 0x1906
RGB = 0x1907
RGBA = 0x1908
LUMINANCE = 0x1909
LUMINANCE_ALPHA = 0x190A
ARRAY_BUFFER = 0x8892
ELEMENT_ARRAY_BUFFER = 0x8893
STREAM_DRAW = 0x88E0
STATIC_DRAW = 0x88E4
DYNAMIC_DRAW = 0x88E8
FRAGMENT_SHADER = 0x8B30
VERTEX_SHADER = 0x8B31
COMPILE_STATUS = 0x8B81
LINK_STATUS = 0x8B82
INFO_LOG_LENGTH = 0x8B84
_funcs = [
GLFunction(True, "GetString", c_char_p, c_uint),
GLFunction(True, "Enable", None, c_uint),
GLFunction(True, "Disable", None, c_uint),
GLFunction(True, "GetError", c_uint),
GLFunction(True, "Viewport", None, c_int, c_int, c_int, c_int),
GLFunction(True, "Clear", None, c_uint),
GLFunction(True, "ClearColor", None, c_float, c_float, c_float, c_float),
GLFunction(True, "BlendFunc", None, c_uint, c_uint),
GLFunction(True, "GenTextures", None, c_uint, POINTER(c_int)),
GLFunction(True, "BindTexture", None, c_uint, c_int),
GLFunction(True, "ActiveTexture", None, c_uint),
GLFunction(True, "TexParameteri", None, c_uint, c_uint, c_int),
GLFunction(True, "TexImage2D", None, c_uint, c_uint, c_uint, c_uint, c_uint, c_uint, c_uint, c_uint, c_void_p),
GLFunction(True, "GenerateMipmap", None, c_uint),
GLFunction(True, "GenBuffers", None, c_uint, POINTER(c_int)),
GLFunction(True, "BindBuffer", None, c_uint, c_int),
GLFunction(True, "BufferData", None, c_uint, c_void_p, c_void_p, c_uint),
GLFunction(True, "CreateProgram", c_uint),
GLFunction(True, "CreateShader", c_uint, c_uint),
GLFunction(True, "ShaderSource", None, c_uint, c_uint, c_void_p, c_void_p),
GLFunction(True, "CompileShader", None, c_uint),
GLFunction(True, "GetShaderiv", None, c_uint, c_uint, POINTER(c_uint)),
GLFunction(True, "GetShaderInfoLog", None, c_uint, c_uint, c_void_p, c_void_p),
GLFunction(True, "AttachShader", None, c_uint, c_uint),
GLFunction(True, "LinkProgram", None, c_uint),
GLFunction(True, "GetProgramiv", None, c_uint, c_uint, POINTER(c_uint)),
GLFunction(True, "GetProgramInfoLog", None, c_uint, c_uint, c_void_p, c_void_p),
GLFunction(True, "UseProgram", None, c_uint),
GLFunction(True, "BindAttribLocation", None, c_uint, c_uint, c_char_p),
GLFunction(True, "GetAttribLocation", c_int, c_uint, c_char_p),
GLFunction(True, "GetUniformLocation", c_int, c_uint, c_char_p),
GLFunction(True, "Uniform1f", None, c_uint, c_float),
GLFunction(True, "Uniform2f", None, c_uint, c_float, c_float),
GLFunction(True, "Uniform3f", None, c_uint, c_float, c_float, c_float),
GLFunction(True, "Uniform4f", None, c_uint, c_float, c_float, c_float, c_float),
GLFunction(True, "Uniform1i", None, c_uint, c_int),
GLFunction(True, "Uniform2i", None, c_uint, c_int, c_int),
GLFunction(True, "Uniform3i", None, c_uint, c_int, c_int, c_int),
GLFunction(True, "Uniform4i", None, c_uint, c_int, c_int, c_int, c_int),
GLFunction(True, "EnableVertexAttribArray", None, c_uint),
GLFunction(True, "DisableVertexAttribArray", None, c_uint),
GLFunction(True, "VertexAttribPointer", None, c_uint, c_uint, c_uint, c_uint, c_uint, c_void_p),
GLFunction(True, "DrawArrays", None, c_uint, c_uint, c_uint),
GLFunction(True, "DrawElements", None, c_uint, c_uint, c_uint, c_void_p),
]
_typemap = {
BYTE: c_int8,
UNSIGNED_BYTE: c_uint8,
SHORT: c_int16,
UNSIGNED_SHORT: c_uint16,
INT: c_int32,
UNSIGNED_INT: c_uint32,
FLOAT: c_float
}
def __init__(self, loader, desktop=False):
global GLVendor, GLRenderer, GLVersion
self._is_desktop_gl = desktop
for func in self._funcs:
funcptr = None
for suffix in ("", "ARB", "ObjectARB", "EXT", "OES"):
funcptr = loader("gl" + func.name + suffix, func.prototype)
if funcptr:
break
if not funcptr:
if func.required:
raise ImportError("failed to import required OpenGL function 'gl%s'" % func.name)
else:
def errfunc(*args):
raise ImportError("call to unimplemented OpenGL function 'gl%s'" % func.name)
funcptr = errfunc
if hasattr(self, func.name):
setattr(self, '_' + func.name, funcptr)
else:
setattr(self, func.name, funcptr)
if func.name == "GetString":
GLVendor = self.GetString(self.VENDOR) or ""
GLRenderer = self.GetString(self.RENDERER) or ""
GLVersion = self.GetString(self.VERSION) or ""
self._init()
def GenTextures(self, n=1):
bufs = (c_int * n)()
self._GenTextures(n, bufs)
if n == 1: return bufs[0]
return list(bufs)
def ActiveTexture(self, tmu):
if tmu < self.TEXTURE0:
tmu += self.TEXTURE0
self._ActiveTexture(tmu)
def GenBuffers(self, n=1):
bufs = (c_int * n)()
self._GenBuffers(n, bufs)
if n == 1: return bufs[0]
return list(bufs)
def BufferData(self, target, size=0, data=None, usage=STATIC_DRAW, type=None):
if isinstance(data, list):
if type:
type = self._typemap[type]
elif isinstance(data[0], int):
type = c_int32
elif isinstance(data[0], float):
type = c_float
else:
raise TypeError("cannot infer buffer data type")
size = len(data) * sizeof(type)
data = (type * len(data))(*data)
self._BufferData(target, cast(size, c_void_p), cast(data, c_void_p), usage)
def ShaderSource(self, shader, source):
source = c_char_p(source)
self._ShaderSource(shader, 1, pointer(source), None)
def GetShaderi(self, shader, pname):
res = (c_uint * 1)()
self.GetShaderiv(shader, pname, res)
return res[0]
def GetShaderInfoLog(self, shader):
length = self.GetShaderi(shader, self.INFO_LOG_LENGTH)
if not length: return None
buf = create_string_buffer(length + 1)
self._GetShaderInfoLog(shader, length + 1, None, buf)
return buf.raw.split('\0', 1)[0]
def GetProgrami(self, program, pname):
res = (c_uint * 1)()
self.GetProgramiv(program, pname, res)
return res[0]
def GetProgramInfoLog(self, program):
length = self.GetProgrami(program, self.INFO_LOG_LENGTH)
if not length: return None
buf = create_string_buffer(length + 1)
self._GetProgramInfoLog(program, length + 1, None, buf)
return buf.raw.split('\0', 1)[0]
def Uniform(self, location, *values):
if not values:
raise TypeError("no values for glUniform")
if (len(values) == 1) and (isinstance(values[0], list) or isinstance(values[0], tuple)):
values = values[0]
l = len(values)
if l > 4:
raise TypeError("uniform vector has too-high order(%d)" % len(values))
if any(isinstance(v, float) for v in values):
if l == 1: self.Uniform1f(location, values[0])
elif l == 2: self.Uniform2f(location, values[0], values[1])
elif l == 3: self.Uniform3f(location, values[0], values[1], values[2])
else: self.Uniform4f(location, values[0], values[1], values[2], values[3])
else:
if l == 1: self.Uniform1i(location, values[0])
elif l == 2: self.Uniform2i(location, values[0], values[1])
elif l == 3: self.Uniform3i(location, values[0], values[1], values[2])
else: self.Uniform4i(location, values[0], values[1], values[2], values[3])
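    # Illustrative examples (added for clarity; not part of the original file):
    #   gl.Uniform(loc, 0.5)                 -> glUniform1f
    #   gl.Uniform(loc, (1, 2, 3))           -> glUniform3i (a single list/tuple is unpacked)
    #   gl.Uniform(loc, 1.0, 0.0, 0.0, 0.1)  -> glUniform4f
    # Any float among the values selects the float variants.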
##### Convenience Functions #####
def _init(self):
self.enabled_attribs = set()
def set_enabled_attribs(self, *attrs):
want = set(attrs)
for a in (want - self.enabled_attribs):
self.EnableVertexAttribArray(a)
for a in (self.enabled_attribs - want):
self.DisableVertexAttribArray(a)
self.enabled_attribs = want
def set_texture(self, target=TEXTURE_2D, tex=0, tmu=0):
self.ActiveTexture(self.TEXTURE0 + tmu)
self.BindTexture(target, tex)
def make_texture(self, target=TEXTURE_2D, wrap=CLAMP_TO_EDGE, filter=LINEAR_MIPMAP_NEAREST, img=None):
tex = self.GenTextures()
min_filter = filter
if min_filter < self.NEAREST_MIPMAP_NEAREST:
mag_filter = min_filter
else:
mag_filter = self.NEAREST + (min_filter & 1)
self.BindTexture(target, tex)
self.TexParameteri(target, self.TEXTURE_WRAP_S, wrap)
self.TexParameteri(target, self.TEXTURE_WRAP_T, wrap)
self.TexParameteri(target, self.TEXTURE_MIN_FILTER, min_filter)
self.TexParameteri(target, self.TEXTURE_MAG_FILTER, mag_filter)
if img:
self.load_texture(target, img)
return tex
def load_texture(self, target, tex_or_img, img=None):
if img:
            self.BindTexture(target, tex_or_img)
else:
img = tex_or_img
if img.mode == 'RGBA': format = self.RGBA
elif img.mode == 'RGB': format = self.RGB
elif img.mode == 'LA': format = self.LUMINANCE_ALPHA
elif img.mode == 'L': format = self.LUMINANCE
else: raise TypeError("image has unsupported color format '%s'" % img.mode)
        self.TexImage2D(target, 0, format, img.size[0], img.size[1], 0, format, self.UNSIGNED_BYTE, img.tostring())
class GLShaderCompileError(SyntaxError):
pass
class GLInvalidShaderError(GLShaderCompileError):
pass
class GLShader(object):
LOG_NEVER = 0
LOG_ON_ERROR = 1
LOG_IF_NOT_EMPTY = 2
LOG_ALWAYS = 3
LOG_DEFAULT = LOG_ON_ERROR
def __init__(self, vs=None, fs=None, attributes=[], uniforms=[], loglevel=None):
if not(vs): vs = self.vs
if not(fs): fs = self.fs
if not(attributes) and hasattr(self, 'attributes'):
attributes = self.attributes
if isinstance(attributes, dict):
attributes = attributes.items()
if not(uniforms) and hasattr(self, 'uniforms'):
uniforms = self.uniforms
if isinstance(uniforms, dict):
uniforms = uniforms.items()
uniforms = [((u, None) if isinstance(u, basestring) else u) for u in uniforms]
if (loglevel is None) and hasattr(self, 'loglevel'):
loglevel = self.loglevel
if loglevel is None:
loglevel = self.LOG_DEFAULT
self.program = gl.CreateProgram()
def handle_shader_log(status, log_getter, action):
force_log = (loglevel >= self.LOG_ALWAYS) or ((loglevel >= self.LOG_ON_ERROR) and not(status))
if force_log or (loglevel >= self.LOG_IF_NOT_EMPTY):
log = log_getter().rstrip()
else:
log = ""
if force_log or ((loglevel >= self.LOG_IF_NOT_EMPTY) and log):
if status:
print >>sys.stderr, "Info: log for %s %s:" % (self.__class__.__name__, action)
else:
print >>sys.stderr, "Error: %s %s failed - log information follows:" % (self.__class__.__name__, action)
for line in log.split('\n'):
print >>sys.stderr, '>', line.rstrip()
if not status:
raise GLShaderCompileError("failure during %s %s" % (self.__class__.__name__, action))
def handle_shader(type_enum, type_name, src):
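            # precision qualifiers are a GLSL ES feature; desktop GLSL
            # (before version 1.30) rejects them, so strip them from the
            # source when running on desktop OpenGL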
if gl._is_desktop_gl:
src = src.replace("highp ", "")
src = src.replace("mediump ", "")
src = src.replace("lowp ", "")
shader = gl.CreateShader(type_enum)
gl.ShaderSource(shader, src)
gl.CompileShader(shader)
handle_shader_log(gl.GetShaderi(shader, gl.COMPILE_STATUS),
lambda: gl.GetShaderInfoLog(shader),
type_name + " shader compilation")
gl.AttachShader(self.program, shader)
handle_shader(gl.VERTEX_SHADER, "vertex", vs)
handle_shader(gl.FRAGMENT_SHADER, "fragment", fs)
        for attr in attributes:
            if not isinstance(attr, basestring):
                loc, name = attr
                if isinstance(loc, basestring):
                    loc, name = name, loc
                setattr(self, name, loc)
            elif hasattr(self, attr):
                name = attr
                loc = getattr(self, name)
            else:
                continue  # no fixed location assigned -> query it after linking
            gl.BindAttribLocation(self.program, loc, name)
gl.LinkProgram(self.program)
handle_shader_log(gl.GetProgrami(self.program, gl.LINK_STATUS),
lambda: gl.GetProgramInfoLog(self.program),
"linking")
gl.UseProgram(self.program)
for name in attributes:
            if isinstance(name, basestring) and not(hasattr(self, name)):
setattr(self, name, int(gl.GetAttribLocation(self.program, name)))
for u in uniforms:
loc = int(gl.GetUniformLocation(self.program, u[0]))
setattr(self, u[0], loc)
if u[1] is not None:
gl.Uniform(loc, *u[1:])
def use(self):
gl.UseProgram(self.program)
return self
    @classmethod
    def get_instance(cls):
        try:
            instance = cls._instance
            if instance:
                return instance
            else:
                raise GLInvalidShaderError("shader failed to compile in the past")
        except AttributeError:
            try:
                cls._instance = cls()
            except GLShaderCompileError:
                cls._instance = None
                raise
            return cls._instance
# NOTE: OpenGL drawing code in Impressive uses the following conventions:
# - program binding is undefined
# - vertex attribute layout is undefined
# - vertex attribute enable/disable is managed by gl.set_enabled_attribs()
# - texture bindings are undefined
# - ActiveTexture is TEXTURE0
# - array and element array buffer bindings are undefined
# - BLEND is disabled, BlendFunc is (SRC_ALPHA, ONE_MINUS_SRC_ALPHA)
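# As a rough orientation (a sketch only; prog, vbuf and tex are placeholders,
# not names used elsewhere in this file), a draw call that follows these
# conventions looks like this:
#     prog.use()                                # bind the program explicitly
#     gl.set_texture(gl.TEXTURE_2D, tex, 0)     # leaves ActiveTexture at TEXTURE0
#     gl.BindBuffer(gl.ARRAY_BUFFER, vbuf)
#     gl.set_enabled_attribs(0)
#     gl.VertexAttribPointer(0, 2, gl.FLOAT, False, 0, 0)
#     gl.DrawArrays(gl.TRIANGLE_STRIP, 0, 4)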
##### STOCK SHADERS ############################################################
class SimpleQuad(object):
"vertex buffer singleton for a simple quad (used by various shaders)"
vbuf = None
@classmethod
    def draw(cls):
        gl.set_enabled_attribs(0)
        if not cls.vbuf:
            cls.vbuf = gl.GenBuffers()
            gl.BindBuffer(gl.ARRAY_BUFFER, cls.vbuf)
            gl.BufferData(gl.ARRAY_BUFFER, data=[0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0])
        else:
            gl.BindBuffer(gl.ARRAY_BUFFER, cls.vbuf)
gl.VertexAttribPointer(0, 2, gl.FLOAT, False, 0, 0)
gl.DrawArrays(gl.TRIANGLE_STRIP, 0, 4)
class TexturedRectShader(GLShader):
vs = """
attribute highp vec2 aPos;
uniform highp vec4 uPosTransform;
uniform highp vec4 uScreenTransform;
uniform highp vec4 uTexTransform;
varying mediump vec2 vTexCoord;
void main() {
highp vec2 pos = uPosTransform.xy + aPos * uPosTransform.zw;
gl_Position = vec4(uScreenTransform.xy + pos * uScreenTransform.zw, 0.0, 1.0);
vTexCoord = uTexTransform.xy + aPos * uTexTransform.zw;
}
"""
fs = """
uniform lowp vec4 uColor;
uniform lowp sampler2D uTex;
varying mediump vec2 vTexCoord;
void main() {
gl_FragColor = uColor * texture2D(uTex, vTexCoord);
}
"""
attributes = { 0: 'aPos' }
uniforms = ['uPosTransform', 'uScreenTransform', 'uTexTransform', 'uColor']
def draw(self, x0, y0, x1, y1, s0=0.0, t0=0.0, s1=1.0, t1=1.0, tex=None, color=1.0):
self.use()
if tex:
gl.BindTexture(gl.TEXTURE_2D, tex)
if isinstance(color, float):
gl.Uniform4f(self.uColor, color, color, color, 1.0)
else:
gl.Uniform(self.uColor, color)
gl.Uniform(self.uPosTransform, x0, y0, x1 - x0, y1 - y0)
gl.Uniform(self.uScreenTransform, ScreenTransform)
gl.Uniform(self.uTexTransform, s0, t0, s1 - s0, t1 - t0)
SimpleQuad.draw()
RequiredShaders.append(TexturedRectShader)
class TexturedMeshShader(GLShader):
vs = """
attribute highp vec3 aPosAndAlpha;
uniform highp vec4 uPosTransform;
uniform highp vec4 uScreenTransform;
uniform highp vec4 uTexTransform;
varying mediump vec2 vTexCoord;
varying lowp float vAlpha;
void main() {
highp vec2 pos = uPosTransform.xy + aPosAndAlpha.xy * uPosTransform.zw;
gl_Position = vec4(uScreenTransform.xy + pos * uScreenTransform.zw, 0.0, 1.0);
vTexCoord = uTexTransform.xy + aPosAndAlpha.xy * uTexTransform.zw;
vAlpha = aPosAndAlpha.z;
}
"""
fs = """
uniform lowp sampler2D uTex;
varying mediump vec2 vTexCoord;
varying lowp float vAlpha;
void main() {
gl_FragColor = vec4(1.0, 1.0, 1.0, vAlpha) * texture2D(uTex, vTexCoord);
}
"""
attributes = { 0: 'aPosAndAlpha' }
uniforms = ['uPosTransform', 'uScreenTransform', 'uTexTransform']
def setup(self, x0, y0, x1, y1, s0=0.0, t0=0.0, s1=1.0, t1=1.0, tex=None):
self.use()
if tex:
gl.BindTexture(gl.TEXTURE_2D, tex)
gl.Uniform(self.uPosTransform, x0, y0, x1 - x0, y1 - y0)
gl.Uniform(self.uScreenTransform, ScreenTransform)
gl.Uniform(self.uTexTransform, s0, t0, s1 - s0, t1 - t0)
RequiredShaders.append(TexturedMeshShader)
class BlurShader(GLShader):
vs = """
attribute highp vec2 aPos;
uniform highp vec4 uScreenTransform;
varying mediump vec2 vTexCoord;
void main() {
gl_Position = vec4(uScreenTransform.xy + aPos * uScreenTransform.zw, 0.0, 1.0);
vTexCoord = aPos;
}
"""
fs = """
uniform lowp float uIntensity;
uniform mediump sampler2D uTex;
uniform mediump vec2 uDeltaTexCoord;
varying mediump vec2 vTexCoord;
void main() {
lowp vec3 color = (uIntensity * 0.125) * (
texture2D(uTex, vTexCoord).rgb * 3.0
+ texture2D(uTex, vTexCoord + uDeltaTexCoord * vec2(+0.89, +0.45)).rgb
+ texture2D(uTex, vTexCoord + uDeltaTexCoord * vec2(+0.71, -0.71)).rgb
+ texture2D(uTex, vTexCoord + uDeltaTexCoord * vec2(-0.45, -0.89)).rgb
+ texture2D(uTex, vTexCoord + uDeltaTexCoord * vec2(-0.99, +0.16)).rgb
+ texture2D(uTex, vTexCoord + uDeltaTexCoord * vec2(-0.16, +0.99)).rgb
);
lowp float gray = dot(vec3(0.299, 0.587, 0.114), color);
gl_FragColor = vec4(mix(color, vec3(gray, gray, gray), uIntensity), 1.0);
}
"""
attributes = { 0: 'aPos' }
uniforms = ['uScreenTransform', 'uDeltaTexCoord', 'uIntensity']
def draw(self, dtx, dty, intensity=1.0, tex=None):
self.use()
if tex:
gl.BindTexture(gl.TEXTURE_2D, tex)
gl.Uniform(self.uScreenTransform, ScreenTransform)
gl.Uniform2f(self.uDeltaTexCoord, dtx, dty)
gl.Uniform1f(self.uIntensity, intensity)
SimpleQuad.draw()
# (not added to RequiredShaders because this shader is allowed to fail)
class ProgressBarShader(GLShader):
vs = """
attribute highp vec2 aPos;
uniform highp vec4 uPosTransform;
uniform lowp vec4 uColor0;
uniform lowp vec4 uColor1;
varying lowp vec4 vColor;
void main() {
gl_Position = vec4(uPosTransform.xy + aPos * uPosTransform.zw, 0.0, 1.0);
vColor = mix(uColor0, uColor1, aPos.y);
}
"""
fs = """
varying lowp vec4 vColor;
void main() {
gl_FragColor = vColor;
}
"""
attributes = { 0: 'aPos' }
uniforms = ['uPosTransform', 'uColor0', 'uColor1']
def draw(self, x0, y0, x1, y1, color0, color1):
self.use()
tx0 = ScreenTransform[0] + ScreenTransform[2] * x0
ty0 = ScreenTransform[1] + ScreenTransform[3] * y0
tx1 = ScreenTransform[0] + ScreenTransform[2] * x1
ty1 = ScreenTransform[1] + ScreenTransform[3] * y1
gl.Uniform4f(self.uPosTransform, tx0, ty0, tx1 - tx0, ty1 - ty0)
gl.Uniform(self.uColor0, color0)
gl.Uniform(self.uColor1, color1)
SimpleQuad.draw()
RequiredShaders.append(ProgressBarShader)
##### RENDERING TOOL CODE ######################################################
# meshes for highlight boxes and the spotlight are laid out in the same manner:
# - vertex 0 is the center vertex
# - for each slice, there are two further vertices:
# - vertex 2*i+1 is the "inner" vertex with full alpha
# - vertex 2*i+2 is the "outer" vertex with zero alpha
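# Worked example for npoints=2: the vertices are 0 (center), 1/2 (slice 0,
# inner/outer) and 3/4 (slice 1, inner/outer); HighlightIndexBuffer then
# emits the index list
#     0,1,3  1,2,3  2,4,3    (slice 0, with slice 1 as its predecessor)
#     0,3,1  3,4,1  4,2,1    (slice 1, with slice 0 as its predecessor)
# i.e. three triangles per slice: one opaque fan triangle plus two triangles
# that form the feathered border quad.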
class HighlightIndexBuffer(object):
def __init__(self, npoints, reuse_buf=None, dynamic=False):
if not reuse_buf:
self.buf = gl.GenBuffers()
elif isinstance(reuse_buf, HighlightIndexBuffer):
self.buf = reuse_buf.buf
else:
self.buf = reuse_buf
data = []
for i in xrange(npoints):
if i:
b0 = 2 * i - 1
else:
b0 = 2 * npoints - 1
b1 = 2 * i + 1
data.extend([
0, b1, b0,
b1, b1+1, b0,
b1+1, b0+1, b0
])
self.vertices = 9 * npoints
if dynamic:
usage = gl.DYNAMIC_DRAW
else:
usage = gl.STATIC_DRAW
gl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, self.buf)
gl.BufferData(gl.ELEMENT_ARRAY_BUFFER, data=data, type=gl.UNSIGNED_SHORT, usage=usage)
def draw(self):
gl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, self.buf)
gl.DrawElements(gl.TRIANGLES, self.vertices, gl.UNSIGNED_SHORT, 0)
def GenerateSpotMesh():
global SpotVertices, SpotIndices
rx0 = SpotRadius * PixelX
ry0 = SpotRadius * PixelY
rx1 = (SpotRadius + BoxEdgeSize) * PixelX
ry1 = (SpotRadius + BoxEdgeSize) * PixelY
slices = max(MinSpotDetail, int(2.0 * pi * SpotRadius / SpotDetail / ZoomArea))
SpotIndices = HighlightIndexBuffer(slices, reuse_buf=SpotIndices, dynamic=True)
vertices = [0.0, 0.0, 1.0]
for i in xrange(slices):
a = i * 2.0 * pi / slices
vertices.extend([
rx0 * sin(a), ry0 * cos(a), 1.0,
rx1 * sin(a), ry1 * cos(a), 0.0
])
if not SpotVertices:
SpotVertices = gl.GenBuffers()
gl.BindBuffer(gl.ARRAY_BUFFER, SpotVertices)
gl.BufferData(gl.ARRAY_BUFFER, data=vertices, usage=gl.DYNAMIC_DRAW)
##### TRANSITIONS ##############################################################
# base class for all transitions
class Transition(object):
# constructor: must instantiate (i.e. compile) all required shaders
# and (optionally) perform some additional initialization
def __init__(self):
pass
# called once at the start of each transition
def start(self):
pass
# render a frame of the transition, using the relative time 't' and the
# global texture identifiers Tcurrent and Tnext
def render(self, t):
pass
# smoothstep() makes most transitions better :)
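# (it evaluates the cubic 3t^2 - 2t^3, whose derivative 6t(1-t) vanishes at
# t=0 and t=1, so transitions start and stop gently instead of jerking)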
def smoothstep(t):
return t * t * (3.0 - 2.0 * t)
# an array containing all possible transition classes
AllTransitions = []
class Crossfade(Transition):
"""simple crossfade"""
class CrossfadeShader(GLShader):
vs = """
attribute highp vec2 aPos;
uniform highp vec4 uTexTransform;
varying mediump vec2 vTexCoord;
void main() {
gl_Position = vec4(vec2(-1.0, 1.0) + aPos * vec2(2.0, -2.0), 0.0, 1.0);
vTexCoord = uTexTransform.xy + aPos * uTexTransform.zw;
}
"""
fs = """
uniform lowp sampler2D uTcurrent;
uniform lowp sampler2D uTnext;
uniform lowp float uTime;
varying mediump vec2 vTexCoord;
void main() {
gl_FragColor = mix(texture2D(uTcurrent, vTexCoord), texture2D(uTnext, vTexCoord), uTime);
}
"""
attributes = { 0: 'aPos' }
uniforms = [('uTnext', 1), 'uTexTransform', 'uTime']
def __init__(self):
shader = self.CrossfadeShader.get_instance().use()
gl.Uniform4f(shader.uTexTransform, 0.0, 0.0, TexMaxS, TexMaxT)
def render(self, t):
shader = self.CrossfadeShader.get_instance().use()
gl.set_texture(gl.TEXTURE_2D, Tnext, 1)
gl.set_texture(gl.TEXTURE_2D, Tcurrent, 0)
gl.Uniform1f(shader.uTime, t)
SimpleQuad.draw()
AllTransitions.append(Crossfade)
class FadeOutFadeIn(Transition):
"fade out to black and fade in again"
def render(self, t):
if t < 0.5:
tex = Tcurrent
t = 1.0 - 2.0 * t
else:
tex = Tnext
t = 2.0 * t - 1.0
TexturedRectShader.get_instance().draw(
0.0, 0.0, 1.0, 1.0,
s1=TexMaxS, t1=TexMaxT,
tex=tex,
color=(t, t, t, 1.0)
)
AllTransitions.append(FadeOutFadeIn)
class Slide(Transition):
def render(self, t):
t = smoothstep(t)
x = self.dx * t
y = self.dy * t
TexturedRectShader.get_instance().draw(
x, y, x + 1.0, y + 1.0,
s1=TexMaxS, t1=TexMaxT,
tex=Tcurrent
)
TexturedRectShader.get_instance().draw(
x - self.dx, y - self.dy,
x - self.dx + 1.0, y - self.dy + 1.0,
s1=TexMaxS, t1=TexMaxT,
tex=Tnext
)
class SlideUp(Slide):
"slide upwards"
dx, dy = 0.0, -1.0
class SlideDown(Slide):
"slide downwards"
dx, dy = 0.0, 1.0
class SlideLeft(Slide):
"slide to the left"
dx, dy = -1.0, 0.0
class SlideRight(Slide):
"slide to the right"
dx, dy = 1.0, 0.0
AllTransitions.extend([SlideUp, SlideDown, SlideLeft, SlideRight])
class Squeeze(Transition):
def render(self, t):
for tex, x0, y0, x1, y1 in self.getparams(smoothstep(t)):
TexturedRectShader.get_instance().draw(
x0, y0, x1, y1,
s1=TexMaxS, t1=TexMaxT,
tex=tex
)
class SqueezeUp(Squeeze):
"squeeze upwards"
def getparams(self, t):
return ((Tcurrent, 0.0, 0.0, 1.0, 1.0 - t),
(Tnext, 0.0, 1.0 - t, 1.0, 1.0))
class SqueezeDown(Squeeze):
"squeeze downwards"
def getparams(self, t):
return ((Tcurrent, 0.0, t, 1.0, 1.0),
(Tnext, 0.0, 0.0, 1.0, t))
class SqueezeLeft(Squeeze):
"squeeze to the left"
def getparams(self, t):
return ((Tcurrent, 0.0, 0.0, 1.0 - t, 1.0),
(Tnext, 1.0 - t, 0.0, 1.0, 1.0))
class SqueezeRight(Squeeze):
"squeeze to the right"
def getparams(self, t):
return ((Tcurrent, t, 0.0, 1.0, 1.0),
(Tnext, 0.0, 0.0, t, 1.0))
AllTransitions.extend([SqueezeUp, SqueezeDown, SqueezeLeft, SqueezeRight])
class Wipe(Transition):
band_size = 0.5 # relative size of the wiping band
rx, ry = 16, 16 # mask texture resolution
class_mask = True # True if the mask shall be shared between all instances of this subclass
class WipeShader(GLShader):
vs = """
attribute highp vec2 aPos;
uniform highp vec4 uTexTransform;
uniform highp vec4 uMaskTransform;
varying mediump vec2 vTexCoord;
varying mediump vec2 vMaskCoord;
void main() {
gl_Position = vec4(vec2(-1.0, 1.0) + aPos * vec2(2.0, -2.0), 0.0, 1.0);
vTexCoord = uTexTransform.xy + aPos * uTexTransform.zw;
vMaskCoord = uMaskTransform.xy + aPos * uMaskTransform.zw;
}
"""
fs = """
uniform lowp sampler2D uTcurrent;
uniform lowp sampler2D uTnext;
uniform mediump sampler2D uMaskTex;
uniform mediump vec2 uAlphaTransform;
varying mediump vec2 vTexCoord;
varying mediump vec2 vMaskCoord;
void main() {
mediump float mask = texture2D(uMaskTex, vMaskCoord).r;
mask = (mask + uAlphaTransform.x) * uAlphaTransform.y;
mask = smoothstep(0.0, 1.0, mask);
gl_FragColor = mix(texture2D(uTnext, vTexCoord), texture2D(uTcurrent, vTexCoord), mask);
// gl_FragColor = texture2D(uMaskTex, vMaskCoord); // uncomment for mask debugging
}
"""
attributes = { 0: 'aPos' }
uniforms = [('uTnext', 1), ('uMaskTex', 2), 'uTexTransform', 'uMaskTransform', 'uAlphaTransform']
def __init__(self):
GLShader.__init__(self)
self.mask_tex = gl.make_texture(gl.TEXTURE_2D, gl.CLAMP_TO_EDGE, gl.LINEAR)
mask = None
def __init__(self):
shader = self.WipeShader.get_instance().use()
gl.Uniform4f(shader.uTexTransform, 0.0, 0.0, TexMaxS, TexMaxT)
if not self.class_mask:
self.mask = self.prepare_mask()
elif not self.mask:
self.__class__.mask = self.prepare_mask()
def start(self):
shader = self.WipeShader.get_instance().use()
gl.Uniform4f(shader.uMaskTransform,
0.5 / self.rx, 0.5 / self.ry,
1.0 - 1.0 / self.rx,
1.0 - 1.0 / self.ry)
gl.BindTexture(gl.TEXTURE_2D, shader.mask_tex)
gl.TexImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, self.rx, self.ry, 0, gl.LUMINANCE, gl.UNSIGNED_BYTE, self.mask)
def bind_mask_tex(self, shader):
gl.set_texture(gl.TEXTURE_2D, shader.mask_tex, 2)
def render(self, t):
shader = self.WipeShader.get_instance().use()
        self.bind_mask_tex(shader)  # separate method because the WipeBrightness classes override it
gl.set_texture(gl.TEXTURE_2D, Tnext, 1)
gl.set_texture(gl.TEXTURE_2D, Tcurrent, 0)
gl.Uniform2f(shader.uAlphaTransform,
self.band_size - t * (1.0 + self.band_size),
1.0 / self.band_size)
SimpleQuad.draw()
def prepare_mask(self):
scale = 1.0 / (self.rx - 1)
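        # note that each row is padded to a multiple of 4 texels so that the
        # raw luminance data matches GL's default unpack alignment of 4 bytes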
xx = [i * scale for i in xrange((self.rx + 3) & (~3))]
scale = 1.0 / (self.ry - 1)
yy = [i * scale for i in xrange(self.ry)]
def iter2d():
for y in yy:
for x in xx:
yield (x, y)
return ''.join(chr(max(0, min(255, int(self.f(x, y) * 255.0 + 0.5)))) for x, y in iter2d())
def f(self, x, y):
return 0.5
class WipeLeft(Wipe):
"wipe from right to left"
def f(self, x, y):
return 1.0 - x
class WipeRight(Wipe):
"wipe from left to right"
def f(self, x, y):
return x
class WipeUp(Wipe):
"wipe upwards"
def f(self, x, y):
return 1.0 - y
class WipeDown(Wipe):
"wipe downwards"
def f(self, x, y):
return y
class WipeUpLeft(Wipe):
"wipe from the lower-right to the upper-left corner"
def f(self, x, y):
return 1.0 - 0.5 * (x + y)
class WipeUpRight(Wipe):
"wipe from the lower-left to the upper-right corner"
def f(self, x, y):
return 0.5 * (1.0 - y + x)
class WipeDownLeft(Wipe):
"wipe from the upper-right to the lower-left corner"
def f(self, x, y):
return 0.5 * (1.0 - x + y)
class WipeDownRight(Wipe):
"wipe from the upper-left to the lower-right corner"
def f(self, x, y):
return 0.5 * (x + y)
class WipeCenterOut(Wipe):
"wipe from the center outwards"
rx, ry = 64, 32
def __init__(self):
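        # f() is evaluated once with scale 1.0 to measure the distance from
        # the center to a corner; the reciprocal then normalizes the mask so
        # that the corners map to 1.0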
self.scale = 1.0
self.scale = 1.0 / self.f(0.0, 0.0)
Wipe.__init__(self)
def f(self, x, y):
return hypot((x - 0.5) * DAR, y - 0.5) * self.scale
class WipeCenterIn(Wipe):
"wipe from the corners inwards"
rx, ry = 64, 32
def __init__(self):
self.scale = 1.0
self.scale = 1.0 / (1.0 - self.f(0.0, 0.0))
Wipe.__init__(self)
def f(self, x, y):
return 1.0 - hypot((x - 0.5) * DAR, y - 0.5) * self.scale
class WipeBlobs(Wipe):
"""wipe using nice "blob"-like patterns"""
rx, ry = 64, 32
class_mask = False
def __init__(self):
self.x0 = random.random() * 6.2
self.y0 = random.random() * 6.2
self.sx = (random.random() * 15.0 + 5.0) * DAR
self.sy = random.random() * 15.0 + 5.0
Wipe.__init__(self)
def f(self, x, y):
return 0.5 + 0.25 * (cos(self.x0 + self.sx * x) + cos(self.y0 + self.sy * y))
class WipeClouds(Wipe):
"""wipe using cloud-like patterns"""
rx, ry = 128, 128
class_mask = False
decay = 0.25
blur = 5
def prepare_mask(self):
assert self.rx == self.ry
noise = Image.fromstring('L', (self.rx * 4, self.ry * 2), ''.join(map(chr, (random.randrange(256) for i in xrange(self.rx * self.ry * 8)))))
img = Image.new('L', (1, 1), random.randrange(256))
alpha = 1.0
npos = 0
border = 0
while img.size[0] <= self.rx:
border += 2
next = img.size[0] * 2
alpha *= self.decay
img = Image.blend(
img.resize((next, next), Image.BILINEAR),
noise.crop((npos, 0, npos + next, next)),
alpha)
npos += next
img = ImageOps.equalize(ImageOps.autocontrast(img))
for i in xrange(self.blur):
img = img.filter(ImageFilter.BLUR)
img = img.crop((border, border, img.size[0] - 2 * border, img.size[1] - 2 * border)).resize((self.rx, self.ry), Image.ANTIALIAS)
return img.tostring()
class WipeBrightness1(Wipe):
"""wipe based on the current slide's brightness"""
band_size = 1.0
def prepare_mask(self):
return True # dummy
def start(self):
shader = self.WipeShader.get_instance().use()
gl.Uniform4f(shader.uMaskTransform, 0.0, 0.0, TexMaxS, TexMaxT)
def bind_mask_tex(self, dummy):
gl.set_texture(gl.TEXTURE_2D, Tcurrent, 2)
class WipeBrightness2(WipeBrightness1):
"""wipe based on the next slide's brightness"""
def bind_mask_tex(self, dummy):
gl.set_texture(gl.TEXTURE_2D, Tnext, 2)
AllTransitions.extend([WipeLeft, WipeRight, WipeUp, WipeDown, WipeUpLeft, WipeUpRight, WipeDownLeft, WipeDownRight, WipeCenterOut, WipeCenterIn, WipeBlobs, WipeClouds, WipeBrightness1, WipeBrightness2])
class PagePeel(Transition):
"an unrealistic, but nice page peel effect"
class PagePeel_PeeledPageShader(GLShader):
vs = """
attribute highp vec2 aPos;
uniform highp vec4 uPosTransform;
varying mediump vec2 vTexCoord;
void main() {
highp vec2 pos = uPosTransform.xy + aPos * uPosTransform.zw;
gl_Position = vec4(vec2(-1.0, 1.0) + pos * vec2(2.0, -2.0), 0.0, 1.0);
vTexCoord = aPos + vec2(0.0, -0.5);
}
"""
fs = """
uniform lowp sampler2D uTex;
uniform highp vec4 uTexTransform;
uniform highp float uHeight;
uniform mediump float uShadowStrength;
varying mediump vec2 vTexCoord;
void main() {
mediump vec2 tc = vTexCoord;
tc.y *= 1.0 - tc.x * uHeight;
tc.x = mix(tc.x, tc.x * tc.x, uHeight);
tc = uTexTransform.xy + (tc + vec2(0.0, 0.5)) * uTexTransform.zw;
mediump float shadow_pos = 1.0 - vTexCoord.x;
mediump float light = 1.0 - (shadow_pos * shadow_pos) * uShadowStrength;
gl_FragColor = vec4(light, light, light, 1.0) * texture2D(uTex, tc);
}
"""
attributes = { 0: 'aPos' }
uniforms = ['uPosTransform', 'uTexTransform', 'uHeight', 'uShadowStrength']
class PagePeel_RevealedPageShader(GLShader):
vs = """
attribute highp vec2 aPos;
uniform highp vec4 uPosTransform;
uniform highp vec4 uTexTransform;
varying mediump vec2 vTexCoord;
varying mediump float vShadowPos;
void main() {
highp vec2 pos = uPosTransform.xy + aPos * uPosTransform.zw;
gl_Position = vec4(vec2(-1.0, 1.0) + pos * vec2(2.0, -2.0), 0.0, 1.0);
vShadowPos = 1.0 - aPos.x;
vTexCoord = uTexTransform.xy + aPos * uTexTransform.zw;
}
"""
fs = """
uniform lowp sampler2D uTex;
uniform mediump float uShadowStrength;
varying mediump vec2 vTexCoord;
varying mediump float vShadowPos;
void main() {
mediump float light = 1.0 - (vShadowPos * vShadowPos) * uShadowStrength;
gl_FragColor = vec4(light, light, light, 1.0) * texture2D(uTex, vTexCoord);
}
"""
attributes = { 0: 'aPos' }
uniforms = ['uPosTransform', 'uTexTransform', 'uShadowStrength']
def __init__(self):
shader = self.PagePeel_PeeledPageShader.get_instance().use()
gl.Uniform4f(shader.uTexTransform, 0.0, 0.0, TexMaxS, TexMaxT)
self.PagePeel_RevealedPageShader.get_instance()
def render(self, t):
angle = t * 0.5 * pi
split = cos(angle)
height = sin(angle)
# draw the old page that is peeled away
gl.BindTexture(gl.TEXTURE_2D, Tcurrent)
shader = self.PagePeel_PeeledPageShader.get_instance().use()
gl.Uniform4f(shader.uPosTransform, 0.0, 0.0, split, 1.0)
gl.Uniform1f(shader.uHeight, height * 0.25)
        gl.Uniform1f(shader.uShadowStrength, 0.2 * (1.0 - split))
SimpleQuad.draw()
# draw the new page that is revealed
gl.BindTexture(gl.TEXTURE_2D, Tnext)
shader = self.PagePeel_RevealedPageShader.get_instance().use()
gl.Uniform4f(shader.uPosTransform, split, 0.0, 1.0 - split, 1.0)
gl.Uniform4f(shader.uTexTransform, split * TexMaxS, 0.0, (1.0 - split) * TexMaxS, TexMaxT)
        gl.Uniform1f(shader.uShadowStrength, split)
SimpleQuad.draw()
AllTransitions.append(PagePeel)
# the AvailableTransitions array contains a list of all transition classes that
# can be randomly assigned to pages;
# this selection normally only includes "unintrusive" transitions, i.e. mostly
# crossfade/wipe variations
AvailableTransitions = [ # from coolest to lamest
WipeBlobs,
WipeCenterOut,
WipeDownRight, WipeRight, WipeDown
]
##### OSD FONT RENDERER ########################################################
# force a string or sequence of ordinals into a unicode string
def ForceUnicode(s, charset='iso8859-15'):
if type(s) == types.UnicodeType:
return s
if type(s) == types.StringType:
return unicode(s, charset, 'ignore')
if type(s) in (types.TupleType, types.ListType):
return u''.join(map(unichr, s))
raise TypeError, "string argument not convertible to Unicode"
# search a system font path for a font file
def SearchFont(root, name):
if not os.path.isdir(root):
return None
infix = ""
fontfile = []
while (len(infix) < 10) and not(fontfile):
fontfile = filter(os.path.isfile, glob.glob(root + infix + name))
infix += "*/"
if not fontfile:
return None
else:
return fontfile[0]
# load a system font
def LoadFont(dirs, name, size):
# first try to load the font directly
try:
return ImageFont.truetype(name, size, encoding='unic')
except:
pass
# no need to search further on Windows
if os.name == 'nt':
return None
# start search for the font
for dir in dirs:
fontfile = SearchFont(dir + "/", name)
if fontfile:
try:
return ImageFont.truetype(fontfile, size, encoding='unic')
except:
pass
return None
# alignment constants
Left = 0
Right = 1
Center = 2
Down = 0
Up = 1
Auto = -1
# font renderer class
class GLFont:
def __init__(self, width, height, name, size, search_path=[], default_charset='iso8859-15', extend=1, blur=1):
self.width = width
self.height = height
self._i_extend = range(extend)
self._i_blur = range(blur)
self.feather = extend + blur + 1
self.current_x = 0
self.current_y = 0
self.max_height = 0
self.boxes = {}
self.widths = {}
self.line_height = 0
self.default_charset = default_charset
if isinstance(name, basestring):
self.font = LoadFont(search_path, name, size)
else:
for check_name in name:
self.font = LoadFont(search_path, check_name, size)
if self.font: break
if not self.font:
raise IOError, "font file not found"
self.img = Image.new('LA', (width, height))
self.alpha = Image.new('L', (width, height))
self.extend = ImageFilter.MaxFilter()
self.blur = ImageFilter.Kernel((3, 3), [1,2,1,2,4,2,1,2,1])
self.tex = gl.make_texture(gl.TEXTURE_2D, filter=gl.NEAREST)
self.AddString(range(32, 128))
self.vertices = None
self.index_buffer = None
self.index_buffer_capacity = 0
def AddCharacter(self, c):
w, h = self.font.getsize(c)
try:
ox, oy = self.font.getoffset(c)
w += ox
h += oy
except AttributeError:
pass
self.line_height = max(self.line_height, h)
size = (w + 2 * self.feather, h + 2 * self.feather)
glyph = Image.new('L', size)
draw = ImageDraw.Draw(glyph)
draw.text((self.feather, self.feather), c, font=self.font, fill=255)
del draw
box = self.AllocateGlyphBox(*size)
self.img.paste(glyph, (box.orig_x, box.orig_y))
for i in self._i_extend: glyph = glyph.filter(self.extend)
for i in self._i_blur: glyph = glyph.filter(self.blur)
self.alpha.paste(glyph, (box.orig_x, box.orig_y))
self.boxes[c] = box
self.widths[c] = w
del glyph
def AddString(self, s, charset=None, fail_silently=False):
update_count = 0
try:
for c in ForceUnicode(s, self.GetCharset(charset)):
if c in self.widths:
continue
self.AddCharacter(c)
update_count += 1
except ValueError:
if fail_silently:
pass
else:
raise
if not update_count: return
self.img.putalpha(self.alpha)
gl.load_texture(gl.TEXTURE_2D, self.tex, self.img)
def AllocateGlyphBox(self, w, h):
if self.current_x + w > self.width:
self.current_x = 0
self.current_y += self.max_height
self.max_height = 0
if self.current_y + h > self.height:
raise ValueError, "bitmap too small for all the glyphs"
box = self.GlyphBox()
box.orig_x = self.current_x
box.orig_y = self.current_y
box.size_x = w
box.size_y = h
box.x0 = self.current_x / float(self.width)
box.y0 = self.current_y / float(self.height)
box.x1 = (self.current_x + w) / float(self.width)
box.y1 = (self.current_y + h) / float(self.height)
box.dsx = w * PixelX
box.dsy = h * PixelY
self.current_x += w
self.max_height = max(self.max_height, h)
return box
def GetCharset(self, charset=None):
if charset: return charset
return self.default_charset
def SplitText(self, s, charset=None):
return ForceUnicode(s, self.GetCharset(charset)).split(u'\n')
def GetLineHeight(self):
return self.line_height
def GetTextWidth(self, s, charset=None):
return max([self.GetTextWidthEx(line) for line in self.SplitText(s, charset)])
def GetTextHeight(self, s, charset=None):
return len(self.SplitText(s, charset)) * self.line_height
def GetTextSize(self, s, charset=None):
lines = self.SplitText(s, charset)
return (max([self.GetTextWidthEx(line) for line in lines]), len(lines) * self.line_height)
def GetTextWidthEx(self, u):
if u: return sum([self.widths.get(c, 0) for c in u])
else: return 0
def GetTextHeightEx(self, u=[]):
return self.line_height
def AlignTextEx(self, x, u, align=Left):
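        # relies on the alignment constants' values: Left=0 leaves x alone,
        # Right=1 subtracts the full text width, Center=2 subtracts half of it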
if not align: return x
return x - (self.GetTextWidthEx(u) / align)
class FontShader(GLShader):
vs = """
attribute highp vec4 aPosAndTexCoord;
varying mediump vec2 vTexCoord;
void main() {
gl_Position = vec4(vec2(-1.0, 1.0) + aPosAndTexCoord.xy * vec2(2.0, -2.0), 0.0, 1.0);
vTexCoord = aPosAndTexCoord.zw;
}
"""
fs = """
uniform lowp sampler2D uTex;
uniform lowp vec4 uColor;
varying mediump vec2 vTexCoord;
void main() {
gl_FragColor = uColor * texture2D(uTex, vTexCoord);
}
"""
attributes = { 0: 'aPosAndTexCoord' }
uniforms = ['uColor']
def BeginDraw(self):
self.vertices = []
def EndDraw(self, color=(1.0, 1.0, 1.0), alpha=1.0, beveled=True):
if not self.vertices:
self.vertices = None
return
char_count = len(self.vertices) / 16
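        # each glyph is one quad of 4 vertices (16 floats); the index buffer
        # below uses 16-bit indices, which caps the number of addressable
        # vertices and hence the number of glyphs per draw call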
if char_count > 16383:
print >>sys.stderr, "Internal Error: too many characters (%d) to display in one go, truncating." % char_count
char_count = 16383
# create an index buffer large enough for the text
if not(self.index_buffer) or (self.index_buffer_capacity < char_count):
self.index_buffer_capacity = (char_count + 63) & (~63)
data = []
for b in xrange(0, self.index_buffer_capacity * 4, 4):
data.extend([b+0, b+2, b+1, b+1, b+2, b+3])
if not self.index_buffer:
self.index_buffer = gl.GenBuffers()
gl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, self.index_buffer)
gl.BufferData(gl.ELEMENT_ARRAY_BUFFER, data=data, type=gl.UNSIGNED_SHORT, usage=gl.DYNAMIC_DRAW)
else:
gl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, self.index_buffer)
# set the vertex buffer
vbuf = (c_float * len(self.vertices))(*self.vertices)
gl.BindBuffer(gl.ARRAY_BUFFER, 0)
gl.set_enabled_attribs(0)
gl.VertexAttribPointer(0, 4, gl.FLOAT, False, 0, vbuf)
# draw it
shader = self.FontShader.get_instance().use()
gl.BindTexture(gl.TEXTURE_2D, self.tex)
if beveled:
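            # "beveled" text is drawn in two passes: this first pass darkens
            # the background by the glyph coverage (ZERO, ONE_MINUS_SRC_ALPHA);
            # the pass below then adds the colored glyphs additively (ONE, ONE)
            # before the standard blend function is restored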
gl.BlendFunc(gl.ZERO, gl.ONE_MINUS_SRC_ALPHA)
gl.Uniform4f(shader.uColor, 0.0, 0.0, 0.0, alpha)
gl.DrawElements(gl.TRIANGLES, char_count * 6, gl.UNSIGNED_SHORT, 0)
gl.BlendFunc(gl.ONE, gl.ONE)
gl.Uniform4f(shader.uColor, color[0] * alpha, color[1] * alpha, color[2] * alpha, 1.0)
gl.DrawElements(gl.TRIANGLES, char_count * 6, gl.UNSIGNED_SHORT, 0)
gl.BlendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)
self.vertices = None
def Draw(self, origin, text, charset=None, align=Left, color=(1.0, 1.0, 1.0), alpha=1.0, beveled=True, bold=False):
own_draw = (self.vertices is None)
if own_draw:
self.BeginDraw()
lines = self.SplitText(text, charset)
x0, y = origin
x0 -= self.feather
y -= self.feather
for line in lines:
sy = y * PixelY
x = self.AlignTextEx(x0, line, align)
for c in line:
                if c not in self.widths: continue
self.boxes[c].add_vertices(self.vertices, x * PixelX, sy)
x += self.widths[c]
y += self.line_height
if bold and not(beveled):
self.Draw((origin[0] + 1, origin[1]), text, charset=charset, align=align, color=color, alpha=alpha, beveled=False, bold=False)
if own_draw:
self.EndDraw(color, alpha, beveled)
class GlyphBox:
def add_vertices(self, vertex_list, sx=0.0, sy=0.0):
vertex_list.extend([
sx, sy, self.x0, self.y0,
sx + self.dsx, sy, self.x1, self.y0,
sx, sy + self.dsy, self.x0, self.y1,
sx + self.dsx, sy + self.dsy, self.x1, self.y1,
])
# high-level draw function
def DrawOSD(x, y, text, halign=Auto, valign=Auto, alpha=1.0):
if not(OSDFont) or not(text) or (alpha <= 0.004): return
if alpha > 1.0: alpha = 1.0
if halign == Auto:
if x < 0:
x += ScreenWidth
halign = Right
else:
halign = Left
if HalfScreen and (halign == Left):
x += ScreenWidth / 2
if valign == Auto:
if y < 0:
y += ScreenHeight
valign = Up
else:
valign = Down
if valign != Down:
y -= OSDFont.GetLineHeight() / valign
OSDFont.Draw((x, y), text, align=halign, alpha=alpha)
# very high-level draw function
def DrawOSDEx(position, text, alpha_factor=1.0):
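    # 'position' encodes a screen location: bit 0 selects top (0) or bottom
    # (1), the remaining bits select left (0), right (1) or centered (2)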
xpos = position >> 1
y = (1 - 2 * (position & 1)) * OSDMargin
if xpos < 2:
x = (1 - 2 * xpos) * OSDMargin
halign = Auto
else:
x = ScreenWidth / 2
halign = Center
DrawOSD(x, y, text, halign, alpha = OSDAlpha * alpha_factor)
RequiredShaders.append(GLFont.FontShader)
##### PDF PARSER ###############################################################
class PDFError(Exception):
pass
class PDFref:
def __init__(self, ref):
self.ref = ref
def __repr__(self):
return "PDFref(%d)" % self.ref
re_pdfstring = re.compile(r'\(\)|\(.*?[^\\]\)')
pdfstringrepl = [("\\"+x[0], x[1:]) for x in "(( )) n\n r\r t\t".split(" ")]
def pdf_maskstring(s):
s = s[1:-1]
for a, b in pdfstringrepl:
s = s.replace(a, b)
return " <" + "".join(["%02X"%ord(c) for c in s]) + "> "
def pdf_mask_all_strings(s):
return re_pdfstring.sub(lambda x: pdf_maskstring(x.group(0)), s)
def pdf_unmaskstring(s):
return "".join([chr(int(s[i:i+2], 16)) for i in xrange(1, len(s)-1, 2)])
class PDFParser:
def __init__(self, filename):
self.f = file(filename, "rb")
self.errors = 0
# find the first cross-reference table
self.f.seek(0, 2)
filesize = self.f.tell()
self.f.seek(filesize - 128)
trailer = self.f.read()
i = trailer.rfind("startxref")
if i < 0:
raise PDFError, "cross-reference table offset missing"
try:
offset = int(trailer[i:].split("\n")[1].strip())
except (IndexError, ValueError):
raise PDFError, "malformed cross-reference table offset"
# follow the trailer chain
self.xref = {}
while offset:
newxref = self.xref
self.xref, rootref, offset = self.parse_trailer(offset)
self.xref.update(newxref)
# scan the page and names tree
self.obj2page = {}
self.page2obj = {}
self.annots = {}
self.page_count = 0
self.box = {}
self.names = {}
self.rotate = {}
root = self.getobj(rootref, 'Catalog')
try:
self.scan_page_tree(root['Pages'].ref)
except KeyError:
raise PDFError, "root page tree node missing"
try:
self.scan_names_tree(root['Names'].ref)
except KeyError:
pass
    def getline(self):
        while True:
            line = self.f.readline()
            if not line:
                raise PDFError, "unexpected end of file"
            line = line.strip()
            if line: return line
def find_length(self, tokens, begin, end):
level = 1
for i in xrange(1, len(tokens)):
if tokens[i] == begin: level += 1
if tokens[i] == end: level -= 1
if not level: break
return i + 1
def parse_tokens(self, tokens, want_list=False):
res = []
while tokens:
t = tokens[0]
v = t
tlen = 1
if (len(tokens) >= 3) and (tokens[2] == 'R'):
v = PDFref(int(t))
tlen = 3
elif t == "<<":
tlen = self.find_length(tokens, "<<", ">>")
v = self.parse_tokens(tokens[1 : tlen - 1], True)
v = dict(zip(v[::2], v[1::2]))
elif t == "[":
tlen = self.find_length(tokens, "[", "]")
v = self.parse_tokens(tokens[1 : tlen - 1], True)
            elif not(t) or (t == "null"):
                v = None
            elif (t[0] == '<') and (t[-1] == '>'):
                v = pdf_unmaskstring(t)
            elif t[0] == '/':
                v = t[1:]
else:
try:
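                    # number parsing trick: try float() first, then int();
                    # if int() raises, the float value from the line above
                    # survives, otherwise the int overrides it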
v = float(t)
v = int(t)
except ValueError:
pass
res.append(v)
del tokens[:tlen]
if want_list:
return res
if not res:
return None
if len(res) == 1:
return res[0]
return res
def parse(self, data):
data = pdf_mask_all_strings(data)
data = data.replace("<<", " << ").replace("[", " [ ").replace("(", " (")
data = data.replace(">>", " >> ").replace("]", " ] ").replace(")", ") ")
data = data.replace("/", " /").replace("><", "> <")
return self.parse_tokens(filter(None, data.split()))
def getobj(self, obj, force_type=None):
if isinstance(obj, PDFref):
obj = obj.ref
if type(obj) != types.IntType:
raise PDFError, "object is not a valid reference"
offset = self.xref.get(obj, 0)
if not offset:
raise PDFError, "referenced non-existing PDF object"
self.f.seek(offset)
header = self.getline().split(None, 3)
if (len(header) < 3) or (header[2] != "obj") or (header[0] != str(obj)):
raise PDFError, "object does not start where it's supposed to"
if len(header) == 4:
data = [header[3]]
else:
data = []
while True:
line = self.getline()
if line in ("endobj", "stream"): break
data.append(line)
data = self.parse(" ".join(data))
if force_type:
try:
t = data['Type']
except (KeyError, IndexError, ValueError):
t = None
if t != force_type:
raise PDFError, "object does not match the intended type"
return data
def parse_xref_section(self, start, count):
xref = {}
for obj in xrange(start, start + count):
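            # classic xref entries are fixed-format lines: a 10-digit byte
            # offset, a 5-digit generation number, and 'n' (in use) or 'f'
            # (free); free entries are recorded with offset 0 here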
line = self.getline()
if line[-1] == 'f':
xref[obj] = 0
else:
xref[obj] = int(line[:10], 10)
return xref
def parse_trailer(self, offset):
self.f.seek(offset)
xref = {}
rootref = 0
offset = 0
if self.getline() != "xref":
raise PDFError, "cross-reference table does not start where it's supposed to"
return (xref, rootref, offset) # no xref table found, abort
# parse xref sections
while True:
line = self.getline()
if line == "trailer": break
start, count = map(int, line.split())
xref.update(self.parse_xref_section(start, count))
# parse trailer
trailer = ""
while True:
line = self.getline()
if line in ("startxref", "%%EOF"): break
trailer += line
trailer = self.parse(trailer)
try:
rootref = trailer['Root'].ref
except KeyError:
raise PDFError, "root catalog entry missing"
except AttributeError:
raise PDFError, "root catalog entry is not a reference"
return (xref, rootref, trailer.get('Prev', 0))
def scan_page_tree(self, obj, mbox=None, cbox=None, rotate=0):
try:
node = self.getobj(obj)
if node['Type'] == 'Pages':
for kid in node['Kids']:
self.scan_page_tree(kid.ref, \
node.get('MediaBox', mbox), \
node.get('CropBox', cbox), \
node.get('Rotate', 0))
else:
page = self.page_count + 1
anode = node.get('Annots', [])
if anode.__class__ == PDFref:
anode = self.getobj(anode.ref)
self.page_count = page
self.obj2page[obj] = page
self.page2obj[page] = obj
self.box[page] = node.get('CropBox', cbox) or node.get('MediaBox', mbox)
self.rotate[page] = node.get('Rotate', rotate)
self.annots[page] = [a.ref for a in anode]
except (KeyError, TypeError, ValueError):
self.errors += 1
def scan_names_tree(self, obj, come_from=None, name=None):
try:
node = self.getobj(obj)
# if we came from the root node, proceed to Dests
if not come_from:
for entry in ('Dests', ):
if entry in node:
self.scan_names_tree(node[entry], entry)
elif come_from == 'Dests':
if 'Kids' in node:
for kid in node['Kids']:
self.scan_names_tree(kid, come_from)
elif 'Names' in node:
nlist = node['Names']
while (len(nlist) >= 2) \
and (type(nlist[0]) == types.StringType) \
and (nlist[1].__class__ == PDFref):
self.scan_names_tree(nlist[1], come_from, nlist[0])
del nlist[:2]
elif name and ('D' in node):
page = self.dest2page(node['D'])
if page:
self.names[name] = page
# else: unsupported node, don't care
except PDFError:
self.errors += 1
def dest2page(self, dest):
if type(dest) in (types.StringType, types.UnicodeType):
return self.names.get(dest, None)
if type(dest) != types.ListType:
return dest
elif dest[0].__class__ == PDFref:
return self.obj2page.get(dest[0].ref, None)
else:
return dest[0]
def get_href(self, obj):
try:
node = self.getobj(obj, 'Annot')
if node['Subtype'] != 'Link': return None
dest = None
if 'Dest' in node:
dest = self.dest2page(node['Dest'])
elif 'A' in node:
a = node['A']
if isinstance(a, PDFref):
a = self.getobj(a)
action = a['S']
if action == 'URI':
dest = a.get('URI', None)
for prefix in ("file://", "file:", "run://", "run:"):
if dest.startswith(prefix):
dest = dest[len(prefix):]
break
elif action == 'Launch':
dest = a.get('F', None)
elif action == 'GoTo':
dest = self.dest2page(a.get('D', None))
if dest:
return tuple(node['Rect'] + [dest])
except PDFError:
self.errors += 1
def GetHyperlinks(self):
res = {}
for page in self.annots:
try:
a = filter(None, map(self.get_href, self.annots[page]))
except (PDFError, TypeError, ValueError):
self.errors += 1
a = None
if a: res[page] = a
return res
def rotate_coord(x, y, rot):
if rot == 1: x, y = 1.0 - y, x
elif rot == 2: x, y = 1.0 - x, 1.0 - y
elif rot == 3: x, y = y, 1.0 - x
return (x, y)
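# (rot counts quarter-turns; e.g. rot=1 sends the top-left corner (0,0) of the
# relative page rectangle to the top-right corner (1,0))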
def AddHyperlink(page_offset, page, target, linkbox, pagebox, rotate):
page += page_offset
if type(target) == types.IntType:
target += page_offset
# compute relative position of the link on the page
w = 1.0 / (pagebox[2] - pagebox[0])
h = 1.0 / (pagebox[3] - pagebox[1])
x0 = (linkbox[0] - pagebox[0]) * w
y0 = (pagebox[3] - linkbox[3]) * h
x1 = (linkbox[2] - pagebox[0]) * w
y1 = (pagebox[3] - linkbox[1]) * h
# get effective rotation
rotate /= 90
page_rot = GetPageProp(page, 'rotate')
if page_rot is None:
page_rot = Rotation
if page_rot:
rotate += page_rot
    rotate %= 4
# rotate the rectangle
x0, y0 = rotate_coord(x0, y0, rotate)
x1, y1 = rotate_coord(x1, y1, rotate)
if x0 > x1: x0, x1 = x1, x0
if y0 > y1: y0, y1 = y1, y0
# save the hyperlink
href = (0, target, x0, y0, x1, y1)
if GetPageProp(page, '_href'):
PageProps[page]['_href'].append(href)
else:
SetPageProp(page, '_href', [href])
def FixHyperlinks(page):
if not(GetPageProp(page, '_box')) or not(GetPageProp(page, '_href')):
return # no hyperlinks or unknown page size
bx0, by0, bx1, by1 = GetPageProp(page, '_box')
bdx = bx1 - bx0
bdy = by1 - by0
href = []
for fixed, target, x0, y0, x1, y1 in GetPageProp(page, '_href'):
if fixed:
href.append((1, target, x0, y0, x1, y1))
else:
href.append((1, target, \
int(bx0 + bdx * x0), int(by0 + bdy * y0), \
int(bx0 + bdx * x1), int(by0 + bdy * y1)))
SetPageProp(page, '_href', href)
def ParsePDF(filename):
try:
assert 0 == subprocess.Popen([pdftkPath, filename, "output", TempFileName + ".pdf", "uncompress"]).wait()
except OSError:
print >>sys.stderr, "Note: pdftk not found, hyperlinks disabled."
return
except AssertionError:
print >>sys.stderr, "Note: pdftk failed, hyperlinks disabled."
return
count = 0
try:
try:
pdf = PDFParser(TempFileName + ".pdf")
for page, annots in pdf.GetHyperlinks().iteritems():
for page_offset in FileProps[filename]['offsets']:
for a in annots:
AddHyperlink(page_offset, page, a[4], a[:4], pdf.box[page], pdf.rotate[page])
count += len(annots)
FixHyperlinks(page)
if pdf.errors:
print >>sys.stderr, "Note: there are errors in the PDF file, hyperlinks might not work properly"
del pdf
return count
except IOError:
print >>sys.stderr, "Note: file produced by pdftk not readable, hyperlinks disabled."
except PDFError, e:
print >>sys.stderr, "Note: error in PDF file, hyperlinks disabled."
print >>sys.stderr, " PDF parser error message:", e
finally:
try:
os.remove(TempFileName + ".pdf")
except OSError:
pass
##### PAGE CACHE MANAGEMENT ####################################################
# helper class that allows PIL to write and read image files with an offset
class IOWrapper:
def __init__(self, f, offset=0):
self.f = f
self.offset = offset
self.f.seek(offset)
def read(self, count=None):
if count is None:
return self.f.read()
else:
return self.f.read(count)
def write(self, data):
self.f.write(data)
def seek(self, pos, whence=0):
assert(whence in (0, 1))
if whence:
self.f.seek(pos, 1)
else:
self.f.seek(pos + self.offset)
def tell(self):
return self.f.tell() - self.offset
# generate a "magic number" that is used to identify persistent cache files
def UpdateCacheMagic():
global CacheMagic
pool = [PageCount, ScreenWidth, ScreenHeight, b2s(Scaling), b2s(Supersample), b2s(Rotation)]
flist = list(FileProps.keys())
    flist.sort(key=lambda f: f.lower())
for f in flist:
pool.append(f)
pool.extend(list(GetFileProp(f, 'stat', [])))
CacheMagic = md5obj("\0".join(map(str, pool))).hexdigest()
# set the persistent cache file position to the current end of the file
def UpdatePCachePos():
global CacheFilePos
CacheFile.seek(0, 2)
CacheFilePos = CacheFile.tell()
# rewrite the header of the persistent cache
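# (layout of the cache file: the 32-character magic, then one 8-digit hex
# offset per page, followed by the raw PPM images appended by AddToPCache)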
def WritePCacheHeader(reset=False):
pages = ["%08x" % PageCache.get(page, 0) for page in range(1, PageCount+1)]
CacheFile.seek(0)
CacheFile.write(CacheMagic + "".join(pages))
if reset:
CacheFile.truncate()
UpdatePCachePos()
# return an image from the persistent cache or None if none is available
def GetPCacheImage(page):
if CacheMode != PersistentCache:
return # not applicable if persistent cache isn't used
Lcache.acquire()
try:
if page in PageCache:
img = Image.open(IOWrapper(CacheFile, PageCache[page]))
img.load()
return img
finally:
Lcache.release()
# returns an image from the non-persistent cache or None if none is available
def GetCacheImage(page):
if CacheMode in (NoCache, PersistentCache):
return # not applicable in uncached or persistent-cache mode
Lcache.acquire()
try:
if page in PageCache:
if CacheMode == FileCache:
CacheFile.seek(PageCache[page])
return CacheFile.read(TexSize)
elif CacheMode == CompressedCache:
return zlib.decompress(PageCache[page])
else:
return PageCache[page]
finally:
Lcache.release()
# adds an image to the persistent cache
def AddToPCache(page, img):
if CacheMode != PersistentCache:
return # not applicable if persistent cache isn't used
Lcache.acquire()
try:
if page in PageCache:
return # page is already cached and we can't update it safely
# -> stop here (the new image will be identical to the old
# one anyway)
img.save(IOWrapper(CacheFile, CacheFilePos), "ppm")
PageCache[page] = CacheFilePos
WritePCacheHeader()
finally:
Lcache.release()
# adds an image to the non-persistent cache
def AddToCache(page, data):
global CacheFilePos
if CacheMode in (NoCache, PersistentCache):
return # not applicable in uncached or persistent-cache mode
Lcache.acquire()
try:
if CacheMode == FileCache:
if not(page in PageCache):
PageCache[page] = CacheFilePos
CacheFilePos += len(data)
CacheFile.seek(PageCache[page])
CacheFile.write(data)
elif CacheMode == CompressedCache:
PageCache[page] = zlib.compress(data, 1)
else:
PageCache[page] = data
finally:
Lcache.release()
# invalidates the whole cache
def InvalidateCache():
global PageCache, CacheFilePos
Lcache.acquire()
try:
PageCache = {}
if CacheMode == PersistentCache:
UpdateCacheMagic()
WritePCacheHeader(True)
else:
CacheFilePos = 0
finally:
Lcache.release()
# initialize the persistent cache
def InitPCache():
global CacheFile, CacheMode
# try to open the pre-existing cache file
try:
CacheFile = file(CacheFileName, "rb+")
except IOError:
CacheFile = None
# check the cache magic
UpdateCacheMagic()
if CacheFile and (CacheFile.read(32) != CacheMagic):
print >>sys.stderr, "Cache file mismatch, recreating cache."
CacheFile.close()
CacheFile = None
if CacheFile:
# if the magic was valid, import cache data
print >>sys.stderr, "Using already existing persistent cache file."
for page in range(1, PageCount+1):
offset = int(CacheFile.read(8), 16)
if offset:
PageCache[page] = offset
UpdatePCachePos()
else:
# if the magic was invalid or the file didn't exist, (re-)create it
try:
CacheFile = file(CacheFileName, "wb+")
except IOError:
print >>sys.stderr, "Error: cannot write the persistent cache file (`%s')" % CacheFileName
print >>sys.stderr, "Falling back to temporary file cache."
CacheMode = FileCache
WritePCacheHeader()
##### PAGE RENDERING ###########################################################
class RenderError(RuntimeError):
pass
class RendererUnavailable(RenderError):
pass
class PDFRendererBase(object):
name = None
binaries = []
test_run_args = []
supports_anamorphic = False
required_options = []
    @classmethod
    def supports(cls, binary):
        if not binary:
            return True
        binary = os.path.basename(binary).lower()
        if binary.endswith(".exe"):
            binary = binary[:-4]
        return (binary in cls.binaries)
def __init__(self, binary=None):
# search for a working binary and run it to get a list of its options
self.binary = None
for test_binary in ([binary] if binary else self.binaries):
test_binary = FindBinary(test_binary)
try:
p = subprocess.Popen([test_binary] + self.test_run_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
data = p.stdout.read()
p.wait()
except OSError:
continue
self.binary = test_binary
break
if not self.binary:
raise RendererUnavailable("program not found")
# parse the output into an option list
data = [line.strip().replace('\t', ' ') for line in data.split('\n')]
self.options = set([line.split(' ', 1)[0].split('=', 1)[0].strip('-,') for line in data if line.startswith('-')])
if not(set(self.required_options) <= self.options):
raise RendererUnavailable("%s does not support all required options" % os.path.basename(self.binary))
def render(self, filename, page, res, antialias=True):
raise RenderError()
def execute(self, args, wait=True):
args = [self.binary] + args
if get_thread_id() == RTrunning:
args = Nice + args
try:
process = subprocess.Popen(args)
if not wait:
return process
if process.wait() != 0:
raise RenderError("rendering failed")
except OSError, e:
raise RenderError("could not start renderer - %s" % e)
def load(self, imgfile, autoremove=False):
try:
img = Image.open(imgfile)
img.load()
except (KeyboardInterrupt, SystemExit):
raise
except IOError, e:
raise RenderError("could not read image file - %s" % e)
if autoremove:
self.remove(imgfile)
return img
def remove(self, tmpfile):
try:
os.unlink(tmpfile)
except OSError:
pass
class MuPDFRenderer(PDFRendererBase):
name = "MuPDF"
binaries = ["mudraw", "pdfdraw"]
test_run_args = []
required_options = ["o", "r", "b"]
# helper object for communication with the reader thread
class ThreadComm(object):
def __init__(self, imgfile):
self.imgfile = imgfile
self.buffer = None
self.error = None
self.cancel = False
def getbuffer(self):
if self.buffer:
return self.buffer
# the reader thread might still be busy reading the last
# chunks of the data and converting them into a StringIO;
# let's give it some time
maxwait = time.time() + (0.1 if self.error else 0.5)
while not(self.buffer) and (time.time() < maxwait):
time.sleep(0.01)
return self.buffer
@staticmethod
def ReaderThread(comm):
try:
f = open(comm.imgfile, 'rb')
comm.buffer = cStringIO.StringIO(f.read())
f.close()
except IOError, e:
comm.error = "could not open FIFO for reading - %s" % e
def render(self, filename, page, res, antialias=True):
imgfile = TempFileName + ".ppm"
fifo = False
if HaveThreads:
self.remove(imgfile)
try:
os.mkfifo(imgfile)
fifo = True
comm = self.ThreadComm(imgfile)
thread.start_new_thread(self.ReaderThread, (comm, ))
except (OSError, IOError, AttributeError):
pass
if not antialias:
aa_opts = ["-b", "0"]
else:
aa_opts = []
try:
self.execute([
"-o", imgfile,
"-r", str(res[0]),
] + aa_opts + [
filename,
str(page)
])
if fifo:
if comm.error:
raise RenderError(comm.error)
if not comm.getbuffer():
raise RenderError("could not read from FIFO")
return self.load(comm.buffer, autoremove=False)
else:
return self.load(imgfile)
finally:
if fifo:
comm.error = True
if not comm.getbuffer():
# if rendering failed and the client process didn't write
# to the FIFO at all, the reader thread would block in
# read() forever; so let's open+close the FIFO to
                    # generate an EOF and thus wake the thread up
try:
f = open(imgfile, "w")
f.close()
except IOError:
pass
self.remove(imgfile)
AvailableRenderers.append(MuPDFRenderer)
class XpdfRenderer(PDFRendererBase):
name = "Xpdf/Poppler"
binaries = ["pdftoppm"]
test_run_args = ["-h"]
required_options = ["q", "f", "l", "r"]
def __init__(self, binary=None):
PDFRendererBase.__init__(self, binary)
self.supports_anamorphic = ('rx' in self.options) and ('ry' in self.options)
def render(self, filename, page, res, antialias=True):
if self.supports_anamorphic:
args = ["-rx", str(res[0]), "-ry", str(res[1])]
else:
args = ["-r", str(res[0])]
if not antialias:
for arg in ("aa", "aaVector"):
if arg in self.options:
args += ['-'+arg, 'no']
self.execute([
"-q",
"-f", str(page),
"-l", str(page)
] + args + [
filename,
TempFileName
])
digits = GetFileProp(filename, 'digits', 6)
try_digits = range(6, 0, -1)
try_digits.sort(key=lambda n: abs(n - digits))
try_digits = [(n, TempFileName + ("-%%0%dd.ppm" % n) % page) for n in try_digits]
for digits, imgfile in try_digits:
if not os.path.exists(imgfile):
continue
SetFileProp(filename, 'digits', digits)
return self.load(imgfile, autoremove=True)
raise RenderError("could not find generated image file")
AvailableRenderers.append(XpdfRenderer)
class GhostScriptRenderer(PDFRendererBase):
name = "GhostScript"
binaries = ["gs", "gswin32c"]
test_run_args = ["--version"]
supports_anamorphic = True
def render(self, filename, page, res, antialias=True):
imgfile = TempFileName + ".tif"
aa_bits = (4 if antialias else 1)
try:
self.execute(["-q"] + GhostScriptPlatformOptions + [
"-dBATCH", "-dNOPAUSE",
"-sDEVICE=tiff24nc",
"-dUseCropBox",
"-sOutputFile=" + imgfile,
"-dFirstPage=%d" % page,
"-dLastPage=%d" % page,
"-r%dx%d" % res,
"-dTextAlphaBits=%d" % aa_bits,
"-dGraphicsAlphaBits=%s" % aa_bits,
filename
])
return self.load(imgfile)
finally:
self.remove(imgfile)
AvailableRenderers.append(GhostScriptRenderer)
def InitPDFRenderer():
global PDFRenderer
if PDFRenderer:
return PDFRenderer
fail_reasons = []
for r_class in AvailableRenderers:
if not r_class.supports(PDFRendererPath):
continue
try:
PDFRenderer = r_class(PDFRendererPath)
print >>sys.stderr, "PDF renderer:", PDFRenderer.name
return PDFRenderer
except RendererUnavailable, e:
if Verbose:
print >>sys.stderr, "Not using %s for PDF rendering:" % r_class.name, e
else:
fail_reasons.append((r_class.name, str(e)))
print >>sys.stderr, "ERROR: PDF renderer initialization failed."
for item in fail_reasons:
print >>sys.stderr, " - %s: %s" % item
print >>sys.stderr, " Display of PDF files will not be supported."
# generate a dummy image
def DummyPage():
img = Image.new('RGB', (ScreenWidth, ScreenHeight))
img.paste(LogoImage, ((ScreenWidth - LogoImage.size[0]) / 2,
(ScreenHeight - LogoImage.size[1]) / 2))
return img
# load a page from a PDF file
def RenderPDF(page, MayAdjustResolution, ZoomMode):
if not PDFRenderer:
return DummyPage()
# load props
SourceFile = GetPageProp(page, '_file')
RealPage = GetPageProp(page, '_page')
OutputSizes = GetPageProp(page, '_out')
if not OutputSizes:
OutputSizes = GetFileProp(SourceFile, 'out', [(ScreenWidth + Overscan, ScreenHeight + Overscan), (ScreenWidth + Overscan, ScreenHeight + Overscan)])
SetPageProp(page, '_out', OutputSizes)
Resolutions = GetPageProp(page, '_res')
if not Resolutions:
Resolutions = GetFileProp(SourceFile, 'res', [(72.0, 72.0), (72.0, 72.0)])
SetPageProp(page, '_res', Resolutions)
rot = GetPageProp(page, 'rotate', Rotation)
out = OutputSizes[rot & 1]
res = Resolutions[rot & 1]
zscale = 1
# handle supersample and zoom mode
use_aa = True
if ZoomMode:
res = (ZoomFactor * res[0], ZoomFactor * res[1])
out = (ZoomFactor * out[0], ZoomFactor * out[1])
zscale = ZoomFactor
elif Supersample:
res = (Supersample * res[0], Supersample * res[1])
out = (Supersample * out[0], Supersample * out[1])
use_aa = False
# prepare the renderer options
if PDFRenderer.supports_anamorphic:
parscale = False
useres = (int(res[0] + 0.5), int(res[1] + 0.5))
else:
parscale = (abs(1.0 - PAR) > 0.01)
useres = max(res[0], res[1])
res = (useres, useres)
useres = int(useres + 0.5)
useres = (useres, useres)
# call the renderer
try:
img = PDFRenderer.render(SourceFile, RealPage, useres, use_aa)
except RenderError, e:
print >>sys.stderr, "ERROR: failed to render page %d:" % page, e
return DummyPage()
# apply rotation
if rot: img = img.rotate(90 * (4 - rot))
# compute final output image size based on PAR
if not parscale:
got = img.size
elif PAR > 1.0:
got = (int(img.size[0] / PAR + 0.5), img.size[1])
else:
got = (img.size[0], int(img.size[1] * PAR + 0.5))
# if the image size is strange, re-adjust the rendering resolution
tolerance = max(4, (ScreenWidth + ScreenHeight) / 400)
if MayAdjustResolution and (max(abs(got[0] - out[0]), abs(got[1] - out[1])) >= tolerance):
newout = ZoomToFit((img.size[0], img.size[1] * PAR))
rscale = (float(newout[0]) / img.size[0], float(newout[1]) / img.size[1])
if rot & 1:
newres = (res[0] * rscale[1], res[1] * rscale[0])
else:
newres = (res[0] * rscale[0], res[1] * rscale[1])
# only modify anything if the resolution deviation is large enough
if max(abs(1.0 - newres[0] / res[0]), abs(1.0 - newres[1] / res[1])) > 0.05:
# create a copy of the old values: they are lists and thus stored
# in the PageProps as references; we don't want to influence other
# pages though
OutputSizes = OutputSizes[:]
Resolutions = Resolutions[:]
# modify the appropriate rotation slot
OutputSizes[rot & 1] = newout
Resolutions[rot & 1] = newres
# store the new values for this page ...
SetPageProp(page, '_out', OutputSizes)
SetPageProp(page, '_res', Resolutions)
# ... and as a default for the file as well (future pages are likely
# to have the same resolution)
SetFileProp(SourceFile, 'out', OutputSizes)
SetFileProp(SourceFile, 'res', Resolutions)
return RenderPDF(page, False, ZoomMode)
# downsample a supersampled image
if Supersample and not(ZoomMode):
img = img.resize((int(float(out[0]) / Supersample + 0.5),
int(float(out[1]) / Supersample + 0.5)), Image.ANTIALIAS)
parscale = False # don't scale again
# perform PAR scaling (required for pdftoppm which doesn't support different
# dpi for horizontal and vertical)
if parscale:
if PAR > 1.0:
img = img.resize((int(img.size[0] / PAR + 0.5), img.size[1]), Image.ANTIALIAS)
else:
img = img.resize((img.size[0], int(img.size[1] * PAR + 0.5)), Image.ANTIALIAS)
# crop the overscan (if present)
if Overscan:
target = (ScreenWidth * zscale, ScreenHeight * zscale)
scale = None
if (img.size[1] > target[1]) and (img.size[0] < target[0]):
scale = float(target[1]) / img.size[1]
elif (img.size[0] > target[0]) and (img.size[1] < target[1]):
scale = float(target[0]) / img.size[0]
if scale:
w = int(img.size[0] * scale + 0.5)
h = int(img.size[1] * scale + 0.5)
if (w <= img.size[0]) and (h <= img.size[1]):
x0 = (img.size[0] - w) / 2
y0 = (img.size[1] - h) / 2
img = img.crop((x0, y0, x0 + w, y0 + h))
return img
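# A worked example of the resolution re-adjustment above (illustrative
# numbers, assuming PAR = 1.0): if the expected output is out=(1024, 768)
# but the renderer delivered a 1000x750 image, the 24-pixel deviation
# exceeds the tolerance, so ZoomToFit() computes a corrected output size,
# the 72 dpi base resolution is rescaled by roughly 1024/1000 to ~73.7 dpi,
# and RenderPDF() is called again exactly once with MayAdjustResolution=False
# so that the adjustment cannot recurse.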
# load a page from an image file
def LoadImage(page, ZoomMode):
# open the image file with PIL
try:
img = Image.open(GetPageProp(page, '_file'))
img.load()
except (KeyboardInterrupt, SystemExit):
raise
except:
print >>sys.stderr, "Image file `%s' is broken." % GetPageProp(page, '_file')
return DummyPage()
# apply rotation
rot = GetPageProp(page, 'rotate')
if rot is None:
rot = Rotation
if rot:
img = img.rotate(90 * (4 - rot))
# determine destination size
newsize = ZoomToFit((img.size[0], int(img.size[1] * PAR + 0.5)),
(ScreenWidth, ScreenHeight))
# don't scale if the source size is too close to the destination size
if abs(newsize[0] - img.size[0]) < 2: newsize = img.size
# don't scale if the source is smaller than the destination
if not(Scaling) and (newsize > img.size): newsize = img.size
# zoom up (if wanted)
if ZoomMode: newsize = (2 * newsize[0], 2 * newsize[1])
# skip processing if there was no change
if newsize == img.size: return img
# select a nice filter and resize the image
if newsize > img.size:
filter = Image.BICUBIC
else:
filter = Image.ANTIALIAS
return img.resize(newsize, filter)
# render a page into raw RGB texture data (or a PIL image in RenderMode)
def PageImage(page, ZoomMode=False, RenderMode=False):
global OverviewNeedUpdate, HighQualityOverview
EnableCacheRead = not(ZoomMode or RenderMode)
EnableCacheWrite = EnableCacheRead and \
(page >= PageRangeStart) and (page <= PageRangeEnd)
# check for the image in the cache
if EnableCacheRead:
data = GetCacheImage(page)
if data: return data
# if it's not in the temporary cache, render it
Lrender.acquire()
try:
# retrieve the image from the persistent cache or fully re-render it
if EnableCacheRead:
img = GetPCacheImage(page)
else:
img = None
if not img:
if GetPageProp(page, '_page'):
img = RenderPDF(page, not(ZoomMode), ZoomMode)
else:
img = LoadImage(page, ZoomMode)
if GetPageProp(page, 'invert', InvertPages):
img = ImageChops.invert(img)
if EnableCacheWrite:
AddToPCache(page, img)
# create black background image to paste real image onto
if ZoomMode:
TextureImage = Image.new('RGB', (ZoomFactor * TexWidth, ZoomFactor * TexHeight))
TextureImage.paste(img, ((ZoomFactor * ScreenWidth - img.size[0]) / 2, \
(ZoomFactor * ScreenHeight - img.size[1]) / 2))
else:
TextureImage = Image.new('RGB', (TexWidth, TexHeight))
x0 = (ScreenWidth - img.size[0]) / 2
y0 = (ScreenHeight - img.size[1]) / 2
TextureImage.paste(img, (x0, y0))
SetPageProp(page, '_box', (x0, y0, x0 + img.size[0], y0 + img.size[1]))
FixHyperlinks(page)
# paste thumbnail into overview image
if GetPageProp(page, ('overview', '_overview'), True) \
and (page >= PageRangeStart) and (page <= PageRangeEnd) \
and not(GetPageProp(page, '_overview_rendered')) \
and not(RenderMode):
pos = OverviewPos(OverviewPageMapInv[page])
Loverview.acquire()
try:
# first, fill the underlying area with black (i.e. remove the dummy logo)
blackness = Image.new('RGB', (OverviewCellX - OverviewBorder, \
OverviewCellY - OverviewBorder))
OverviewImage.paste(blackness, (pos[0] + OverviewBorder / 2, \
                                        pos[1] + OverviewBorder / 2))
del blackness
# then, scale down the original image and paste it
if HalfScreen:
img = img.crop((0, 0, img.size[0] / 2, img.size[1]))
sx = OverviewCellX - 2 * OverviewBorder
sy = OverviewCellY - 2 * OverviewBorder
if HighQualityOverview:
t0 = time.time()
img.thumbnail((sx, sy), Image.ANTIALIAS)
if (time.time() - t0) > 0.5:
print >>sys.stderr, "Note: Your system seems to be quite slow; falling back to a faster,"
print >>sys.stderr, " but slightly lower-quality overview page rendering mode"
HighQualityOverview = False
else:
img.thumbnail((sx * 2, sy * 2), Image.NEAREST)
img.thumbnail((sx, sy), Image.BILINEAR)
OverviewImage.paste(img, \
(pos[0] + (OverviewCellX - img.size[0]) / 2, \
pos[1] + (OverviewCellY - img.size[1]) / 2))
finally:
Loverview.release()
SetPageProp(page, '_overview_rendered', True)
OverviewNeedUpdate = True
del img
# return texture data
if RenderMode:
return TextureImage
data = TextureImage.tostring()
del TextureImage
finally:
Lrender.release()
# finally add it back into the cache and return it
if EnableCacheWrite:
AddToCache(page, data)
return data
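# Typical call sites elsewhere in this file: RenderPage() feeds the string
# returned by PageImage(page) straight into gl.TexImage2D(), while
# DoRender() calls PageImage(page, RenderMode=True) to obtain a PIL image
# that it saves to disk as a PNG file.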
# render a page to an OpenGL texture
def RenderPage(page, target):
gl.BindTexture(gl.TEXTURE_2D, target)
while gl.GetError():
pass # clear all OpenGL errors
gl.TexImage2D(gl.TEXTURE_2D, 0, gl.RGB, TexWidth, TexHeight, 0, gl.RGB, gl.UNSIGNED_BYTE, PageImage(page))
if gl.GetError():
print >>sys.stderr, "I'm sorry, but your graphics card is not capable of rendering presentations"
print >>sys.stderr, "in this resolution. Either the texture memory is exhausted, or there is no"
print >>sys.stderr, "support for large textures (%dx%d). Please try to run Impressive in a" % (TexWidth, TexHeight)
print >>sys.stderr, "smaller resolution using the -g command-line option."
sys.exit(1)
# background rendering thread
def RenderThread(p1, p2):
global RTrunning, RTrestart
RTrunning = get_thread_id() or True
RTrestart = True
while RTrestart:
RTrestart = False
for pdf in FileProps:
if not pdf.lower().endswith(".pdf"): continue
if RTrestart: break
SafeCall(ParsePDF, [pdf])
if RTrestart: continue
for page in xrange(1, PageCount + 1):
if RTrestart: break
if (page != p1) and (page != p2) \
and (page >= PageRangeStart) and (page <= PageRangeEnd):
SafeCall(PageImage, [page])
RTrunning = False
if CacheMode >= FileCache:
print >>sys.stderr, "Background rendering finished, used %.1f MiB of disk space." %\
(CacheFilePos / 1048576.0)
elif CacheMode >= MemCache:
print >>sys.stderr, "Background rendering finished, using %.1f MiB of memory." %\
(sum(map(len, PageCache.itervalues())) / 1048576.0)
##### RENDER MODE ##############################################################
def DoRender():
global TexWidth, TexHeight
TexWidth = ScreenWidth
TexHeight = ScreenHeight
if os.path.exists(RenderToDirectory):
print >>sys.stderr, "Destination directory `%s' already exists," % RenderToDirectory
print >>sys.stderr, "refusing to overwrite anything."
return 1
try:
os.mkdir(RenderToDirectory)
except OSError, e:
print >>sys.stderr, "Cannot create destination directory `%s':" % RenderToDirectory
print >>sys.stderr, e.strerror
return 1
print >>sys.stderr, "Rendering presentation into `%s'" % RenderToDirectory
for page in xrange(1, PageCount + 1):
PageImage(page, RenderMode=True).save("%s/page%04d.png" % (RenderToDirectory, page))
sys.stdout.write("[%d] " % page)
sys.stdout.flush()
print >>sys.stderr
print >>sys.stderr, "Done."
return 0
##### INFO SCRIPT I/O ##########################################################
# info script reader
def LoadInfoScript():
global PageProps
try:
os.chdir(os.path.dirname(InfoScriptPath) or BaseWorkingDir)
except OSError:
pass
OldPageProps = PageProps
try:
execfile(InfoScriptPath, globals())
except IOError:
pass
except:
print >>sys.stderr, "----- Exception in info script ----"
traceback.print_exc(file=sys.stderr)
print >>sys.stderr, "----- End of traceback -----"
NewPageProps = PageProps
PageProps = OldPageProps
del OldPageProps
for page in NewPageProps:
for prop in NewPageProps[page]:
SetPageProp(page, prop, NewPageProps[page][prop])
del NewPageProps
# we can't save lambda expressions, so we need to warn the user
# in every possible way
ScriptTainted = False
LambdaWarning = False
def here_was_a_lambda_expression_that_could_not_be_saved():
global LambdaWarning
if not LambdaWarning:
print >>sys.stderr, "WARNING: The info script for the current file contained lambda expressions that"
print >>sys.stderr, " were removed during the a save operation."
LambdaWarning = True
# "clean" a PageProps entry so that only 'public' properties are left
def GetPublicProps(props):
props = props.copy()
# delete private (underscore) props
for prop in list(props.keys()):
if str(prop)[0] == '_':
del props[prop]
# clean props to default values
if props.get('overview', False):
del props['overview']
if not props.get('skip', True):
del props['skip']
if ('boxes' in props) and not(props['boxes']):
del props['boxes']
return props
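# Illustrative example (hypothetical props): GetPublicProps({'_file': 'a.pdf',
# 'overview': True, 'skip': False, 'title': 'Intro'}) returns
# {'title': 'Intro'}: underscore-prefixed props are private, and 'overview'
# and 'skip' are dropped because they hold their default values.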
# Generate a string representation of a property value. Mainly this converts
# classes or instances to the name of the class.
def PropValueRepr(value):
global ScriptTainted
if type(value) == types.FunctionType:
if value.__name__ != "<lambda>":
return value.__name__
if not ScriptTainted:
print >>sys.stderr, "WARNING: The info script contains lambda expressions, which cannot be saved"
print >>sys.stderr, " back. The modifed script will be written into a separate file to"
print >>sys.stderr, " minimize data loss."
ScriptTainted = True
return "here_was_a_lambda_expression_that_could_not_be_saved"
elif type(value) == types.ClassType:
return value.__name__
elif type(value) == types.InstanceType:
return value.__class__.__name__
elif type(value) == types.DictType:
return "{ " + ", ".join([PropValueRepr(k) + ": " + PropValueRepr(value[k]) for k in value]) + " }"
else:
return repr(value)
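# For instance (hypothetical value, and assuming the stored transition is an
# old-style class so that the types.ClassType branch matches):
# PropValueRepr({'transition': Crossfade}) yields "{ 'transition': Crossfade }".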
# generate a nicely formatted string representation of a page's properties
def SinglePagePropRepr(page):
props = GetPublicProps(PageProps[page])
if not props: return None
return "\n%3d: {%s\n }" % (page, \
",".join(["\n " + repr(prop) + ": " + PropValueRepr(props[prop]) for prop in props]))
# generate a nicely formatted string representation of all page properties
def PagePropRepr():
pages = PageProps.keys()
pages.sort()
return "PageProps = {%s\n}" % (",".join(filter(None, map(SinglePagePropRepr, pages))))
# count the characters of a python dictionary source code, correctly handling
# embedded strings and comments, and nested dictionaries
def CountDictChars(s, start=0):
context = None
level = 0
for i in xrange(start, len(s)):
c = s[i]
if context is None:
if c == '{': level += 1
if c == '}': level -= 1
if c == '#': context = '#'
if c == '"': context = '"'
if c == "'": context = "'"
elif context[0] == "\\":
context=context[1]
elif context == '#':
if c in "\r\n": context = None
elif context == '"':
if c == "\\": context = "\\\""
if c == '"': context = None
elif context == "'":
if c == "\\": context = "\\'"
if c == "'": context = None
if level < 0: return i
raise ValueError, "the dictionary never ends"
# modify and save a file's info script
def SaveInfoScript(filename):
# read the old info script
try:
f = file(filename, "r")
script = f.read()
f.close()
except IOError:
script = ""
if not script:
script = "# -*- coding: iso-8859-1 -*-\n"
# replace the PageProps of the old info script with the current ones
try:
m = re.search("^.*(PageProps)\s*=\s*(\{).*$", script,re.MULTILINE)
if m:
script = script[:m.start(1)] + PagePropRepr() + \
script[CountDictChars(script, m.end(2)) + 1 :]
else:
script += "\n" + PagePropRepr() + "\n"
except (AttributeError, ValueError):
pass
if ScriptTainted:
filename += ".modified"
# write the script back
try:
f = file(filename, "w")
f.write(script)
f.close()
except:
print >>sys.stderr, "Oops! Could not write info script!"
##### OPENGL RENDERING #########################################################
# draw OSD overlays
def DrawOverlays(trans_time=0.0):
reltime = Platform.GetTicks() - StartTime
gl.Enable(gl.BLEND)
if (EstimatedDuration or PageProgress or (PageTimeout and AutoAdvanceProgress)) \
and (OverviewMode or GetPageProp(Pcurrent, 'progress', True)):
r, g, b = ProgressBarColorPage
a = ProgressBarAlpha
if PageTimeout and AutoAdvanceProgress:
rel = (reltime - PageEnterTime) / float(PageTimeout)
if TransitionRunning:
a = int(a * (1.0 - TransitionPhase))
elif PageLeaveTime > PageEnterTime:
# we'll be called one frame after the transition finished, but
# before the new page has been fully activated => don't flash
a = 0
elif EstimatedDuration:
rel = (0.001 * reltime) / EstimatedDuration
if rel < 1.0:
r, g, b = ProgressBarColorNormal
elif rel < ProgressBarWarningFactor:
r, g, b = lerpColor(ProgressBarColorNormal, ProgressBarColorWarning,
(rel - 1.0) / (ProgressBarWarningFactor - 1.0))
elif rel < ProgressBarCriticalFactor:
r, g, b = lerpColor(ProgressBarColorWarning, ProgressBarColorCritical,
(rel - ProgressBarWarningFactor) / (ProgressBarCriticalFactor - ProgressBarWarningFactor))
else:
r, g, b = ProgressBarColorCritical
else: # must be PageProgress
rel = (Pcurrent + trans_time * (Pnext - Pcurrent)) / PageCount
if HalfScreen:
zero = 0.5
rel = 0.5 + 0.5 * rel
else:
zero = 0.0
ProgressBarShader.get_instance().draw(
zero, 1.0 - ProgressBarSizeFactor,
rel, 1.0,
color0=(r, g, b, 0.0),
color1=(r, g, b, a)
)
if OSDFont:
OSDFont.BeginDraw()
if WantStatus:
DrawOSDEx(OSDStatusPos, CurrentOSDStatus)
if TimeDisplay:
if ShowClock:
DrawOSDEx(OSDTimePos, ClockTime(MinutesOnly))
else:
t = reltime / 1000
DrawOSDEx(OSDTimePos, FormatTime(t, MinutesOnly))
if CurrentOSDComment and (OverviewMode or not(TransitionRunning)):
DrawOSD(ScreenWidth/2, \
ScreenHeight - 3*OSDMargin - FontSize, \
CurrentOSDComment, Center, Up)
OSDFont.EndDraw()
if CursorImage and CursorVisible:
x, y = Platform.GetMousePos()
x -= CursorHotspot[0]
y -= CursorHotspot[1]
X0 = x * PixelX
Y0 = y * PixelY
X1 = X0 + CursorSX
Y1 = Y0 + CursorSY
TexturedRectShader.get_instance().draw(
X0, Y0, X1, Y1,
s1=CursorTX, t1=CursorTY,
tex=CursorTexture
)
gl.Disable(gl.BLEND)
# draw the complete image of the current page
def DrawCurrentPage(dark=1.0, do_flip=True):
global ScreenTransform
if VideoPlaying: return
boxes = GetPageProp(Pcurrent, 'boxes')
gl.Clear(gl.COLOR_BUFFER_BIT)
# pre-transform for zoom
if ZoomArea != 1.0:
ScreenTransform = (
-2.0 * ZoomX0 / ZoomArea - 1.0,
+2.0 * ZoomY0 / ZoomArea + 1.0,
+2.0 / ZoomArea,
-2.0 / ZoomArea
)
# background layer -- the page's image, darkened if it has boxes
is_dark = (boxes or Tracing) and (dark > 0.001)
if not is_dark:
# standard mode
TexturedRectShader.get_instance().draw(
0.0, 0.0, 1.0, 1.0,
s1=TexMaxS, t1=TexMaxT,
tex=Tcurrent
)
elif UseBlurShader:
# blurred background (using shader)
blur_scale = BoxFadeBlur * ZoomArea * dark
BlurShader.get_instance().draw(
PixelX * blur_scale,
PixelY * blur_scale,
1.0 - BoxFadeDarkness * dark,
tex=Tcurrent
)
gl.Enable(gl.BLEND)
# note: BLEND stays enabled during the rest of this function;
# it will be disabled at the end of DrawOverlays()
else:
# blurred background (using oldschool multi-pass blend fallback)
intensity = 1.0 - BoxFadeDarkness * dark
for dx, dy, alpha in (
(0.0, 0.0, 1.0),
(-ZoomArea, 0.0, dark / 2),
(+ZoomArea, 0.0, dark / 3),
(0.0, -ZoomArea, dark / 4),
(0.0, +ZoomArea, dark / 5),
):
TexturedRectShader.get_instance().draw(
0.0, 0.0, 1.0, 1.0,
TexMaxS * PixelX * dx,
TexMaxT * PixelY * dy,
TexMaxS * (PixelX * dx + 1.0),
TexMaxT * (PixelY * dy + 1.0),
tex=Tcurrent,
color=(intensity, intensity, intensity, alpha)
)
gl.Enable(gl.BLEND)
if boxes and is_dark:
TexturedMeshShader.get_instance().setup(
0.0, 0.0, 1.0, 1.0,
s1=TexMaxS, t1=TexMaxT
# tex is already set
)
for X0, Y0, X1, Y1 in boxes:
vertices = (c_float * 27)(
X0, Y0, 1.0, # note: this produces two degenerate triangles
X0, Y0, 1.0,
X0 - EdgeX, Y0 - EdgeY, 0.0,
X1, Y0, 1.0,
X1 + EdgeX, Y0 - EdgeY, 0.0,
X1, Y1, 1.0,
X1 + EdgeX, Y1 + EdgeY, 0.0,
X0, Y1, 1.0,
X0 - EdgeX, Y1 + EdgeY, 0.0,
)
gl.BindBuffer(gl.ARRAY_BUFFER, 0)
gl.VertexAttribPointer(0, 3, gl.FLOAT, False, 0, vertices)
BoxIndexBuffer.draw()
if Tracing and is_dark:
x, y = MouseToScreen(Platform.GetMousePos())
TexturedMeshShader.get_instance().setup(
x, y, x + 1.0, y + 1.0,
x * TexMaxS, y * TexMaxT,
(x + 1.0) * TexMaxS, (y + 1.0) * TexMaxT
# tex is already set
)
gl.BindBuffer(gl.ARRAY_BUFFER, SpotVertices)
gl.VertexAttribPointer(0, 3, gl.FLOAT, False, 0, 0)
SpotIndices.draw()
if Marking:
x0 = min(MarkUL[0], MarkLR[0])
y0 = min(MarkUL[1], MarkLR[1])
x1 = max(MarkUL[0], MarkLR[0])
y1 = max(MarkUL[1], MarkLR[1])
# red frame (misusing the progress bar shader as a single-color shader)
color = (MarkColor[0], MarkColor[1], MarkColor[2], 1.0)
ProgressBarShader.get_instance().draw(
x0 - PixelX * ZoomArea, y0 - PixelY * ZoomArea,
x1 + PixelX * ZoomArea, y1 + PixelY * ZoomArea,
color0=color, color1=color
)
# semi-transparent inner area
gl.Enable(gl.BLEND)
TexturedRectShader.get_instance().draw(
x0, y0, x1, y1,
x0 * TexMaxS, y0 * TexMaxT,
x1 * TexMaxS, y1 * TexMaxT,
tex=Tcurrent, color=(1.0, 1.0, 1.0, 1.0 - MarkColor[3])
)
# unapply the zoom transform
ScreenTransform = DefaultScreenTransform
# Done.
DrawOverlays()
if do_flip:
Platform.SwapBuffers()
# draw a black screen with the Impressive logo at the center
def DrawLogo():
gl.Clear(gl.COLOR_BUFFER_BIT)
if not ShowLogo:
return
if HalfScreen:
x0 = 0.25
else:
x0 = 0.5
TexturedRectShader.get_instance().draw(
x0 - 128.0 / ScreenWidth, 0.5 - 32.0 / ScreenHeight,
x0 + 128.0 / ScreenWidth, 0.5 + 32.0 / ScreenHeight,
tex=LogoTexture
)
if OSDFont:
gl.Enable(gl.BLEND)
OSDFont.Draw((int(ScreenWidth * x0), ScreenHeight / 2 + 48), \
__version__.split()[0], align=Center, alpha=0.25, beveled=False)
gl.Disable(gl.BLEND)
# draw the prerender progress bar
def DrawProgress(position):
x0 = 0.1
x2 = 1.0 - x0
x1 = position * x2 + (1.0 - position) * x0
y1 = 0.9
y0 = y1 - 16.0 / ScreenHeight
if HalfScreen:
x0 *= 0.5
x1 *= 0.5
x2 *= 0.5
ProgressBarShader.get_instance().draw(
x0, y0, x2, y1,
color0=(0.25, 0.25, 0.25, 1.0),
color1=(0.50, 0.50, 0.50, 1.0)
)
ProgressBarShader.get_instance().draw(
x0, y0, x1, y1,
color0=(0.25, 0.50, 1.00, 1.0),
color1=(0.03, 0.12, 0.50, 1.0)
)
# fade mode
def DrawFadeMode(intensity, alpha):
if VideoPlaying: return
DrawCurrentPage(do_flip=False)
gl.Enable(gl.BLEND)
color = (intensity, intensity, intensity, alpha)
ProgressBarShader.get_instance().draw(
0.0, 0.0, 1.0, 1.0,
color0=color, color1=color
)
gl.Disable(gl.BLEND)
Platform.SwapBuffers()
def EnterFadeMode(intensity=0.0):
t0 = Platform.GetTicks()
while True:
if Platform.CheckAnimationCancelEvent(): break
t = (Platform.GetTicks() - t0) * 1.0 / BlankFadeDuration
if t >= 1.0: break
DrawFadeMode(intensity, t)
DrawFadeMode(intensity, 1.0)
def LeaveFadeMode(intensity=0.0):
t0 = Platform.GetTicks()
while True:
if Platform.CheckAnimationCancelEvent(): break
t = (Platform.GetTicks() - t0) * 1.0 / BlankFadeDuration
if t >= 1.0: break
DrawFadeMode(intensity, 1.0 - t)
DrawCurrentPage()
def FadeMode(intensity):
EnterFadeMode(intensity)
def fade_action_handler(action):
if action == "$quit":
PageLeft()
Quit()
elif action == "$expose":
DrawFadeMode(intensity, 1.0)
elif action == "*quit":
Platform.PostQuitEvent()
else:
return False
return True
while True:
ev = Platform.GetEvent()
if ev and not(ProcessEvent(ev, fade_action_handler)) and ev.startswith('*'):
break
LeaveFadeMode(intensity)
# gamma control
def SetGamma(new_gamma=None, new_black=None, force=False):
global Gamma, BlackLevel
if new_gamma is None: new_gamma = Gamma
if new_gamma < 0.1: new_gamma = 0.1
if new_gamma > 10.0: new_gamma = 10.0
if new_black is None: new_black = BlackLevel
if new_black < 0: new_black = 0
if new_black > 254: new_black = 254
if not(force) and (abs(Gamma - new_gamma) < 0.01) and (new_black == BlackLevel):
return
Gamma = new_gamma
BlackLevel = new_black
return Platform.SetGammaRamp(new_gamma, new_black)
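# Example (illustrative values): SetGamma(2.2, 16) hands gamma 2.2 and black
# level 16 to Platform.SetGammaRamp(); SetGamma(99.0) is clamped to the
# 0.1..10.0 gamma range first, and a repeated call with (almost) unchanged
# values returns early without touching the gamma ramp.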
# cursor image
def PrepareCustomCursor(cimg):
global CursorTexture, CursorHotspot, CursorSX, CursorSY, CursorTX, CursorTY
if not cimg:
CursorHotspot = (1,0)
cimg = Image.open(cStringIO.StringIO(DEFAULT_CURSOR.decode('base64')))
w, h = cimg.size
tw, th = map(npot, cimg.size)
if (tw > 256) or (th > 256):
print >>sys.stderr, "Custom cursor is ridiculously large, reverting to normal one."
return False
img = Image.new('RGBA', (tw, th))
img.paste(cimg, (0, 0))
CursorTexture = gl.make_texture(gl.TEXTURE_2D, gl.CLAMP_TO_EDGE, gl.NEAREST)
gl.load_texture(gl.TEXTURE_2D, img)
CursorSX = w * PixelX
CursorSY = h * PixelY
CursorTX = w / float(tw)
CursorTY = h / float(th)
return True
##### CONTROL AND NAVIGATION ###################################################
# update the application's title bar
def UpdateCaption(page=0, force=False):
global CurrentCaption, CurrentOSDCaption, CurrentOSDPage, CurrentOSDStatus
global CurrentOSDComment
if (page == CurrentCaption) and not(force):
return
CurrentCaption = page
caption = __title__
if DocumentTitle:
caption += " - " + DocumentTitle
if page < 1:
CurrentOSDCaption = ""
CurrentOSDPage = ""
CurrentOSDStatus = ""
CurrentOSDComment = ""
Platform.SetWindowTitle(caption)
return
CurrentOSDPage = "%d/%d" % (page, PageCount)
caption = "%s (%s)" % (caption, CurrentOSDPage)
title = GetPageProp(page, 'title') or GetPageProp(page, '_title')
if title:
caption += ": %s" % title
CurrentOSDCaption = title
else:
CurrentOSDCaption = ""
status = []
if GetPageProp(page, 'skip', False):
status.append("skipped: yes")
if not GetPageProp(page, ('overview', '_overview'), True):
status.append("on overview page: no")
CurrentOSDStatus = ", ".join(status)
CurrentOSDComment = GetPageProp(page, 'comment')
Platform.SetWindowTitle(caption)
# get next/previous page
def GetNextPage(page, direction):
try_page = page
while True:
try_page += direction
if try_page == page:
return 0 # tried all pages, but none found
if Wrap:
if try_page < 1: try_page = PageCount
if try_page > PageCount: try_page = 1
else:
if try_page < 1 or try_page > PageCount:
return 0 # start or end of presentation
if not GetPageProp(try_page, 'skip', False):
return try_page
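# Example (hypothetical setup): with PageCount=5, page 3 flagged 'skip' and
# Wrap disabled, GetNextPage(2, +1) returns 4 (page 3 is skipped over), and
# GetNextPage(5, +1) returns 0 to signal the end of the presentation.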
# pre-load the following page into Pnext/Tnext
def PreloadNextPage(page):
global Pnext, Tnext
if (page < 1) or (page > PageCount):
Pnext = 0
return 0
if page == Pnext:
return 1
RenderPage(page, Tnext)
Pnext = page
return 1
# perform box fading; the fade animation time is mapped through func()
def BoxFade(func):
t0 = Platform.GetTicks()
while BoxFadeDuration > 0:
if Platform.CheckAnimationCancelEvent(): break
t = (Platform.GetTicks() - t0) * 1.0 / BoxFadeDuration
if t >= 1.0: break
DrawCurrentPage(func(t))
DrawCurrentPage(func(1.0))
return 0
# reset the timer
def ResetTimer():
global StartTime, PageEnterTime
if TimeTracking and not(FirstPage):
print "--- timer was reset here ---"
StartTime = Platform.GetTicks()
PageEnterTime = 0
# start video playback
def PlayVideo(video):
global MPlayerProcess, VideoPlaying
if not video: return
StopMPlayer()
opts = ["-quiet", "-slave", \
"-monitorpixelaspect", "1:1", \
"-autosync", "100"] + \
MPlayerPlatformOptions
if Fullscreen:
opts += ["-fs"]
else:
try:
opts += ["-wid", str(Platform.GetWindowID())]
except KeyError:
print >>sys.stderr, "Sorry, but Impressive only supports video on your operating system if fullscreen"
print >>sys.stderr, "mode is used."
VideoPlaying = False
MPlayerProcess = None
return
if not isinstance(video, list):
video = [video]
try:
MPlayerProcess = subprocess.Popen([MPlayerPath] + opts + video, stdin=subprocess.PIPE)
if MPlayerColorKey:
gl.Clear(gl.COLOR_BUFFER_BIT)
Platform.SwapBuffers()
VideoPlaying = True
except OSError:
MPlayerProcess = None
# called each time a page is entered, AFTER the transition, BEFORE entering box-fade mode
def PreparePage():
global SpotRadius, SpotRadiusBase
global BoxFadeDarkness, BoxFadeDarknessBase
override = GetPageProp(Pcurrent, 'radius')
if override:
SpotRadius = override
SpotRadiusBase = override
GenerateSpotMesh()
override = GetPageProp(Pcurrent, 'darkness')
if override is not None:
BoxFadeDarkness = override * 0.01
BoxFadeDarknessBase = override * 0.01
# called each time a page is entered, AFTER the transition, AFTER entering box-fade mode
def PageEntered(update_time=True):
global PageEnterTime, PageTimeout, MPlayerProcess, IsZoomed, WantStatus
if update_time:
PageEnterTime = Platform.GetTicks() - StartTime
IsZoomed = False # no, we don't have a pre-zoomed image right now
WantStatus = False # don't show status unless it's changed interactively
PageTimeout = AutoAdvance
shown = GetPageProp(Pcurrent, '_shown', 0)
try:
os.chdir(os.path.dirname(GetPageProp(Pcurrent, '_file')))
except OSError:
pass
if not(shown) or Wrap:
PageTimeout = GetPageProp(Pcurrent, 'timeout', PageTimeout)
if not(shown) or GetPageProp(Pcurrent, 'always', False):
video = GetPageProp(Pcurrent, 'video')
sound = GetPageProp(Pcurrent, 'sound')
PlayVideo(video)
if sound and not(video):
StopMPlayer()
try:
MPlayerProcess = subprocess.Popen( \
[MPlayerPath, "-quiet", "-really-quiet", "-novideo", sound], \
stdin=subprocess.PIPE)
except OSError:
MPlayerProcess = None
SafeCall(GetPageProp(Pcurrent, 'OnEnterOnce'))
SafeCall(GetPageProp(Pcurrent, 'OnEnter'))
if PageTimeout:
Platform.ScheduleEvent("$page-timeout", PageTimeout)
SetPageProp(Pcurrent, '_shown', shown + 1)
# called each time a page is left
def PageLeft(overview=False):
global FirstPage, LastPage, WantStatus, PageLeaveTime
PageLeaveTime = Platform.GetTicks() - StartTime
WantStatus = False
if not overview:
if GetTristatePageProp(Pcurrent, 'reset'):
ResetTimer()
FirstPage = False
LastPage = Pcurrent
if GetPageProp(Pcurrent, '_shown', 0) == 1:
SafeCall(GetPageProp(Pcurrent, 'OnLeaveOnce'))
SafeCall(GetPageProp(Pcurrent, 'OnLeave'))
if TimeTracking:
t1 = Platform.GetTicks() - StartTime
dt = (t1 - PageEnterTime + 500) / 1000
if overview:
p = "over"
else:
p = "%4d" % Pcurrent
print "%s%9s%9s%9s" % (p, FormatTime(dt), \
FormatTime(PageEnterTime / 1000), \
FormatTime(t1 / 1000))
# create an instance of a transition class
def InstantiateTransition(trans_class):
try:
return trans_class()
except GLInvalidShaderError:
return None
except GLShaderCompileError:
print >>sys.stderr, "Note: all %s transitions will be disabled" % trans_class.__name__
return None
# perform a transition to a specified page
def TransitionTo(page, allow_transition=True):
global Pcurrent, Pnext, Tcurrent, Tnext
global PageCount, Marking, Tracing, Panning
global TransitionRunning, TransitionPhase
# first, stop video and kill the auto-timer
if VideoPlaying:
StopMPlayer()
Platform.ScheduleEvent("$page-timeout", 0)
# invalid page? go away
if not PreloadNextPage(page):
if QuitAtEnd:
LeaveZoomMode(allow_transition)
if FadeInOut:
EnterFadeMode()
PageLeft()
Quit()
return 0
# leave zoom mode now, if enabled
LeaveZoomMode(allow_transition)
# notify that the page has been left
PageLeft()
# box fade-out
if GetPageProp(Pcurrent, 'boxes') or Tracing:
skip = BoxFade(lambda t: 1.0 - t)
else:
skip = 0
# some housekeeping
Marking = False
Tracing = False
UpdateCaption(page)
# check if the transition is valid
tpage = max(Pcurrent, Pnext)
    trans = None
    tkey = 'transition'
    if allow_transition:
        trans = GetPageProp(tpage, tkey)
        if trans is None:
            tkey = '_transition'
            trans = GetPageProp(tpage, tkey)
if trans is not None:
transtime = GetPageProp(tpage, 'transtime', TransitionDuration)
try:
dummy = trans.__class__
except AttributeError:
# ah, gotcha! the transition is not yet instantiated!
trans = InstantiateTransition(trans)
PageProps[tpage][tkey] = trans
if trans is None:
transtime = 0
# backward motion? then swap page buffers now
backward = (Pnext < Pcurrent)
if Wrap and (min(Pcurrent, Pnext) == 1) and (max(Pcurrent, Pnext) == PageCount):
backward = not(backward) # special case: last<->first in wrap mode
if backward:
Pcurrent, Pnext = (Pnext, Pcurrent)
Tcurrent, Tnext = (Tnext, Tcurrent)
# transition animation
if not(skip) and transtime:
transtime = 1.0 / transtime
TransitionRunning = True
trans.start()
t0 = Platform.GetTicks()
while not(VideoPlaying):
if Platform.CheckAnimationCancelEvent():
skip = 1
break
t = (Platform.GetTicks() - t0) * transtime
if t >= 1.0: break
TransitionPhase = t
if backward: t = 1.0 - t
gl.Clear(gl.COLOR_BUFFER_BIT)
trans.render(t)
DrawOverlays(t)
Platform.SwapBuffers()
TransitionRunning = False
# forward motion => swap page buffers now
if not backward:
Pcurrent, Pnext = (Pnext, Pcurrent)
Tcurrent, Tnext = (Tnext, Tcurrent)
# prepare the page's changeable metadata
PreparePage()
# box fade-in
if not(skip) and GetPageProp(Pcurrent, 'boxes'): BoxFade(lambda t: t)
# finally update the screen and preload the next page
DrawCurrentPage()
PageEntered()
if not PreloadNextPage(GetNextPage(Pcurrent, 1)):
PreloadNextPage(GetNextPage(Pcurrent, -1))
return 1
# zoom mode animation
def ZoomAnimation(targetx, targety, func, duration_override=None):
global ZoomX0, ZoomY0, ZoomArea
t0 = Platform.GetTicks()
if duration_override is None:
duration = ZoomDuration
else:
duration = duration_override
while duration > 0:
if Platform.CheckAnimationCancelEvent(): break
t = (Platform.GetTicks() - t0) * 1.0 / duration
if t >= 1.0: break
t = func(t)
t = (2.0 - t) * t
ZoomX0 = targetx * t
ZoomY0 = targety * t
ZoomArea = 1.0 - (1.0 - 1.0 / ZoomFactor) * t
DrawCurrentPage()
t = func(1.0)
ZoomX0 = targetx * t
ZoomY0 = targety * t
ZoomArea = 1.0 - (1.0 - 1.0 / ZoomFactor) * t
GenerateSpotMesh()
DrawCurrentPage()
# enter zoom mode
def EnterZoomMode(targetx, targety):
global ZoomMode, IsZoomed, HighResZoomFailed
ZoomAnimation(targetx, targety, lambda t: t)
ZoomMode = True
if IsZoomed or HighResZoomFailed:
return
gl.BindTexture(gl.TEXTURE_2D, Tcurrent)
while gl.GetError():
pass # clear all OpenGL errors
gl.TexImage2D(gl.TEXTURE_2D, 0, gl.RGB, ZoomFactor * TexWidth, ZoomFactor * TexHeight, 0, gl.RGB, gl.UNSIGNED_BYTE, PageImage(Pcurrent, True))
if gl.GetError():
print >>sys.stderr, "I'm sorry, but your graphics card is not capable of rendering presentations"
print >>sys.stderr, "in this resolution. Either the texture memory is exhausted, or there is no"
print >>sys.stderr, "support for large textures (%dx%d). Please try to run Impressive in a" % (TexWidth, TexHeight)
print >>sys.stderr, "smaller resolution using the -g command-line option."
HighResZoomFailed = True
return
DrawCurrentPage()
IsZoomed = True
# leave zoom mode (if enabled)
def LeaveZoomMode(allow_transition=True):
    global ZoomMode, Panning
if not ZoomMode: return
ZoomAnimation(ZoomX0, ZoomY0, lambda t: 1.0 - t, (None if allow_transition else 0))
ZoomMode = False
Panning = False
# increment/decrement spot radius
def IncrementSpotSize(delta):
global SpotRadius
if not Tracing:
return
SpotRadius = max(SpotRadius + delta, 8)
GenerateSpotMesh()
DrawCurrentPage()
# post-initialize the page transitions
def PrepareTransitions():
Unspecified = 0xAFFED00F
# STEP 1: randomly assign transitions where the user didn't specify them
cnt = sum([1 for page in xrange(1, PageCount + 1) \
if GetPageProp(page, 'transition', Unspecified) == Unspecified])
newtrans = ((cnt / len(AvailableTransitions) + 1) * AvailableTransitions)[:cnt]
random.shuffle(newtrans)
for page in xrange(1, PageCount + 1):
if GetPageProp(page, 'transition', Unspecified) == Unspecified:
SetPageProp(page, '_transition', newtrans.pop())
# STEP 2: instantiate transitions
for page in PageProps:
for key in ('transition', '_transition'):
if not key in PageProps[page]:
continue
trans = PageProps[page][key]
if trans is not None:
PageProps[page][key] = InstantiateTransition(trans)
# update timer values and screen timer
def TimerTick():
global CurrentTime, ProgressBarPos
redraw = False
newtime = (Platform.GetTicks() - StartTime) * 0.001
if EstimatedDuration:
newpos = int(ScreenWidth * newtime / EstimatedDuration)
if newpos != ProgressBarPos:
redraw = True
ProgressBarPos = newpos
newtime = int(newtime)
if TimeDisplay and (CurrentTime != newtime):
redraw = True
if PageTimeout and AutoAdvanceProgress:
redraw = True
CurrentTime = newtime
return redraw
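# Example (illustrative values): with EstimatedDuration=600 seconds on a
# 1000 pixel wide screen, the progress bar position advances by one pixel
# every 0.6 seconds; TimerTick() requests a redraw whenever that pixel
# position or the displayed second changes.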
# enable time tracking mode (if not already enabled)
def EnableTimeTracking(force=False):
global TimeTracking
if force or (TimeDisplay and not(TimeTracking) and not(ShowClock) and FirstPage):
print >>sys.stderr, "Time tracking mode enabled."
TimeTracking = True
print "page duration enter leave"
print "---- -------- -------- --------"
# set cursor visibility
def SetCursor(visible):
global CursorVisible
CursorVisible = visible
if not(CursorImage) and (MouseHideDelay != 1):
Platform.SetMouseVisible(visible)
# handle a shortcut key event: store it (if shifted) or return the
# page number to navigate to (if not)
def HandleShortcutKey(key, current=0):
if not(key) or (key[0] != '*'):
return None
shift = key.startswith('*shift+')
if shift:
key = key[7:]
else:
key = key[1:]
if (len(key) == 1) or ((key >= "f1") and (key <= "f9")):
# Note: F10..F12 are implicitly included due to lexicographic sorting
page = None
for check_page, props in PageProps.iteritems():
if props.get('shortcut') == key:
page = check_page
break
if shift:
if page:
DelPageProp(page, 'shortcut')
SetPageProp(current, 'shortcut', key)
elif page and (page != current):
return page
return None
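# Example (hypothetical key events): receiving '*shift+f5' while on page 7
# stores the shortcut 'f5' for page 7; a later plain '*f5' on another page
# returns 7 so that the caller can jump there.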
##### EVENT-TO-ACTION BINDING CODE #############################################
SpecialKeyNames = set(filter(None, """
ampersand asterisk at backquote backslash backspace break capslock caret clear
comma down escape euro end exclaim greater hash help home insert kp_divide
kp_enter kp_equals kp_minus kp_multiply kp_plus lalt last lctrl left leftbracket
leftparen less lmeta lshift lsuper menu minus mode numlock pagedown pageup pause
period plus power print question quote quotedbl ralt rctrl return right
rightbracket rightparen rmeta rshift rsuper scrollock semicolon slash space
sysreq tab underscore up
""".split()))
KnownEvents = set(list(SpecialKeyNames) + filter(None, """
a b c d e f g h i j k l m n o p q r s t u v w x y z 0 1 2 3 4 5 6 7 8 9
kp0 kp1 kp2 kp3 kp4 kp5 kp6 kp7 kp8 kp9 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12
lmb mmb rmb wheeldown wheelup
""".split()))
# event handling model:
# - Platform.GetEvent() generates platform-neutral event (= string) that
# identifies a key or mouse button, with prefix:
# - '+' = key pressed, '-' = key released, '*' = main event ('*' is generated
# directly before '-' for keys and directly after '+' for mouse buttons)
# - "ctrl+", "alt+", "shift+" modifiers, in that order
# - event gets translated into a list of actions via the EventMap dictionary
# - actions are processed in order of that list, like priorities:
# - list processing terminates at the first action that is successfully handled
# - exception: "forced actions" will always be executed, even if a higher-prio
# action of that list has already been executed; also, they will not stop
# action list execution, even if they have been handled
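# A sketch of how this plays out (hypothetical binding, not a default):
# with EventMap = {'x': ['fullscreen', 'quit']}, pressing the key generates
# '+x', '*x' and '-x' events; processing '*x' tries the 'fullscreen' action
# first and only falls through to 'quit' if the active mode's handler
# reports 'fullscreen' as not handled.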
KnownActions = {}
EventMap = {}
ForcedActions = set()
ActivateReleaseActions = set()
class ActionNotHandled(Exception):
pass
def ActionValidIf(cond):
if not cond:
raise ActionNotHandled()
class ActionRelayBase(object):
def __init__(self):
global KnownActions, ActivateReleaseActions
for item in dir(self):
if (item[0] == '_') and (item[1] != '_') and (item[1] != 'X') and (item[-1] != '_'):
doc = getattr(self, item).__doc__
if item.endswith("_ACTIVATE"):
item = item[:-9]
ActivateReleaseActions.add(item)
elif item.endswith("_RELEASE"):
item = item[:-8]
ActivateReleaseActions.add(item)
item = item[1:].replace('_', '-')
olddoc = KnownActions.get(item)
if not olddoc:
KnownActions[item] = doc
def __call__(self, ev):
evname = ev[1:].replace('-', '_')
if ev[0] == '$':
meth = getattr(self, '_X_' + evname, None)
elif ev[0] == '*':
meth = getattr(self, '_' + evname, None)
elif ev[0] == '+':
meth = getattr(self, '_' + evname + '_ACTIVATE', None)
        elif ev[0] == '-':
            meth = getattr(self, '_' + evname + '_RELEASE', None)
        else:
            meth = None  # unknown event prefix -> avoid a NameError below
if not meth:
return False
try:
meth()
return True
except ActionNotHandled:
return False
def ProcessEvent(ev, handler_func):
"""
calls the appropriate action handlers for an event
as returned by Platform.GetEvent()
"""
if not ev:
return False
if ev[0] == '$':
handler_func(ev)
try:
events = EventMap[ev[1:]]
except KeyError:
return False
prefix = ev[0]
handled = False
no_forced = not(any(((prefix + ev) in ForcedActions) for ev in events))
if no_forced and (prefix in "+-"):
if not(any((ev in ActivateReleaseActions) for ev in events)):
return False
for ev in events:
ev = prefix + ev
if ev in ForcedActions:
handler_func(ev)
elif not handled:
handled = handler_func(ev)
if handled and no_forced:
break
return handled
def ValidateEvent(ev, error_prefix=None):
for prefix in ("ctrl+", "alt+", "shift+"):
if ev.startswith(prefix):
ev = ev[len(prefix):]
if (ev in KnownEvents) or ev.startswith('unknown-'):
return True
if error_prefix:
error_prefix += ": "
else:
error_prefix = ""
print >>sys.stderr, "ERROR: %signoring unknown event '%s'" % (error_prefix, ev)
return False
def ValidateAction(ev, error_prefix=None):
if not(KnownActions) or (ev in KnownActions):
return True
if error_prefix:
error_prefix += ": "
else:
error_prefix = ""
print >>sys.stderr, "ERROR: %signoring unknown action '%s'" % (error_prefix, ev)
return False
def BindEvent(events, actions=None, clear=False, remove=False, error_prefix=None):
"""
bind one or more events to one or more actions
- events and actions can be lists or single comma-separated strings
- if clear is False, actions will be *added* to the raw events,
if clear is True, the specified actions will *replace* the current set,
if remove is True, the specified actions will be *removed* from the set
- actions can be omitted; instead, events can be a string consisting
of raw event and internal event names, separated by one of:
'=' -> add or replace, based on the clear flag
'+=' -> always add
':=' -> always clear
'-=' -> always remove
- some special events are recognized:
'clearall' clears *all* actions of *all* raw events;
'defaults' loads all defaults
'include', followed by whitespace and a filename, will include a file
(that's what the basedirs option is for)
"""
global EventMap
if isinstance(events, basestring):
if not actions:
if (';' in events) or ('\n' in events):
for cmd in events.replace('\n', ';').split(';'):
BindEvent(cmd, clear=clear, remove=remove, error_prefix=error_prefix)
return
if '=' in events:
events, actions = events.split('=', 1)
events = events.rstrip()
if events.endswith('+'):
clear = False
events = events[:-1]
elif events.endswith(':'):
clear = True
events = events[:-1]
elif events.endswith('-'):
remove = True
events = events[:-1]
events = events.split(',')
if actions is None:
actions = []
elif isinstance(actions, basestring):
actions = actions.split(',')
actions = [b.replace('_', '-').strip(' \t$+-').lower() for b in actions]
actions = [a for a in actions if ValidateAction(a, error_prefix)]
for event in events:
event_orig = event.replace('\t', ' ').strip(' \r\n+-$')
if not event_orig:
continue
event = event_orig.replace('-', '_').lower()
if event.startswith('include '):
filename = event_orig[8:].strip()
if (filename.startswith('"') and filename.endswith('"')) \
or (filename.startswith("'") and filename.endswith("'")):
filename = filename[1:-1]
ParseInputBindingFile(filename)
continue
elif event == 'clearall':
EventMap = {}
continue
elif event == 'defaults':
LoadDefaultBindings()
continue
event = event.replace(' ', '')
if not ValidateEvent(event, error_prefix):
continue
if remove:
if event in EventMap:
for a in actions:
try:
EventMap[event].remove(a)
except ValueError:
pass
elif clear or not(event in EventMap):
EventMap[event] = actions[:]
else:
EventMap[event].extend(actions)
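# Illustrative BindEvent() calls (hypothetical bindings):
#   BindEvent("x = fullscreen")    # append 'fullscreen' to the 'x' event
#   BindEvent("x := quit")         # replace all actions bound to 'x'
#   BindEvent("x -= quit")         # remove 'quit' from 'x' again
#   BindEvent("y,z = goto-next")   # bind several events at once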
def ParseInputBindingFile(filename):
"""
parse an input configuration file;
basically calls BindEvent() for each line;
'#' is the comment character
"""
try:
f = open(filename, "r")
n = 0
for line in f:
n += 1
line = line.split('#', 1)[0].strip()
if line:
BindEvent(line, error_prefix="%s:%d" % (filename, n))
f.close()
except IOError, e:
print >>sys.stderr, "ERROR: failed to read the input configuration file '%s' -" % filename, e
def EventHelp():
evlist = ["a-z", "0-9", "kp0-kp9", "f1-f12"] + sorted(list(SpecialKeyNames))
print "Event-to-action binding syntax:"
print " <event> [,<event2...>] = <action> [,<action2...>]"
print " By default, this will *add* actions to an event."
print " To *overwrite* the current binding for an event, use ':=' instead of '='."
print " To remove actions from an event, use '-=' instead of '='."
print " Join multiple bindings with a semi-colon (';')."
print "Special commands:"
print " clearall = clear all bindings"
print " defaults = load default bindings"
print " include <file> = load bindings from a file"
print "Binding files use the same syntax with one binding per line;"
print "comments start with a '#' symbol."
print
print "Recognized keyboard event names:"
while evlist:
line = " "
while evlist and ((len(line) + len(evlist[0])) < 78):
line += evlist.pop(0) + ", "
line = line.rstrip()
if not evlist:
line = line.rstrip(',')
print line
print "Recognized mouse event names:"
print " lmb, mmb, rmb (= left, middle and right mouse buttons),"
print " wheelup, wheeldown"
print
print "Recognized actions:"
maxalen = max(map(len, KnownActions))
for action in sorted(KnownActions):
doc = KnownActions[action]
if doc:
print " %s - %s" % (action.ljust(maxalen), doc)
else:
print " %s" % action
print
if not EventMap: return
print "Current bindings:"
maxelen = max(map(len, EventMap))
for event in sorted(EventMap):
if EventMap[event]:
print " %s = %s" % (event.ljust(maxelen), ", ".join(EventMap[event]))
def LoadDefaultBindings():
BindEvent("""clearall
escape, return, kp_enter, lmb, rmb = video-stop
space = video-pause
period = video-step
down = video-seek-backward-10
left = video-seek-backward-1
right = video-seek-forward-1
up = video-seek-forward-10
escape = overview-exit, zoom-exit, spotlight-exit, box-clear, quit
q = quit
f = fullscreen
tab = overview-enter, overview-exit
s = save
t = time-toggle
r = time-reset
c = box-clear
y, z = zoom-enter, zoom-exit
o = toggle-overview
i = toggle-skip
b, period = fade-to-black
w, comma = fade-to-white
return, kp_enter = overview-confirm, spotlight-enter, spotlight-exit
plus, kp_plus, 0, wheelup = spotlight-grow
minus, kp_minus, 9, wheeldown = spotlight-shrink
ctrl+9, ctrl+0 = spotlight-reset
7 = fade-less
8 = fade-more
ctrl+7, ctrl+8 = fade-reset
leftbracket = gamma-decrease
rightbracket = gamma-increase
shift+leftbracket = gamma-bl-decrease
shift+rightbracket = gamma-bl-increase
backslash = gamma-reset
lmb = box-add, hyperlink, overview-confirm
ctrl+lmb = hyperlink-notrans
rmb = zoom-pan, box-remove, overview-exit
mmb = zoom-exit, overview-enter, overview-exit
left, wheelup = overview-prev
right, wheeldown = overview-next
up = overview-up
down = overview-down
lmb, wheeldown, pagedown, down, right, space = goto-next
ctrl+lmb, ctrl+wheeldown, ctrl+pagedown, ctrl+down, ctrl+right, ctrl+space = goto-next-notrans
rmb, wheelup, pageup, up, left, backspace = goto-prev
ctrl+rmb, ctrl+wheelup, ctrl+pageup, ctrl+up, ctrl+left, ctrl+backspace = goto-prev-notrans
home = goto-start
ctrl+home = goto-start-notrans
end = goto-end
ctrl+end = goto-end-notrans
l = goto-last
ctrl+l = goto-last-notrans
""", error_prefix="LoadDefaultBindings")
# basic action implementations (i.e. stuff that is required to work in all modes)
class BaseActions(ActionRelayBase):
def _X_quit(self):
Quit()
def _X_alt_tab(self):
ActionValidIf(Fullscreen)
SetFullscreen(False)
Platform.Minimize()
def _quit(self):
"quit Impressive immediately"
Platform.PostQuitEvent()
def _X_move(self):
# mouse move in fullscreen mode -> show mouse cursor and reset mouse timer
if Fullscreen:
Platform.ScheduleEvent("$hide-mouse", MouseHideDelay)
SetCursor(True)
##### OVERVIEW MODE ############################################################
def UpdateOverviewTexture():
global OverviewNeedUpdate
Loverview.acquire()
try:
gl.load_texture(gl.TEXTURE_2D, Tnext, OverviewImage)
finally:
Loverview.release()
OverviewNeedUpdate = False
# draw the overview page
def DrawOverview():
if VideoPlaying: return
gl.Clear(gl.COLOR_BUFFER_BIT)
TexturedRectShader.get_instance().draw(
0.0, 0.0, 1.0, 1.0,
s1=TexMaxS, t1=TexMaxT,
tex=Tnext, color=0.75
)
pos = OverviewPos(OverviewSelection)
X0 = PixelX * pos[0]
Y0 = PixelY * pos[1]
X1 = PixelX * (pos[0] + OverviewCellX)
Y1 = PixelY * (pos[1] + OverviewCellY)
TexturedRectShader.get_instance().draw(
X0, Y0, X1, Y1,
X0 * TexMaxS, Y0 * TexMaxT,
X1 * TexMaxS, Y1 * TexMaxT,
color=1.0
)
gl.Enable(gl.BLEND)
if OSDFont:
OSDFont.BeginDraw()
DrawOSDEx(OSDTitlePos, CurrentOSDCaption)
DrawOSDEx(OSDPagePos, CurrentOSDPage)
DrawOSDEx(OSDStatusPos, CurrentOSDStatus)
OSDFont.EndDraw()
DrawOverlays()
Platform.SwapBuffers()
# overview zoom effect, time mapped through func
def OverviewZoom(func):
global TransitionRunning
if ZoomDuration <= 0:
return
pos = OverviewPos(OverviewSelection)
X0 = PixelX * (pos[0] + OverviewBorder)
Y0 = PixelY * (pos[1] + OverviewBorder)
X1 = PixelX * (pos[0] - OverviewBorder + OverviewCellX)
Y1 = PixelY * (pos[1] - OverviewBorder + OverviewCellY)
shader = TexturedRectShader.get_instance()
TransitionRunning = True
t0 = Platform.GetTicks()
while not(VideoPlaying):
t = (Platform.GetTicks() - t0) * 1.0 / ZoomDuration
if t >= 1.0: break
t = func(t)
t1 = t*t
t = 1.0 - t1
zoom = (t * (X1 - X0) + t1) / (X1 - X0)
        OX = t * X0 - zoom * X0
        OY = t * Y0 - zoom * Y0
gl.Clear(gl.COLOR_BUFFER_BIT)
shader.draw( # base overview page
OX, OY, OX + zoom, OY + zoom,
s1=TexMaxS, t1=TexMaxT,
tex=Tnext, color=0.75
)
shader.draw( # highlighted part
OX + X0 * zoom, OY + Y0 * zoom,
OX + X1 * zoom, OY + Y1 * zoom,
X0 * TexMaxS, Y0 * TexMaxT,
X1 * TexMaxS, Y1 * TexMaxT,
color=1.0
)
gl.Enable(gl.BLEND)
shader.draw( # overlay of the original high-res page
t * X0, t * Y0,
t * X1 + t1, t * Y1 + t1,
s1=TexMaxS, t1=TexMaxT,
tex=Tcurrent, color=(1.0, 1.0, 1.0, 1.0 - t * t * t)
)
if OSDFont:
OSDFont.BeginDraw()
DrawOSDEx(OSDTitlePos, CurrentOSDCaption, alpha_factor=t)
DrawOSDEx(OSDPagePos, CurrentOSDPage, alpha_factor=t)
DrawOSDEx(OSDStatusPos, CurrentOSDStatus, alpha_factor=t)
OSDFont.EndDraw()
DrawOverlays()
Platform.SwapBuffers()
TransitionRunning = False
# overview keyboard navigation
def OverviewKeyboardNav(delta):
global OverviewSelection
dest = OverviewSelection + delta
if (dest >= OverviewPageCount) or (dest < 0):
return
OverviewSelection = dest
x, y = OverviewPos(OverviewSelection)
Platform.SetMousePos((x + (OverviewCellX / 2), y + (OverviewCellY / 2)))
# overview mode PageProp toggle
def OverviewTogglePageProp(prop, default):
if (OverviewSelection < 0) or (OverviewSelection >= len(OverviewPageMap)):
return
page = OverviewPageMap[OverviewSelection]
SetPageProp(page, prop, not(GetPageProp(page, prop, default)))
UpdateCaption(page, force=True)
DrawOverview()
class ExitOverview(Exception):
pass
# action implementation for overview mode
class OverviewActions(BaseActions):
def _X_move(self):
global OverviewSelection
BaseActions._X_move(self)
# determine highlighted page
x, y = Platform.GetMousePos()
OverviewSelection = \
int((x - OverviewOfsX) / OverviewCellX) + \
int((y - OverviewOfsY) / OverviewCellY) * OverviewGridSize
if (OverviewSelection < 0) or (OverviewSelection >= len(OverviewPageMap)):
UpdateCaption(0)
else:
UpdateCaption(OverviewPageMap[OverviewSelection])
DrawOverview()
def _X_quit(self):
PageLeft(overview=True)
Quit()
def _X_expose(self):
DrawOverview()
def _X_hide_mouse(self):
# mouse timer event -> hide fullscreen cursor
SetCursor(False)
DrawOverview()
def _X_timer_update(self):
force_update = OverviewNeedUpdate
if OverviewNeedUpdate:
UpdateOverviewTexture()
if TimerTick() or force_update:
DrawOverview()
def _overview_exit(self):
"exit overview mode and return to the last page"
global OverviewSelection
OverviewSelection = -1
raise ExitOverview
def _overview_confirm(self):
"exit overview mode and go to the selected page"
raise ExitOverview
def _fullscreen(self):
SetFullscreen(not(Fullscreen))
def _save(self):
SaveInfoScript(InfoScriptPath)
def _fade_to_black(self):
FadeMode(0.0)
def _fade_to_white(self):
FadeMode(1.0)
def _time_toggle(self):
global TimeDisplay
TimeDisplay = not(TimeDisplay)
DrawOverview()
def _time_reset(self):
ResetTimer()
if TimeDisplay:
DrawOverview()
    def _toggle_skip(self):
        # operate on the page highlighted in the overview, not on Pcurrent
        OverviewTogglePageProp('skip', False)
    def _toggle_overview(self):
        if (OverviewSelection >= 0) and (OverviewSelection < len(OverviewPageMap)):
            page = OverviewPageMap[OverviewSelection]
            OverviewTogglePageProp('overview', GetPageProp(page, '_overview', True))
def _overview_up(self):
"move the overview selection upwards"
OverviewKeyboardNav(-OverviewGridSize)
def _overview_prev(self):
"select the previous page in overview mode"
OverviewKeyboardNav(-1)
def _overview_next(self):
"select the next page in overview mode"
OverviewKeyboardNav(+1)
def _overview_down(self):
"move the overview selection downwards"
OverviewKeyboardNav(+OverviewGridSize)
OverviewActions = OverviewActions()
# overview mode entry/loop/exit function
def DoOverview():
global Pcurrent, Pnext, Tcurrent, Tnext, Tracing, OverviewSelection
global PageEnterTime, OverviewMode
Platform.ScheduleEvent("$page-timeout", 0)
PageLeft()
UpdateOverviewTexture()
if GetPageProp(Pcurrent, 'boxes') or Tracing:
BoxFade(lambda t: 1.0 - t)
Tracing = False
OverviewSelection = OverviewPageMapInv[Pcurrent]
OverviewMode = True
OverviewZoom(lambda t: 1.0 - t)
DrawOverview()
PageEnterTime = Platform.GetTicks() - StartTime
try:
while True:
ev = Platform.GetEvent()
if not ev:
continue
if not ProcessEvent(ev, OverviewActions):
try:
page = OverviewPageMap[OverviewSelection]
except IndexError:
page = 0
page = HandleShortcutKey(ev, page)
if page:
OverviewSelection = OverviewPageMapInv[page]
x, y = OverviewPos(OverviewSelection)
Platform.SetMousePos((x + (OverviewCellX / 2), \
y + (OverviewCellY / 2)))
DrawOverview()
except ExitOverview:
PageLeft(overview=True)
if (OverviewSelection < 0) or (OverviewSelection >= OverviewPageCount):
OverviewSelection = OverviewPageMapInv[Pcurrent]
Pnext = Pcurrent
else:
Pnext = OverviewPageMap[OverviewSelection]
if Pnext != Pcurrent:
Pcurrent = Pnext
RenderPage(Pcurrent, Tcurrent)
UpdateCaption(Pcurrent)
OverviewZoom(lambda t: t)
OverviewMode = False
DrawCurrentPage()
if GetPageProp(Pcurrent, 'boxes'):
BoxFade(lambda t: t)
PageEntered()
if not PreloadNextPage(GetNextPage(Pcurrent, 1)):
PreloadNextPage(GetNextPage(Pcurrent, -1))
##### EVENT HANDLING ###########################################################
# set fullscreen mode
def SetFullscreen(fs, do_init=True):
global Fullscreen
if FakeFullscreen:
return # this doesn't work in fake-fullscreen mode
if do_init:
if fs == Fullscreen: return
if not Platform.ToggleFullscreen(): return
Fullscreen = fs
DrawCurrentPage()
if fs:
Platform.ScheduleEvent("$hide-mouse", MouseHideDelay)
else:
Platform.ScheduleEvent("$hide-mouse", 0)
SetCursor(True)
# PageProp toggle
def TogglePageProp(prop, default):
global WantStatus
SetPageProp(Pcurrent, prop, not(GetPageProp(Pcurrent, prop, default)))
UpdateCaption(Pcurrent, force=True)
WantStatus = True
DrawCurrentPage()
# basic action implementations (i.e. stuff that is required to work, except in overview mode)
class BaseDisplayActions(BaseActions):
def _X_quit(self):
if FadeInOut:
EnterFadeMode()
PageLeft()
Quit()
def _X_expose(self):
DrawCurrentPage()
def _X_hide_mouse(self):
# mouse timer event -> hide fullscreen cursor
SetCursor(False)
DrawCurrentPage()
def _X_page_timeout(self):
TransitionTo(GetNextPage(Pcurrent, 1))
def _X_poll_file(self):
global RTrunning, RTrestart, Pnext
dirty = False
for f in FileProps:
s = my_stat(f)
if s != GetFileProp(f, 'stat'):
dirty = True
SetFileProp(f, 'stat', s)
if dirty:
# first, check if the new file is valid
if not os.path.isfile(GetPageProp(Pcurrent, '_file')):
return
# invalidate everything we used to know about the input files
InvalidateCache()
for props in PageProps.itervalues():
for prop in ('_overview_rendered', '_box', '_href'):
if prop in props: del props[prop]
LoadInfoScript()
# force a transition to the current page, reloading it
Pnext = -1
TransitionTo(Pcurrent)
# restart the background renderer thread. this is not completely safe,
# i.e. there's a small chance that we fail to restart the thread, but
# this isn't critical
if CacheMode and BackgroundRendering:
if RTrunning:
RTrestart = True
else:
RTrunning = True
thread.start_new_thread(RenderThread, (Pcurrent, Pnext))
def _X_timer_update(self):
if VideoPlaying and MPlayerProcess:
if MPlayerProcess.poll() is not None:
StopMPlayer()
DrawCurrentPage()
elif TimerTick():
DrawCurrentPage()
# action implementations for video playback
class VideoActions(BaseDisplayActions):
def _video_stop(self):
"stop video playback"
StopMPlayer()
DrawCurrentPage()
def mplayer_command(self, cmd):
"helper for the various video-* actions"
try:
MPlayerProcess.stdin.write(cmd + "\n")
except:
StopMPlayer()
DrawCurrentPage()
def _video_pause(self):
"pause video playback"
self.mplayer_command("pause")
def _video_step(self):
"advance to the next frame in paused video"
self.mplayer_command("framestep")
def _video_seek_backward_10(self):
"seek 10 seconds backward in video"
self.mplayer_command("seek -10 pausing_keep")
def _video_seek_backward_1(self):
"seek 1 second backward in video"
self.mplayer_command("seek -1 pausing_keep")
def _video_seek_forward_1(self):
"seek 1 second forward in video"
self.mplayer_command("seek 1 pausing_keep")
def _video_seek_forward_10(self):
"seek 10 seconds forward in video"
self.mplayer_command("seek 10 pausing_keep")
VideoActions = VideoActions()
# action implementation for normal page display (i.e. everything except overview mode)
class PageDisplayActions(BaseDisplayActions):
def _X_move(self):
global Marking, MarkLR, Panning, ZoomX0, ZoomY0
BaseActions._X_move(self)
x, y = Platform.GetMousePos()
# activate marking if mouse is moved away far enough
if MarkValid and not(Marking):
if (abs(x - MarkBaseX) > 4) and (abs(y - MarkBaseY) > 4):
Marking = True
# mouse move while marking -> update marking box
if Marking:
MarkLR = MouseToScreen((x, y))
# mouse move while RMB is pressed -> panning
if PanValid and ZoomMode:
if not(Panning) and (abs(x - PanBaseX) > 1) and (abs(y - PanBaseY) > 1):
Panning = True
ZoomX0 = PanAnchorX + (PanBaseX - x) * ZoomArea / ScreenWidth
ZoomY0 = PanAnchorY + (PanBaseY - y) * ZoomArea / ScreenHeight
ZoomX0 = min(max(ZoomX0, 0.0), 1.0 - ZoomArea)
ZoomY0 = min(max(ZoomY0, 0.0), 1.0 - ZoomArea)
# if anything changed, redraw the page
if Marking or Tracing or Panning or (CursorImage and CursorVisible):
DrawCurrentPage()
def _zoom_pan_ACTIVATE(self):
"pan visible region in zoom mode"
global PanValid, Panning, PanBaseX, PanBaseY, PanAnchorX, PanAnchorY
ActionValidIf(ZoomMode)
PanValid = True
Panning = False
PanBaseX, PanBaseY = Platform.GetMousePos()
PanAnchorX = ZoomX0
PanAnchorY = ZoomY0
def _zoom_pan(self):
ActionValidIf(ZoomMode and Panning)
def _zoom_pan_RELEASE(self):
global PanValid, Panning
PanValid = False
Panning = False
def _zoom_enter(self):
"enter zoom mode"
ActionValidIf(not(ZoomMode))
tx, ty = MouseToScreen(Platform.GetMousePos())
EnterZoomMode((1.0 - 1.0 / ZoomFactor) * tx, \
(1.0 - 1.0 / ZoomFactor) * ty)
def _zoom_exit(self):
"leave zoom mode"
ActionValidIf(ZoomMode)
LeaveZoomMode()
def _box_add_ACTIVATE(self):
"draw a new highlight box [mouse-only]"
global MarkValid, Marking, MarkBaseX, MarkBaseY, MarkUL, MarkLR
MarkValid = True
Marking = False
MarkBaseX, MarkBaseY = Platform.GetMousePos()
MarkUL = MarkLR = MouseToScreen((MarkBaseX, MarkBaseY))
def _box_add(self):
global Marking
ActionValidIf(Marking)
Marking = False
# reject too small boxes
if ((abs(MarkUL[0] - MarkLR[0]) * ScreenWidth) >= MinBoxSize) \
and ((abs(MarkUL[1] - MarkLR[1]) * ScreenHeight) >= MinBoxSize):
boxes = GetPageProp(Pcurrent, 'boxes', [])
oldboxcount = len(boxes)
boxes.append(NormalizeRect(MarkUL[0], MarkUL[1], MarkLR[0], MarkLR[1]))
SetPageProp(Pcurrent, 'boxes', boxes)
if not(oldboxcount) and not(Tracing):
BoxFade(lambda t: t)
else:
raise ActionNotHandled()
DrawCurrentPage()
def _box_add_RELEASE(self):
global MarkValid
MarkValid = False
def _box_remove(self):
"remove the highlight box under the mouse cursor"
ActionValidIf(not(Panning) and not(Marking))
boxes = GetPageProp(Pcurrent, 'boxes', [])
x, y = MouseToScreen(Platform.GetMousePos())
try:
# if a box is already present around the clicked position, kill it
idx = FindBox(x, y, boxes)
if (len(boxes) == 1) and not(Tracing):
BoxFade(lambda t: 1.0 - t)
del boxes[idx]
SetPageProp(Pcurrent, 'boxes', boxes)
DrawCurrentPage()
except ValueError:
# no box present
raise ActionNotHandled()
def _box_clear(self):
"remove all highlight boxes on the current page"
ActionValidIf(GetPageProp(Pcurrent, 'boxes'))
if not Tracing:
BoxFade(lambda t: 1.0 - t)
DelPageProp(Pcurrent, 'boxes')
DrawCurrentPage()
def _hyperlink(self, allow_transition=True):
"navigate to the hyperlink under the mouse cursor"
x, y = Platform.GetMousePos()
for valid, target, x0, y0, x1, y1 in GetPageProp(Pcurrent, '_href', []):
if valid and (x >= x0) and (x < x1) and (y >= y0) and (y < y1):
if type(target) == types.IntType:
TransitionTo(target, allow_transition=allow_transition)
                else:
RunURL(target)
return
raise ActionNotHandled()
def _hyperlink_notrans(self):
"like 'hyperlink', but no transition on page change"
return self._hyperlink(allow_transition=False)
def _goto_prev(self):
"go to the previous page (with transition)"
TransitionTo(GetNextPage(Pcurrent, -1), allow_transition=True)
def _goto_prev_notrans(self):
"go to the previous page (without transition)"
TransitionTo(GetNextPage(Pcurrent, -1), allow_transition=False)
def _goto_next(self):
"go to the next page (with transition)"
TransitionTo(GetNextPage(Pcurrent, +1), allow_transition=True)
def _goto_next_notrans(self):
"go to the next page (without transition)"
TransitionTo(GetNextPage(Pcurrent, +1), allow_transition=False)
def _goto_last(self):
"go to the last visited page (with transition)"
TransitionTo(LastPage, allow_transition=True)
def _goto_last_notrans(self):
"go to the last visited page (without transition)"
TransitionTo(LastPage, allow_transition=False)
def _goto_start(self):
"go to the first page (with transition)"
ActionValidIf(Pcurrent != 1)
TransitionTo(1, allow_transition=True)
def _goto_start_notrans(self):
"go to the first page (without transition)"
ActionValidIf(Pcurrent != 1)
TransitionTo(1, allow_transition=False)
def _goto_end(self):
"go to the final page (with transition)"
ActionValidIf(Pcurrent != PageCount)
TransitionTo(PageCount, allow_transition=True)
def _goto_end_notrans(self):
"go to the final page (without transition)"
ActionValidIf(Pcurrent != PageCount)
TransitionTo(PageCount, allow_transition=False)
def _overview_enter(self):
"zoom out to the overview page"
LeaveZoomMode()
DoOverview()
def _spotlight_enter(self):
"enter spotlight mode"
global Tracing
ActionValidIf(not(Tracing))
Tracing = True
if GetPageProp(Pcurrent, 'boxes'):
DrawCurrentPage()
else:
BoxFade(lambda t: t)
def _spotlight_exit(self):
"exit spotlight mode"
global Tracing
ActionValidIf(Tracing)
if not GetPageProp(Pcurrent, 'boxes'):
BoxFade(lambda t: 1.0 - t)
Tracing = False
DrawCurrentPage()
def _spotlight_shrink(self):
"decrease the spotlight radius"
ActionValidIf(Tracing)
IncrementSpotSize(-8)
def _spotlight_grow(self):
"increase the spotlight radius"
ActionValidIf(Tracing)
IncrementSpotSize(+8)
def _spotlight_reset(self):
"reset the spotlight radius to its default value"
global SpotRadius
ActionValidIf(Tracing)
SpotRadius = SpotRadiusBase
GenerateSpotMesh()
DrawCurrentPage()
def _fullscreen(self):
"toggle fullscreen mode"
SetFullscreen(not(Fullscreen))
def _save(self):
"save the info script"
SaveInfoScript(InfoScriptPath)
def _fade_to_black(self):
"fade to a black screen"
FadeMode(0.0)
def _fade_to_white(self):
"fade to a white screen"
FadeMode(1.0)
def _time_toggle(self):
"toggle time display and/or time tracking mode"
global TimeDisplay
TimeDisplay = not(TimeDisplay)
DrawCurrentPage()
EnableTimeTracking()
def _time_reset(self):
"reset the on-screen timer"
ResetTimer()
if TimeDisplay:
DrawCurrentPage()
def _toggle_skip(self):
"toggle 'skip' flag of current page"
TogglePageProp('skip', False)
def _toggle_overview(self):
"toggle 'visible on overview' flag of current page"
TogglePageProp('overview', GetPageProp(Pcurrent, '_overview', True))
def _fade_less(self):
"decrease the spotlight/box background darkness"
global BoxFadeDarkness
BoxFadeDarkness = max(0.0, BoxFadeDarkness - BoxFadeDarknessStep)
DrawCurrentPage()
def _fade_more(self):
"increase the spotlight/box background darkness"
global BoxFadeDarkness
BoxFadeDarkness = min(1.0, BoxFadeDarkness + BoxFadeDarknessStep)
DrawCurrentPage()
def _fade_reset(self):
"reset spotlight/box background darkness to default"
global BoxFadeDarkness
BoxFadeDarkness = BoxFadeDarknessBase
DrawCurrentPage()
def _gamma_decrease(self):
"decrease gamma"
SetGamma(new_gamma=Gamma / GammaStep)
def _gamma_increase(self):
"increase gamma"
SetGamma(new_gamma=Gamma * GammaStep)
def _gamma_bl_decrease(self):
"decrease black level"
SetGamma(new_black=BlackLevel - BlackLevelStep)
def _gamma_bl_increase(self):
"increase black level"
SetGamma(new_black=BlackLevel + BlackLevelStep)
def _gamma_reset(self):
"reset gamma and black level to the defaults"
SetGamma(1.0, 0)
PageDisplayActions = PageDisplayActions()
ForcedActions.update(("-zoom-pan", "+zoom-pan", "-box-add", "+box-add"))
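# Hedged note on the event-string convention, inferred from its uses below
# and in DoEventTestMode: names starting with '$' are internal/timer events
# (e.g. "$quit", "$expose", "$timer-update"), names starting with '*' are
# key presses (optionally "*ctrl+..." for the Ctrl modifier), and bare
# names like "lmb" or "wheelup" are mouse events as used in --bind strings.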
# main event handling function
def EventHandlerLoop():
while True:
ev = Platform.GetEvent()
if VideoPlaying:
# video mode -> ignore all non-video actions
ProcessEvent(ev, VideoActions)
elif ProcessEvent(ev, PageDisplayActions):
# normal action has been handled -> done
continue
elif ev and (ev[0] == '*'):
# handle a shortcut key
ctrl = ev.startswith('*ctrl+')
if ctrl:
ev = '*' + ev[6:]
page = HandleShortcutKey(ev, Pcurrent)
if page:
TransitionTo(page, allow_transition=not(ctrl))
##### FILE LIST GENERATION #####################################################
def IsImageFileName(name):
return os.path.splitext(name)[1].lower() in \
(".jpg", ".jpeg", ".png", ".tif", ".tiff", ".bmp", ".ppm", ".pgm")
def IsPlayable(name):
return IsImageFileName(name) or name.lower().endswith(".pdf") or os.path.isdir(name)
def AddFile(name, title=None, implicit=False):
global FileList, FileName
# handle list files
if name.startswith('@') and os.path.isfile(name[1:]):
name = name[1:]
dirname = os.path.dirname(name)
try:
f = file(name, "r")
next_title = None
for line in f:
line = [part.strip() for part in line.split('#', 1)]
if len(line) == 1:
subfile = line[0]
title = None
else:
subfile, title = line
if subfile:
AddFile(os.path.normpath(os.path.join(dirname, subfile)), title, implicit=True)
f.close()
except IOError:
print >>sys.stderr, "Error: cannot read list file `%s'" % name
return
# generate absolute path
path_sep_at_end = name.endswith(os.path.sep)
name = os.path.normpath(os.path.abspath(name)).rstrip(os.path.sep)
if path_sep_at_end:
name += os.path.sep
# set FileName to first (explicitly specified) input file
if not implicit:
if not FileList:
FileName = name
else:
FileName = ""
if os.path.isfile(name):
FileList.append(name)
if title: SetFileProp(name, 'title', title)
elif os.path.isdir(name):
images = [os.path.join(name, f) for f in os.listdir(name) if IsImageFileName(f)]
images.sort(key=lambda s: s.lower())
if not images:
print >>sys.stderr, "Warning: no image files in directory `%s'" % name
for img in images:
AddFile(img, implicit=True)
else:
files = list(filter(IsPlayable, glob.glob(name)))
if files:
for f in files: AddFile(f, implicit=True)
else:
print >>sys.stderr, "Error: input file `%s' not found" % name
##### INITIALIZATION ###########################################################
LoadDefaultBindings()
def main():
global gl, ScreenWidth, ScreenHeight, TexWidth, TexHeight, TexSize
global TexMaxS, TexMaxT, EdgeX, EdgeY, PixelX, PixelY, LogoImage
global OverviewGridSize, OverviewCellX, OverviewCellY
global OverviewOfsX, OverviewOfsY, OverviewBorder, OverviewImage, OverviewPageCount
global OverviewPageMap, OverviewPageMapInv, FileName, FileList, PageCount
global DocumentTitle, PageProps, LogoTexture, OSDFont
global Pcurrent, Pnext, Tcurrent, Tnext, InitialPage
global CacheFile, CacheFileName, BaseWorkingDir, RenderToDirectory
global PAR, DAR, TempFileName
global BackgroundRendering, FileStats, RTrunning, RTrestart, StartTime
global CursorImage, CursorVisible, InfoScriptPath
global HalfScreen, AutoAdvance, WindowPos
global BoxFadeDarknessBase, SpotRadiusBase
global BoxIndexBuffer, UseBlurShader
# allocate temporary file
TempFileName = tempfile.mktemp(prefix="impressive-", suffix="_tmp")
# some input guesswork
BaseWorkingDir = os.getcwd()
if not(FileName) and (len(FileList) == 1):
FileName = FileList[0]
if FileName and not(FileList):
AddFile(FileName)
if FileName:
DocumentTitle = os.path.splitext(os.path.split(FileName)[1])[0]
# early graphics initialization
Platform.Init()
# detect screen size and compute aspect ratio
if Fullscreen and (UseAutoScreenSize or not(Platform.allow_custom_fullscreen_res)):
size = Platform.GetScreenSize()
if size:
ScreenWidth, ScreenHeight = size
print >>sys.stderr, "Detected screen size: %dx%d pixels" % (ScreenWidth, ScreenHeight)
if DAR is None:
PAR = 1.0
DAR = float(ScreenWidth) / float(ScreenHeight)
else:
PAR = DAR / float(ScreenWidth) * float(ScreenHeight)
# override some irrelevant settings in event test mode
if EventTestMode:
FileList = ["XXX.EventTestDummy.XXX"]
InfoScriptPath = None
RenderToDirectory = False
InitialPage = None
HalfScreen = False
# fill the page list
if Shuffle:
random.shuffle(FileList)
PageCount = 0
for name in FileList:
ispdf = name.lower().endswith(".pdf")
if ispdf:
# PDF input -> initialize renderers and if none available, reject
if not InitPDFRenderer():
print >>sys.stderr, "Ignoring unrenderable input file '%s'." % name
continue
# try to pre-parse the PDF file
pages = 0
out = [(ScreenWidth + Overscan, ScreenHeight + Overscan),
(ScreenWidth + Overscan, ScreenHeight + Overscan)]
res = [(72.0, 72.0), (72.0, 72.0)]
# phase 1: internal PDF parser
try:
pages, pdf_width, pdf_height = analyze_pdf(name)
out = [ZoomToFit((pdf_width, pdf_height * PAR)),
ZoomToFit((pdf_height, pdf_width * PAR))]
res = [(out[0][0] * 72.0 / pdf_width, out[0][1] * 72.0 / pdf_height),
(out[1][1] * 72.0 / pdf_width, out[1][0] * 72.0 / pdf_height)]
except KeyboardInterrupt:
raise
except:
pass
# phase 2: use pdftk
try:
assert 0 == subprocess.Popen([pdftkPath, name, "dump_data", "output", TempFileName + ".txt"]).wait()
title, pages = pdftkParse(TempFileName + ".txt", PageCount)
if title and (len(FileList) == 1):
DocumentTitle = title
except KeyboardInterrupt:
raise
except:
pass
else:
# Image File
pages = 1
SetPageProp(PageCount + 1, '_title', os.path.split(name)[-1])
# validity check
if not pages:
print >>sys.stderr, "WARNING: The input file `%s' could not be analyzed." % name
continue
# add pages and files into PageProps and FileProps
pagerange = list(range(PageCount + 1, PageCount + pages + 1))
for page in pagerange:
SetPageProp(page, '_file', name)
if ispdf: SetPageProp(page, '_page', page - PageCount)
title = GetFileProp(name, 'title')
if title: SetPageProp(page, '_title', title)
SetFileProp(name, 'pages', GetFileProp(name, 'pages', []) + pagerange)
SetFileProp(name, 'offsets', GetFileProp(name, 'offsets', []) + [PageCount])
if not GetFileProp(name, 'stat'): SetFileProp(name, 'stat', my_stat(name))
if ispdf:
SetFileProp(name, 'out', out)
SetFileProp(name, 'res', res)
PageCount += pages
# no pages? strange ...
if not PageCount:
print >>sys.stderr, "The presentation doesn't have any pages, quitting."
sys.exit(1)
# if rendering is wanted, do it NOW
if RenderToDirectory:
sys.exit(DoRender())
# load and execute info script
if not InfoScriptPath:
InfoScriptPath = FileName + ".info"
LoadInfoScript()
# initialize some derived variables
BoxFadeDarknessBase = BoxFadeDarkness
SpotRadiusBase = SpotRadius
# get the initial page number
if not InitialPage:
InitialPage = GetNextPage(0, 1)
Pcurrent = InitialPage
if (Pcurrent <= 0) or (Pcurrent > PageCount):
print >>sys.stderr, "Attempt to start the presentation at an invalid page (%d of %d), quitting." % (InitialPage, PageCount)
sys.exit(1)
# initialize graphics
try:
Platform.StartDisplay()
except:
print >>sys.stderr, "FATAL: failed to create rendering surface in the desired resolution (%dx%d)" % (ScreenWidth, ScreenHeight)
sys.exit(1)
if Fullscreen:
Platform.SetMouseVisible(False)
CursorVisible = False
if (Gamma != 1.0) or (BlackLevel != 0):
SetGamma(force=True)
# initialize OpenGL
try:
gl = Platform.LoadOpenGL()
print >>sys.stderr, "OpenGL renderer:", GLRenderer
# check if graphics are unaccelerated
renderer = GLRenderer.lower().replace(' ', '').replace('(r)', '')
if not(renderer) \
or (renderer in ("mesaglxindirect", "gdigeneric")) \
or renderer.startswith("software") \
or ("llvmpipe" in renderer):
print >>sys.stderr, "WARNING: Using an OpenGL software renderer. Impressive will work, but it will"
print >>sys.stderr, " very likely be too slow to be usable."
# check for old hardware that can't deal with the blur shader
for substr in ("i915", "intel915", "intel945", "intelq3", "intelg3", "inteligd", "gma900", "gma950", "gma3000", "gma3100", "gma3150"):
if substr in renderer:
UseBlurShader = False
# check the OpenGL version (2.0 needed to ensure NPOT texture support)
extensions = set((gl.GetString(gl.EXTENSIONS) or "").split())
if (GLVersion < "2") and (not("GL_ARB_shader_objects" in extensions) or not("GL_ARB_texture_non_power_of_two" in extensions)):
raise ImportError("OpenGL version %r is below 2.0 and the necessary extensions are unavailable" % GLVersion)
except ImportError, e:
if GLVendor: print >>sys.stderr, "OpenGL vendor:", GLVendor
if GLRenderer: print >>sys.stderr, "OpenGL renderer:", GLRenderer
if GLVersion: print >>sys.stderr, "OpenGL version:", GLVersion
print >>sys.stderr, "FATAL:", e
print >>sys.stderr, "This likely means that your graphics driver or hardware is too old."
sys.exit(1)
# some further OpenGL configuration
if Verbose:
GLShader.LOG_DEFAULT = GLShader.LOG_IF_NOT_EMPTY
for shader in RequiredShaders:
shader.get_instance()
if UseBlurShader:
try:
BlurShader.get_instance()
except GLShaderCompileError:
UseBlurShader = False
if Verbose:
if UseBlurShader:
print >>sys.stderr, "Using blur-and-desaturate shader for highlight box and spotlight mode."
else:
print >>sys.stderr, "Using legacy multi-pass blur for highlight box and spotlight mode."
gl.BlendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)
BoxIndexBuffer = HighlightIndexBuffer(4)
# setup the OpenGL texture size
TexWidth = (ScreenWidth + 3) & (-4)
TexHeight = (ScreenHeight + 3) & (-4)
TexMaxS = float(ScreenWidth) / TexWidth
TexMaxT = float(ScreenHeight) / TexHeight
TexSize = TexWidth * TexHeight * 3
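# Hedged worked example: "(n + 3) & -4" rounds n up to the next multiple of
# four, so a 1366x768 screen gets a 1368x768 texture; TexMaxS/TexMaxT are
# the fractions of the texture the screen actually covers (1366/1368 and
# 768/768 here), and TexSize is the size of one RGB frame in bytes.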
# set up some variables
PixelX = 1.0 / ScreenWidth
PixelY = 1.0 / ScreenHeight
ScreenAspect = float(ScreenWidth) / float(ScreenHeight)
EdgeX = BoxEdgeSize * PixelX
EdgeY = BoxEdgeSize * PixelY
# prepare logo image
LogoImage = Image.open(cStringIO.StringIO(LOGO.decode('base64')))
LogoTexture = gl.make_texture(gl.TEXTURE_2D, filter=gl.NEAREST, img=LogoImage)
DrawLogo()
Platform.SwapBuffers()
# initialize OSD font
try:
OSDFont = GLFont(FontTextureWidth, FontTextureHeight, FontList, FontSize, search_path=FontPath)
DrawLogo()
titles = []
for key in ('title', '_title'):
titles.extend([p[key] for p in PageProps.itervalues() if key in p])
if titles:
OSDFont.AddString("".join(titles))
except ValueError:
print >>sys.stderr, "The OSD font size is too large, the OSD will be rendered incompletely."
except IOError:
print >>sys.stderr, "Could not open OSD font file, disabling OSD."
except (NameError, AttributeError, TypeError):
print >>sys.stderr, "Your version of PIL is too old or incomplete, disabling OSD."
# handle event test mode
if EventTestMode:
DoEventTestMode()
# initialize mouse cursor
if CursorImage or not(Platform.has_hardware_cursor):
img = None
if CursorImage and not(CursorImage.lower() in ("-", "default")):
try:
img = Image.open(CursorImage).convert('RGBA')
img.load()
except:
print >>sys.stderr, "Could not open the mouse cursor image, using standard cursor."
img = None
CursorImage = PrepareCustomCursor(img)
# set up page cache
if CacheMode == PersistentCache:
if not CacheFileName:
CacheFileName = FileName + ".cache"
InitPCache()
if CacheMode == FileCache:
CacheFile = tempfile.TemporaryFile(prefix="impressive-", suffix=".cache")
# initialize overview metadata
OverviewPageMap=[i for i in xrange(1, PageCount + 1) \
if GetPageProp(i, ('overview', '_overview'), True) \
and (i >= PageRangeStart) and (i <= PageRangeEnd)]
OverviewPageCount = max(len(OverviewPageMap), 1)
OverviewPageMapInv = {}
for page in xrange(1, PageCount + 1):
OverviewPageMapInv[page] = len(OverviewPageMap) - 1
for i in xrange(len(OverviewPageMap)):
if OverviewPageMap[i] >= page:
OverviewPageMapInv[page] = i
break
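# Hedged example: OverviewPageMapInv maps every page (including ones hidden
# on the overview) to the index of the first overview cell at or after it,
# falling back to the last cell; with OverviewPageMap = [1, 3, 4], page 2
# maps to cell index 1 (the cell of page 3).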
# initialize overview page geometry
OverviewGridSize = 1
while OverviewPageCount > OverviewGridSize * OverviewGridSize:
OverviewGridSize += 1
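# Hedged example: this loop picks the smallest grid size g with
# g*g >= OverviewPageCount, e.g. 10 overview pages land on a 4x4 grid
# (3*3 = 9 would be one cell short).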
if HalfScreen:
# in half-screen mode, temporarily override ScreenWidth
saved_screen_width = ScreenWidth
ScreenWidth /= 2
OverviewCellX = int(ScreenWidth / OverviewGridSize)
OverviewCellY = int(ScreenHeight / OverviewGridSize)
OverviewOfsX = int((ScreenWidth - OverviewCellX * OverviewGridSize)/2)
OverviewOfsY = int((ScreenHeight - OverviewCellY * \
int((OverviewPageCount + OverviewGridSize - 1) / OverviewGridSize)) / 2)
while OverviewBorder and (min(OverviewCellX - 2 * OverviewBorder, OverviewCellY - 2 * OverviewBorder) < 16):
OverviewBorder -= 1
OverviewImage = Image.new('RGB', (TexWidth, TexHeight))
if HalfScreen:
OverviewOfsX += ScreenWidth
ScreenWidth = saved_screen_width
# fill overlay "dummy" images
dummy = LogoImage.copy()
border = max(OverviewLogoBorder, 2 * OverviewBorder)
maxsize = (OverviewCellX - border, OverviewCellY - border)
if (dummy.size[0] > maxsize[0]) or (dummy.size[1] > maxsize[1]):
dummy.thumbnail(ZoomToFit(dummy.size, maxsize), Image.ANTIALIAS)
margX = int((OverviewCellX - dummy.size[0]) / 2)
margY = int((OverviewCellY - dummy.size[1]) / 2)
dummy = dummy.convert(mode='RGB')
for page in range(OverviewPageCount):
pos = OverviewPos(page)
OverviewImage.paste(dummy, (pos[0] + margX, pos[1] + margY))
del dummy
# compute auto-advance timeout, if applicable
if EstimatedDuration and AutoAutoAdvance:
time_left = EstimatedDuration * 1000
pages = 0
p = InitialPage
while p:
override = GetPageProp(p, 'timeout')
if override:
time_left -= override
else:
pages += 1
pnext = GetNextPage(p, 1)
if pnext:
time_left -= GetPageProp(p, 'transtime', TransitionDuration)
p = pnext
if pages and (time_left >= pages):
AutoAdvance = time_left / pages
print >>sys.stderr, "Setting auto-advance timeout to %.1f seconds." % (0.001 * AutoAdvance)
else:
print >>sys.stderr, "Warning: Could not determine auto-advance timeout automatically."
# set up background rendering
if not HaveThreads:
print >>sys.stderr, "Note: Background rendering isn't available on this platform."
BackgroundRendering = False
# if caching is enabled, pre-render all pages
if CacheMode and not(BackgroundRendering):
DrawLogo()
DrawProgress(0.0)
Platform.SwapBuffers()
for pdf in FileProps:
if pdf.lower().endswith(".pdf"):
ParsePDF(pdf)
stop = False
progress = 0.0
def prerender_action_handler(action):
if action in ("$quit", "*quit"):
Quit()
for page in range(InitialPage, PageCount + 1) + range(1, InitialPage):
while True:
ev = Platform.GetEvent(poll=True)
if not ev: break
ProcessEvent(ev, prerender_action_handler)
if ev.startswith('*'):
stop = True
if stop: break
if (page >= PageRangeStart) and (page <= PageRangeEnd):
PageImage(page)
DrawLogo()
progress += 1.0 / PageCount
DrawProgress(progress)
Platform.SwapBuffers()
# create buffer textures
DrawLogo()
Platform.SwapBuffers()
Tcurrent, Tnext = [gl.make_texture(gl.TEXTURE_2D, gl.CLAMP_TO_EDGE, gl.LINEAR) for dummy in (1,2)]
# prebuffer current and next page
Pnext = 0
RenderPage(Pcurrent, Tcurrent)
PageEntered(update_time=False)
PreloadNextPage(GetNextPage(Pcurrent, 1))
# some other preparations
PrepareTransitions()
GenerateSpotMesh()
if PollInterval:
Platform.ScheduleEvent("$poll-file", PollInterval * 1000, periodic=True)
# start the background rendering thread
if CacheMode and BackgroundRendering:
RTrunning = True
thread.start_new_thread(RenderThread, (Pcurrent, Pnext))
# parse PDF file if caching is disabled
if not CacheMode:
for pdf in FileProps:
if pdf.lower().endswith(".pdf"):
SafeCall(ParsePDF, [pdf])
# start output and enter main loop
StartTime = Platform.GetTicks()
if TimeTracking:
EnableTimeTracking(True)
Platform.ScheduleEvent("$timer-update", 100, periodic=True)
if not(Fullscreen) and CursorImage:
Platform.SetMouseVisible(False)
if FadeInOut:
LeaveFadeMode()
else:
DrawCurrentPage()
UpdateCaption(Pcurrent)
EventHandlerLoop() # never returns
# event test mode implementation
def DoEventTestMode():
last_event = "(None)"
need_redraw = True
cx = ScreenWidth / 2
y1 = ScreenHeight / 5
y2 = (ScreenHeight * 4) / 5
if OSDFont:
dy = OSDFont.GetLineHeight()
Platform.ScheduleEvent('$dummy', 1000) # required to ensure that time measurement works :(
print >>sys.stderr, "Entering Event Test Mode."
print " timestamp | delta-time | event"
t0 = Platform.GetTicks()
while True:
if need_redraw:
DrawLogo()
if OSDFont:
gl.Enable(gl.BLEND)
OSDFont.BeginDraw()
OSDFont.Draw((cx, y1 - dy), "Event Test Mode", align=Center, beveled=False, bold=True)
OSDFont.Draw((cx, y1), "press Alt+F4 to quit", align=Center, beveled=False)
OSDFont.Draw((cx, y2 - dy), "Last Event:", align=Center, beveled=False, bold=True)
OSDFont.Draw((cx, y2), last_event, align=Center, beveled=False)
OSDFont.EndDraw()
gl.Disable(gl.BLEND)
Platform.SwapBuffers()
need_redraw = False
ev = Platform.GetEvent()
if ev == '$expose':
need_redraw = True
elif ev == '$quit':
Quit()
elif ev and ev.startswith('*'):
now = Platform.GetTicks()
print "%7d ms | %7d ms | %s" % (int(now), int(now - t0), ev[1:])
t0 = now
last_event = ev[1:]
need_redraw = True
# wrapper around main() that ensures proper uninitialization
def run_main():
global CacheFile
try:
try:
main()
except SystemExit:
raise
except KeyboardInterrupt:
pass
except:
print >>sys.stderr
print >>sys.stderr, 79 * "="
print >>sys.stderr, "OOPS! Impressive crashed!"
print >>sys.stderr, "This shouldn't happen. Please report this incident to the author, including the"
print >>sys.stderr, "full output of the program, particularly the following lines. If possible,"
print >>sys.stderr, "please also send the input files you used."
print >>sys.stderr
print >>sys.stderr, "Impressive version:", __version__
print >>sys.stderr, "Python version:", sys.version
print >>sys.stderr, "PyGame version:", pygame.__version__
print >>sys.stderr, "PIL version:", Image.VERSION
if PDFRenderer:
print >>sys.stderr, "PDF renderer:", PDFRenderer.name
else:
print >>sys.stderr, "PDF renderer: None"
if GLVendor: print >>sys.stderr, "OpenGL vendor:", GLVendor
if GLRenderer: print >>sys.stderr, "OpenGL renderer:", GLRenderer
if GLVersion: print >>sys.stderr, "OpenGL version:", GLVersion
if hasattr(os, 'uname'):
uname = os.uname()
print >>sys.stderr, "Operating system: %s %s (%s)" % (uname[0], uname[2], uname[4])
else:
print >>sys.stderr, "Python platform:", sys.platform
if os.path.isfile("/usr/bin/lsb_release"):
lsb_release = subprocess.Popen(["/usr/bin/lsb_release", "-sd"], stdout=subprocess.PIPE)
print >>sys.stderr, "Linux distribution:", lsb_release.stdout.read().strip()
lsb_release.wait()
print >>sys.stderr, "Command line:", ' '.join(('"%s"'%arg if (' ' in arg) else arg) for arg in sys.argv)
traceback.print_exc(file=sys.stderr)
finally:
StopMPlayer()
# ensure that background rendering is halted
Lrender.acquire()
Lcache.acquire()
# remove all temp files
if 'CacheFile' in globals():
del CacheFile
for tmp in glob.glob(TempFileName + "*"):
try:
os.remove(tmp)
except OSError:
pass
Platform.Quit()
# release all locks
try:
if Lrender.locked():
Lrender.release()
except:
pass
try:
if Lcache.locked():
Lcache.release()
except:
pass
try:
if Loverview.locked():
Loverview.release()
except:
pass
##### COMMAND-LINE PARSER AND HELP #############################################
def if_op(cond, res_then, res_else):
if cond: return res_then
else: return res_else
def HelpExit(code=0):
print """A nice presentation tool.
Usage: """+os.path.basename(sys.argv[0])+""" [OPTION...] <INPUT(S)...>
You may either play a PDF file, a directory containing image files or
individual image files.
Input options:
-r, --rotate <n> rotate pages clockwise in 90-degree steps
--scale scale images to fit screen (not used in PDF mode)
--supersample use supersampling (only used in PDF mode)
-s --supersample for PDF files, --scale for image files
-I, --script <path> set the path of the info script
-u, --poll <seconds> check periodically if the source file has been
updated and reload it if so
-X, --shuffle put input files into random order
-h, --help show this help text and exit
Output options:
-o, --output <dir> don't display the presentation, only render to .png
--fullscreen start in fullscreen mode
-ff, --fake-fullscreen start in "fake fullscreen" mode
-f, --windowed start in windowed mode
-g, --geometry <WxH> set window size or fullscreen resolution
-A, --aspect <X:Y> adjust for a specific display aspect ratio (e.g. 5:4)
-G, --gamma <G[:BL]> specify startup gamma and black level
Page options:
-i, --initialpage <n> start with page <n>
-p, --pages <A-B> only cache pages in the specified range;
implicitly sets -i <A>
-w, --wrap go back to the first page after the last page
-O, --autooverview <x> automatically derive page visibility on overview page
-O first = show pages with captions
-O last = show pages before pages with captions
-Q, --autoquit quit after the last slide (no effect with --wrap)
Display options:
-t, --transition <trans[,trans2...]>
force a specific transition or set of transitions
-l, --listtrans print a list of available transitions and exit
-F, --font <file> use a specific TrueType font file for the OSD
-S, --fontsize <px> specify the OSD font size in pixels
-C, --cursor <F[:X,Y]> use a .png image as the mouse cursor
-L, --layout <spec> set the OSD layout (please read the documentation)
-z, --zoom <factor> set zoom factor (integer number, default: 2)
-x, --fade fade in at start and fade out at end
--spot-radius <px> set the initial radius of the spotlight, in pixels
--invert display slides in inverted colors
--min-box-size <x> set minimum size of a highlight box, in pixels
--darkness <p> set highlight box mode darkness to <p> percent
--noblur use legacy blur implementation
Timing options:
-M, --minutes display time in minutes, not seconds
--clock show current time instead of time elapsed
--tracking enable time tracking mode
-a, --auto <seconds> automatically advance to next page after some seconds
-d, --duration <time> set the desired duration of the presentation and show
a progress bar at the bottom of the screen
-y, --auto-auto if a duration is set, set the default time-out so
that it will be reached exactly
-k, --auto-progress show a progress bar for each page when auto-advancing
-T, --transtime <ms> set transition duration in milliseconds
-D, --mousedelay <ms> set mouse hide delay for fullscreen mode (in ms)
(0 = show permanently, 1 = don't show at all)
-B, --boxfade <ms> set highlight box fade duration in milliseconds
-Z, --zoomtime <ms> set zoom animation duration in milliseconds
-q, --page-progress show a progress bar based on the position in the
presentation (pages, not time)
Control options:
--control-help display help about control configuration and exit
-e, --bind set controls (modify event/action bindings)
-E, --controls <file> load control configuration from a file
--noclicks disable page navigation via left/right mouse click
-W, --nowheel disable page navigation via mouse wheel
--evtest run Impressive in event test mode
Advanced options:
-c, --cache <mode> set page cache mode:
-c none = disable caching completely
-c memory = store cache in RAM, uncompressed
-c compressed = store cache in RAM, compressed
-c disk = store cache on disk temporarily
-c persistent = store cache on disk persistently
--cachefile <path> set the persistent cache file path (implies -cp)
-b, --noback don't pre-render images in the background
-P, --renderer <path> set path to PDF renderer executable (GhostScript,
Xpdf/Poppler pdftoppm, or MuPDF mudraw/pdfdraw)
-V, --overscan <px> render PDF files <px> pixels larger than the screen
--nologo disable startup logo and version number display
-H, --half-screen show OSD on right half of the screen only
-v, --verbose (slightly) more verbose operation
For detailed information, visit""", __website__
sys.exit(code)
def ListTransitions():
print "Available transitions:"
standard = dict([(tc.__name__, None) for tc in AvailableTransitions])
trans = [(tc.__name__, tc.__doc__) for tc in AllTransitions]
trans.append(('None', "no transition"))
trans.sort()
maxlen = max([len(item[0]) for item in trans])
for name, desc in trans:
if name in standard:
star = '*'
else:
star = ' '
print star, name.ljust(maxlen), '-', desc
print "(transitions with * are enabled by default)"
sys.exit(0)
def TryTime(s, regexp, func):
m = re.match(regexp, s, re.I)
if not m: return 0
return func(map(int, m.groups()))
def ParseTime(s):
return TryTime(s, r'([0-9]+)s?$', lambda m: m[0]) \
or TryTime(s, r'([0-9]+)m$', lambda m: m[0] * 60) \
or TryTime(s, r'([0-9]+)[m:]([0-9]+)[ms]?$', lambda m: m[0] * 60 + m[1]) \
or TryTime(s, r'([0-9]+)[h:]([0-9]+)[hm]?$', lambda m: m[0] * 3600 + m[1] * 60) \
or TryTime(s, r'([0-9]+)[h:]([0-9]+)[m:]([0-9]+)s?$', lambda m: m[0] * 3600 + m[1] * 60 + m[2])
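# Hedged usage note: ParseTime returns seconds and tries the patterns above
# in order, so "1:30" parses as minutes:seconds. Examples: "90" and "90s"
# -> 90, "2m" -> 120, "1:30" -> 90, "1h05" -> 3900, "1:02:03" -> 3723.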
def opterr(msg, extra_lines=[]):
print >>sys.stderr, "command line parse error:", msg
for line in extra_lines:
print >>sys.stderr, line
print >>sys.stderr, "use `%s -h' to get help" % sys.argv[0]
print >>sys.stderr, "or visit", __website__, "for full documentation"
sys.exit(2)
def SetTransitions(list):
global AvailableTransitions
index = dict([(tc.__name__.lower(), tc) for tc in AllTransitions])
index['none'] = None
AvailableTransitions=[]
for trans in list.split(','):
try:
AvailableTransitions.append(index[trans.lower()])
except KeyError:
opterr("unknown transition `%s'" % trans)
def ParseLayoutPosition(value):
xpos = []
ypos = []
for c in value.strip().lower():
if c == 't': ypos.append(0)
elif c == 'b': ypos.append(1)
elif c == 'l': xpos.append(0)
elif c == 'r': xpos.append(1)
elif c == 'c': xpos.append(2)
else: opterr("invalid position specification `%s'" % value)
if not xpos: opterr("position `%s' lacks X component" % value)
if not ypos: opterr("position `%s' lacks Y component" % value)
if len(xpos)>1: opterr("position `%s' has multiple X components" % value)
if len(ypos)>1: opterr("position `%s' has multiple Y components" % value)
return (xpos[0] << 1) | ypos[0]
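# Hedged example of the encoding: the result is (x << 1) | y with y: t=0,
# b=1 and x: l=0, r=1, c=2, so "tl" -> 0, "bl" -> 1, "tr" -> 2, "br" -> 3,
# "tc" -> 4 and "bc" -> 5.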
def SetLayoutSubSpec(key, value):
global OSDTimePos, OSDTitlePos, OSDPagePos, OSDStatusPos
global OSDAlpha, OSDMargin
lkey = key.strip().lower()
if lkey in ('a', 'alpha', 'opacity'):
try:
OSDAlpha = float(value)
except ValueError:
opterr("invalid alpha value `%s'" % value)
if OSDAlpha > 1.0:
OSDAlpha *= 0.01 # accept percentages, too
if (OSDAlpha < 0.0) or (OSDAlpha > 1.0):
opterr("alpha value %s out of range" % value)
elif lkey in ('margin', 'dist', 'distance'):
try:
OSDMargin = float(value)
except ValueError:
opterr("invalid margin value `%s'" % value)
if OSDMargin < 0:
opterr("margin value %s out of range" % value)
elif lkey in ('t', 'time'):
OSDTimePos = ParseLayoutPosition(value)
elif lkey in ('title', 'caption'):
OSDTitlePos = ParseLayoutPosition(value)
elif lkey in ('page', 'number'):
OSDPagePos = ParseLayoutPosition(value)
elif lkey in ('status', 'info'):
OSDStatusPos = ParseLayoutPosition(value)
else:
opterr("unknown layout element `%s'" % key)
def SetLayout(spec):
for sub in spec.replace(':', '=').split(','):
try:
key, value = sub.split('=')
except ValueError:
opterr("invalid layout spec `%s'" % sub)
SetLayoutSubSpec(key, value)
def ParseCacheMode(arg):
arg = arg.strip().lower()
if "none".startswith(arg): return NoCache
if "off".startswith(arg): return NoCache
if "memory".startswith(arg): return MemCache
if arg == 'z': return CompressedCache
if "compressed".startswith(arg): return CompressedCache
if "disk".startswith(arg): return FileCache
if "file".startswith(arg): return FileCache
if "persistent".startswith(arg): return PersistentCache
opterr("invalid cache mode `%s'" % arg)
def ParseAutoOverview(arg):
arg = arg.strip().lower()
if "off".startswith(arg): return Off
if "first".startswith(arg): return First
if "last".startswith(arg): return Last
try:
i = int(arg)
assert (i >= Off) and (i <= Last)
return i
except:
opterr("invalid auto-overview mode `%s'" % arg)
def ParseOptions(argv):
global FileName, FileList, Fullscreen, Scaling, Supersample, CacheMode
global TransitionDuration, MouseHideDelay, BoxFadeDuration, ZoomDuration
global ScreenWidth, ScreenHeight, InitialPage, Wrap, TimeTracking
global AutoAdvance, RenderToDirectory, Rotation, DAR, Verbose
global BackgroundRendering, UseAutoScreenSize, PollInterval, CacheFileName
global PageRangeStart, PageRangeEnd, FontList, FontSize, Gamma, BlackLevel
global EstimatedDuration, CursorImage, CursorHotspot, MinutesOnly, Overscan
global PDFRendererPath, InfoScriptPath, EventTestMode
global AutoOverview, ZoomFactor, FadeInOut, ShowLogo, Shuffle, PageProgress
global QuitAtEnd, ShowClock, HalfScreen, SpotRadius, InvertPages
global MinBoxSize, AutoAutoAdvance, AutoAdvanceProgress, BoxFadeDarkness
global WindowPos, FakeFullscreen, UseBlurShader
DefaultControls = True
try: # unused short options: jnJKNRUY
opts, args = getopt.getopt(argv, \
"vhfg:sc:i:wa:t:lo:r:T:D:B:Z:P:A:mbp:u:F:S:G:d:C:ML:I:O:z:xXqV:QHykWe:E:", \
["help", "fullscreen", "geometry=", "scale", "supersample", \
"nocache", "initialpage=", "wrap", "auto=", "listtrans", "output=", \
"rotate=", "transition=", "transtime=", "mousedelay=", "boxfade=", \
"zoom=", "gspath=", "renderer=", "aspect=", "memcache", \
"noback", "pages=", "poll=", "font=", "fontsize=", "gamma=",
"duration=", "cursor=", "minutes", "layout=", "script=", "cache=",
"cachefile=", "autooverview=", "zoomtime=", "fade", "nologo",
"shuffle", "page-progress", "overscan", "autoquit", "noclicks",
"clock", "half-screen", "spot-radius=", "invert", "min-box-size=",
"auto-auto", "auto-progress", "darkness=", "no-clicks", "nowheel",
"no-wheel", "fake-fullscreen", "windowed", "verbose", "noblur",
"tracking", "bind=", "controls=", "control-help", "evtest"])
except getopt.GetoptError, message:
opterr(message)
for opt, arg in opts:
if opt in ("-h", "--help"):
HelpExit()
if opt in ("-l", "--listtrans"):
ListTransitions()
if opt in ("-v", "--verbose"):
Verbose = not(Verbose)
if opt == "--fullscreen": Fullscreen, FakeFullscreen = True, False
if opt == "--fake-fullscreen": Fullscreen, FakeFullscreen = True, True
if opt == "--windowed": Fullscreen, FakeFullscreen = False, False
if opt == "-f":
if FakeFullscreen: Fullscreen, FakeFullscreen = True, False
elif Fullscreen: Fullscreen, FakeFullscreen = False, False
else: Fullscreen, FakeFullscreen = True, True
if opt in ("-s", "--scale"):
Scaling = not(Scaling)
if opt in ("-s", "--supersample"):
Supersample = 2
if opt in ("-w", "--wrap"):
Wrap = not(Wrap)
if opt in ("-x", "--fade"):
FadeInOut = not(FadeInOut)
if opt in ("-O", "--autooverview"):
AutoOverview = ParseAutoOverview(arg)
if opt in ("-c", "--cache"):
CacheMode = ParseCacheMode(arg)
if opt == "--nocache":
print >>sys.stderr, "Note: The `--nocache' option is deprecated, use `--cache none' instead."
CacheMode = NoCache
if opt in ("-m", "--memcache"):
print >>sys.stderr, "Note: The `--memcache' option is deprecated, use `--cache memory' instead."
CacheMode = MemCache
if opt == "--cachefile":
CacheFileName = arg
CacheMode = PersistentCache
if opt in ("-M", "--minutes"):
MinutesOnly = not(MinutesOnly)
if opt in ("-b", "--noback"):
BackgroundRendering = not(BackgroundRendering)
if opt in ("-t", "--transition"):
SetTransitions(arg)
if opt in ("-L", "--layout"):
SetLayout(arg)
if opt in ("-o", "--output"):
RenderToDirectory = arg
if opt in ("-I", "--script"):
InfoScriptPath = arg
if opt in ("-F", "--font"):
FontList = [arg]
if opt == "--nologo":
ShowLogo = not(ShowLogo)
if opt in ("--noclicks", "--no-clicks"):
if not DefaultControls:
print >>sys.stderr, "Note: The default control settings have been modified, the `--noclicks' option might not work as expected."
BindEvent("lmb, rmb, ctrl+lmb, ctrl+rmb -= goto-next, goto-prev, goto-next-notrans, goto-prev-notrans")
if opt in ("-W", "--nowheel", "--no-wheel"):
if not DefaultControls:
print >>sys.stderr, "Note: The default control settings have been modified, the `--nowheel' option might not work as expected."
BindEvent("wheelup, wheeldown, ctrl+wheelup, ctrl+wheeldown -= goto-next, goto-prev, goto-next-notrans, goto-prev-notrans, overview-next, overview-prev")
if opt in ("-e", "--bind"):
BindEvent(arg, error_prefix="--bind")
DefaultControls = False
if opt in ("-E", "--controls"):
ParseInputBindingFile(arg)
DefaultControls = False
if opt == "--control-help":
EventHelp()
sys.exit(0)
if opt == "--evtest":
EventTestMode = not(EventTestMode)
if opt == "--clock":
ShowClock = not(ShowClock)
if opt == "--tracking":
TimeTracking = not(TimeTracking)
if opt in ("-X", "--shuffle"):
Shuffle = not(Shuffle)
if opt in ("-Q", "--autoquit"):
QuitAtEnd = not(QuitAtEnd)
if opt in ("-y", "--auto-auto"):
AutoAutoAdvance = not(AutoAutoAdvance)
if opt in ("-k", "--auto-progress"):
AutoAdvanceProgress = not(AutoAdvanceProgress)
if opt in ("-q", "--page-progress"):
PageProgress = not(PageProgress)
if opt in ("-H", "--half-screen"):
HalfScreen = not(HalfScreen)
if HalfScreen:
ZoomDuration = 0
if opt == "--invert":
InvertPages = not(InvertPages)
if opt in ("-P", "--gspath", "--renderer"):
if any(r.supports(arg) for r in AvailableRenderers):
PDFRendererPath = arg
else:
opterr("unrecognized --renderer",
["supported renderer binaries are:"] +
["- %s (%s)" % (", ".join(r.binaries), r.name) for r in AvailableRenderers])
if opt in ("-S", "--fontsize"):
try:
FontSize = int(arg)
assert FontSize > 0
except:
opterr("invalid parameter for --fontsize")
if opt in ("-i", "--initialpage"):
try:
InitialPage = int(arg)
assert InitialPage > 0
except:
opterr("invalid parameter for --initialpage")
if opt in ("-d", "--duration"):
try:
EstimatedDuration = ParseTime(arg)
assert EstimatedDuration > 0
except:
opterr("invalid parameter for --duration")
if opt in ("-a", "--auto"):
try:
AutoAdvance = int(float(arg) * 1000)
assert (AutoAdvance > 0) and (AutoAdvance <= 86400000)
except:
opterr("invalid parameter for --auto")
if opt in ("-T", "--transtime"):
try:
TransitionDuration = int(arg)
assert (TransitionDuration >= 0) and (TransitionDuration < 32768)
except:
opterr("invalid parameter for --transtime")
if opt in ("-D", "--mousedelay"):
try:
MouseHideDelay = int(arg)
assert (MouseHideDelay >= 0) and (MouseHideDelay < 32768)
except:
opterr("invalid parameter for --mousedelay")
if opt in ("-B", "--boxfade"):
try:
BoxFadeDuration = int(arg)
assert (BoxFadeDuration >= 0) and (BoxFadeDuration < 32768)
except:
opterr("invalid parameter for --boxfade")
if opt in ("-Z", "--zoomtime"):
try:
ZoomDuration = int(arg)
assert (ZoomDuration >= 0) and (ZoomDuration < 32768)
except:
opterr("invalid parameter for --zoomtime")
if opt == "--spot-radius":
try:
SpotRadius = int(arg)
except:
opterr("invalid parameter for --spot-radius")
if opt == "--min-box-size":
try:
MinBoxSize = int(arg)
except:
opterr("invalid parameter for --min-box-size")
if opt in ("-r", "--rotate"):
try:
Rotation = int(arg)
except:
opterr("invalid parameter for --rotate")
while Rotation < 0: Rotation += 4
Rotation = Rotation & 3
if opt in ("-u", "--poll"):
try:
PollInterval = int(arg)
assert PollInterval >= 0
except:
opterr("invalid parameter for --poll")
if opt in ("-g", "--geometry"):
try:
parts = arg.replace('+', '|+').replace('-', '|-').split('|')
assert len(parts) in (1, 3)
if len(parts) == 3:
WindowPos = (int(parts[1]), int(parts[2]))
else:
assert len(parts) == 1
ScreenWidth, ScreenHeight = map(int, parts[0].split("x"))
assert (ScreenWidth >= 320) and (ScreenWidth < 32768)
assert (ScreenHeight >= 200) and (ScreenHeight < 32768)
UseAutoScreenSize = False
except:
opterr("invalid parameter for --geometry")
if opt in ("-p", "--pages"):
try:
PageRangeStart, PageRangeEnd = map(int, arg.split("-"))
assert PageRangeStart > 0
assert PageRangeStart <= PageRangeEnd
except:
opterr("invalid parameter for --pages")
InitialPage = PageRangeStart
if opt in ("-A", "--aspect"):
try:
if ':' in arg:
fx, fy = map(float, arg.split(':'))
DAR = fx / fy
else:
DAR = float(arg)
assert DAR > 0.0
except:
opterr("invalid parameter for --aspect")
if opt in ("-G", "--gamma"):
try:
if ':' in arg:
arg, bl = arg.split(':', 1)
BlackLevel = int(bl)
Gamma = float(arg)
assert Gamma > 0.0
assert (BlackLevel >= 0) and (BlackLevel < 255)
except:
opterr("invalid parameter for --gamma")
if opt in ("-C", "--cursor"):
try:
if ':' in arg:
arg = arg.split(':')
assert len(arg) > 1
CursorImage = ':'.join(arg[:-1])
CursorHotspot = map(int, arg[-1].split(','))
else:
CursorImage = arg
except:
opterr("invalid parameter for --cursor")
if opt in ("-z", "--zoom"):
try:
ZoomFactor = int(arg)
assert ZoomFactor > 1
except:
opterr("invalid parameter for --zoom")
if opt in ("-V", "--overscan"):
try:
Overscan = int(arg)
except:
opterr("invalid parameter for --overscan")
if opt == "--darkness":
try:
BoxFadeDarkness = float(arg) * 0.01
except:
opterr("invalid parameter for --darkness")
if opt == "--noblur":
UseBlurShader = not(UseBlurShader)
for arg in args:
AddFile(arg)
if not(FileList) and not(EventTestMode):
opterr("no playable files specified")
# use this function if you intend to use Impressive as a library
def run():
try:
run_main()
except SystemExit, e:
return e.code
if __name__ == "__main__":
try:
ParseOptions(sys.argv[1:])
run_main()
finally:
if not(CleanExit) and (os.name == 'nt') and getattr(sys, "frozen", False):
print
raw_input("<-- press ENTER to quit the program --> ")
| bcousson/impressive | impressive.py | Python | gpl-2.0 | 244,069 | ["VisIt"] | 74287b42cbab3de537008a78d453abd29b9534b4d30dbe53046676c3f139310d |
"""
This module starts from an aterm graph and finds a suitable executor for
each subgraph that such an executor can handle.
"""
import blaze
from blaze.engine import pipeline
from blaze.engine import llvm_execution
from blaze.engine import dispatch
class ExecutionPipeline(object):
def __init__(self):
self.pipeline = [
build_operand_dict,
try_llvm,
execute,
]
def run_pipeline(self, pipeline_context, aterm_graph):
# Map executor IDs to executor objects
executors = {}
pipeline_context['executors'] = executors
for substitutor in self.pipeline:
aterm_graph = substitutor(pipeline_context, aterm_graph)
return pipeline_context['result']
def build_operand_dict(pipeline_context, aterm_graph):
operands = pipeline_context['operands']
operand_dict = dict((id(op), op) for op in operands)
pipeline_context['operand_dict'] = operand_dict
return aterm_graph
def try_llvm(pipeline_context, aterm_graph):
"Substitute executors for the parts of the graph we can handle"
executors = pipeline_context['executors']
aterm_graph = llvm_execution.substitute_llvm_executors(aterm_graph, executors)
return aterm_graph
def execute(pipeline_context, aterm_graph):
"Execute the executor graph"
operands = pipeline_context['operand_dict']
executors = pipeline_context['executors']
visitor = dispatch.ExecutorDispatcher(operands, executors)
result = visitor.visit(aterm_graph)
pipeline_context['result'] = result
return aterm_graph
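# Hedged usage sketch (illustrative only; 'operands' and 'aterm_graph' are
# assumed to come from the blaze front end):
#
#   pipe = ExecutionPipeline()
#   ctx = {'operands': operands}
#   result = pipe.run_pipeline(ctx, aterm_graph)
#
# run_pipeline threads the graph through build_operand_dict, try_llvm and
# execute in turn, then returns whatever execute stored in ctx['result'].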
| davidcoallier/blaze | blaze/engine/execution_pipeline.py | Python | bsd-2-clause | 1,595 | ["VisIt"] | f5025c26bada9c7f6a9598b156ef24c7b178beddb563166f5618da16019b95cc |
""" Unit tests for pipelines expressed via arlexecute
"""
import logging
import sys
import unittest
import numpy
from astropy import units as u
from astropy.coordinates import SkyCoord
from data_models.polarisation import PolarisationFrame
from data_models.memory_data_models import BlockVisibility, Visibility
from processing_components.griddata.convolution_functions import apply_bounding_box_convolutionfunction
from processing_components.griddata.kernels import create_awterm_convolutionfunction
from workflows.arlexecute.imaging.imaging_arlexecute import zero_list_arlexecute_workflow, \
predict_list_arlexecute_workflow, invert_list_arlexecute_workflow, subtract_list_arlexecute_workflow, \
weight_list_arlexecute_workflow, residual_list_arlexecute_workflow, sum_invert_results_arlexecute, \
restore_list_arlexecute_workflow
from workflows.shared.imaging.imaging_shared import sum_invert_results, sum_invert_results_local
from wrappers.arlexecute.execution_support.arlexecutebase import ARLExecuteBase
from wrappers.arlexecute.execution_support.dask_init import get_dask_Client
from wrappers.arlexecute.image.operations import export_image_to_fits, smooth_image, qa_image
from wrappers.arlexecute.imaging.base import predict_skycomponent_visibility
from wrappers.arlexecute.simulation.testing_support import ingest_unittest_visibility, \
create_unittest_model, insert_unittest_errors, create_unittest_components
from processing_components.simulation.configurations import create_named_configuration
from wrappers.arlexecute.skycomponent.operations import find_skycomponents, find_nearest_skycomponent, \
insert_skycomponent
from processing_components.visibility.coalesce import convert_blockvisibility_to_visibility
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler(sys.stdout))
log.addHandler(logging.StreamHandler(sys.stderr))
class TestImaging(unittest.TestCase):
def setUp(self):
client = get_dask_Client(memory_limit=4 * 1024 * 1024 * 1024, n_workers=4, dashboard_address=None)
global arlexecute
arlexecute = ARLExecuteBase(use_dask=True)
arlexecute.set_client(client, verbose=True)
from data_models.parameters import arl_path
self.dir = arl_path('test_results')
self.persist = False
def tearDown(self):
global arlexecute
arlexecute.close()
del arlexecute
def actualSetUp(self, add_errors=False, freqwin=3, block=False, dospectral=True, dopol=False, zerow=False,
makegcfcf=False):
self.npixel = 256
self.low = create_named_configuration('LOWBD2', rmax=750.0)
self.freqwin = freqwin
self.vis_list = list()
self.ntimes = 5
self.cellsize = 0.0005
# Choose the interval so that the maximum change in w is smallish
integration_time = numpy.pi * (24 / (12 * 60))
self.times = numpy.linspace(-integration_time * (self.ntimes // 2), integration_time * (self.ntimes // 2),
self.ntimes)
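# Hedged arithmetic note: integration_time = pi * (24 / 720) = pi / 30
# ~= 0.1047 (true division under Python 3), so the five samples sit at
# -2, -1, 0, +1 and +2 integration intervals.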
if freqwin > 1:
self.frequency = numpy.linspace(0.8e8, 1.2e8, self.freqwin)
self.channelwidth = numpy.array(freqwin * [self.frequency[1] - self.frequency[0]])
else:
self.frequency = numpy.array([1.0e8])
self.channelwidth = numpy.array([4e7])
if dopol:
self.vis_pol = PolarisationFrame('linear')
self.image_pol = PolarisationFrame('stokesIQUV')
f = numpy.array([100.0, 20.0, -10.0, 1.0])
else:
self.vis_pol = PolarisationFrame('stokesI')
self.image_pol = PolarisationFrame('stokesI')
f = numpy.array([100.0])
if dospectral:
flux = numpy.array([f * numpy.power(freq / 1e8, -0.7) for freq in self.frequency])
else:
flux = numpy.array([f])
self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
self.bvis_list = [arlexecute.execute(ingest_unittest_visibility)(self.low,
[self.frequency[freqwin]],
[self.channelwidth[freqwin]],
self.times,
self.vis_pol,
self.phasecentre, block=True,
zerow=zerow)
for freqwin, _ in enumerate(self.frequency)]
self.vis_list = [arlexecute.execute(convert_blockvisibility_to_visibility)(bvis) for bvis in self.bvis_list]
self.model_list = [arlexecute.execute(create_unittest_model, nout=freqwin)(self.vis_list[freqwin],
self.image_pol,
cellsize=self.cellsize,
npixel=self.npixel)
for freqwin, _ in enumerate(self.frequency)]
self.components_list = [arlexecute.execute(create_unittest_components)(self.model_list[freqwin],
flux[freqwin, :][numpy.newaxis, :],
single=True)
for freqwin, _ in enumerate(self.frequency)]
self.components_list = arlexecute.compute(self.components_list, sync=True)
self.model_list = [arlexecute.execute(insert_skycomponent, nout=1)(self.model_list[freqwin],
self.components_list[freqwin])
for freqwin, _ in enumerate(self.frequency)]
self.model_list = arlexecute.compute(self.model_list, sync=True)
self.vis_list = [arlexecute.execute(predict_skycomponent_visibility)(self.vis_list[freqwin],
self.components_list[freqwin])
for freqwin, _ in enumerate(self.frequency)]
centre = self.freqwin // 2
# Calculate the model convolved with a Gaussian.
self.model = self.model_list[centre]
self.cmodel = smooth_image(self.model)
if self.persist: export_image_to_fits(self.model, '%s/test_imaging_model.fits' % self.dir)
if self.persist: export_image_to_fits(self.cmodel, '%s/test_imaging_cmodel.fits' % self.dir)
if add_errors and block:
self.vis_list = [arlexecute.execute(insert_unittest_errors)(self.vis_list[i])
for i, _ in enumerate(self.frequency)]
self.components = self.components_list[centre]
if makegcfcf:
self.gcfcf = [create_awterm_convolutionfunction(self.model, nw=61, wstep=16.0,
oversampling=8,
support=64,
use_aaf=True)]
self.gcfcf_clipped = [(self.gcfcf[0][0], apply_bounding_box_convolutionfunction(self.gcfcf[0][1],
fractional_level=1e-3))]
self.gcfcf_joint = [create_awterm_convolutionfunction(self.model, nw=11, wstep=16.0,
oversampling=8,
support=64,
use_aaf=True)]
else:
self.gcfcf = None
self.gcfcf_clipped = None
self.gcfcf_joint = None
def test_time_setup(self):
self.actualSetUp()
def _checkcomponents(self, dirty, fluxthreshold=0.6, positionthreshold=1.0):
comps = find_skycomponents(dirty, fwhm=1.0, threshold=10 * fluxthreshold, npixels=5)
assert len(comps) == len(self.components), "Different number of components found: original %d, recovered %d" % \
(len(self.components), len(comps))
cellsize = abs(dirty.wcs.wcs.cdelt[0])
for comp in comps:
# Check for agreement in direction
ocomp, separation = find_nearest_skycomponent(comp.direction, self.components)
assert separation / cellsize < positionthreshold, \
"Component differs in position by %.3f pixels" % (separation / cellsize)
def _predict_base(self, context='2d', extra='', fluxthreshold=1.0, facets=1, vis_slices=1,
gcfcf=None, **kwargs):
centre = self.freqwin // 2
vis_list = zero_list_arlexecute_workflow(self.vis_list)
vis_list = predict_list_arlexecute_workflow(vis_list, self.model_list, context=context,
vis_slices=vis_slices, facets=facets,
gcfcf=gcfcf, **kwargs)
vis_list = subtract_list_arlexecute_workflow(self.vis_list, vis_list)
vis_list = arlexecute.compute(vis_list, sync=True)
dirty = invert_list_arlexecute_workflow(vis_list, self.model_list, context=context, dopsf=False,
gcfcf=gcfcf, normalize=True, vis_slices=vis_slices)
dirty = arlexecute.compute(dirty, sync=True)[centre]
assert numpy.max(numpy.abs(dirty[0].data)), "Residual image is empty"
if self.persist: export_image_to_fits(dirty[0], '%s/test_imaging_predict_%s%s_%s_dirty.fits' %
(self.dir, context, extra, arlexecute.type()))
maxabs = numpy.max(numpy.abs(dirty[0].data))
assert maxabs < fluxthreshold, "Error %.3f greater than fluxthreshold %.3f " % (maxabs, fluxthreshold)
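# Hedged summary of the predict test pattern above: zero the visibilities,
# predict them back from the known model image, subtract the prediction
# from the original visibilities and image the difference; the test passes
# when the peak of that residual dirty image stays below fluxthreshold.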
def _invert_base(self, context, extra='', fluxthreshold=1.0, positionthreshold=1.0, check_components=True,
facets=1, vis_slices=1, gcfcf=None, **kwargs):
centre = self.freqwin // 2
dirty = invert_list_arlexecute_workflow(self.vis_list, self.model_list, context=context,
dopsf=False, normalize=True, facets=facets, vis_slices=vis_slices,
gcfcf=gcfcf, **kwargs)
dirty = arlexecute.compute(dirty, sync=True)[centre]
print(dirty)
if self.persist: export_image_to_fits(dirty[0], '%s/test_imaging_invert_%s%s_%s_dirty.fits' %
(self.dir, context, extra, arlexecute.type()))
assert numpy.max(numpy.abs(dirty[0].data)), "Image is empty"
if check_components:
self._checkcomponents(dirty[0], fluxthreshold, positionthreshold)
def test_predict_2d(self):
self.actualSetUp(zerow=True)
self._predict_base(context='2d')
@unittest.skip("Facets need overlap")
def test_predict_facets(self):
self.actualSetUp()
self._predict_base(context='facets', fluxthreshold=17.0, facets=4)
@unittest.skip("Timeslice predict needs better interpolation and facets need overlap")
def test_predict_facets_timeslice(self):
self.actualSetUp()
self._predict_base(context='facets_timeslice', fluxthreshold=19.0, facets=8, vis_slices=self.ntimes)
@unittest.skip("Facets need overlap")
def test_predict_facets_wprojection(self):
self.actualSetUp(makegcfcf=True)
self._predict_base(context='facets', extra='_wprojection', facets=8, fluxthreshold=15.0,
gcfcf=self.gcfcf_joint)
@unittest.skip("Facets need overlap")
def test_predict_facets_wstack(self):
self.actualSetUp()
self._predict_base(context='facets_wstack', fluxthreshold=15.0, facets=8, vis_slices=101)
def test_predict_timeslice(self):
self.actualSetUp()
self._predict_base(context='timeslice', fluxthreshold=3.0, vis_slices=self.ntimes)
def test_predict_wsnapshots(self):
self.actualSetUp(makegcfcf=True)
self._predict_base(context='wsnapshots', fluxthreshold=3.0,
vis_slices=self.ntimes // 2, gcfcf=self.gcfcf_joint)
def test_predict_wprojection(self):
self.actualSetUp(makegcfcf=True)
self._predict_base(context='2d', extra='_wprojection', fluxthreshold=1.0,
gcfcf=self.gcfcf)
def test_predict_wprojection_clip(self):
self.actualSetUp(makegcfcf=True)
self._predict_base(context='2d', extra='_wprojection_clipped', fluxthreshold=1.0,
gcfcf=self.gcfcf_clipped)
def test_predict_wstack(self):
self.actualSetUp()
self._predict_base(context='wstack', fluxthreshold=1.0, vis_slices=101)
def test_predict_wstack_serial(self):
self.actualSetUp()
self._predict_base(context='wstack', fluxthreshold=1.0, vis_slices=101, use_serial_predict=True)
def test_predict_wstack_wprojection(self):
self.actualSetUp(makegcfcf=True)
self._predict_base(context='wstack', extra='_wprojection', fluxthreshold=1.0, vis_slices=11,
gcfcf=self.gcfcf_joint)
def test_predict_wstack_spectral(self):
self.actualSetUp(dospectral=True)
self._predict_base(context='wstack', extra='_spectral', fluxthreshold=4.0, vis_slices=101)
@unittest.skip("Too much for jenkins")
def test_predict_wstack_spectral_pol(self):
self.actualSetUp(dospectral=True, dopol=True)
self._predict_base(context='wstack', extra='_spectral', fluxthreshold=4.0, vis_slices=101)
def test_invert_2d(self):
self.actualSetUp(zerow=True)
self._invert_base(context='2d', positionthreshold=2.0, check_components=False)
def test_invert_2d_uniform(self):
self.actualSetUp(zerow=True, makegcfcf=True)
self.vis_list = weight_list_arlexecute_workflow(self.vis_list, self.model_list, gcfcf=self.gcfcf,
weighting='uniform')
self._invert_base(context='2d', extra='_uniform', positionthreshold=2.0, check_components=False)
def test_invert_2d_uniform_block(self):
self.actualSetUp(zerow=True, makegcfcf=True, block=True)
self.bvis_list = weight_list_arlexecute_workflow(self.bvis_list, self.model_list, gcfcf=self.gcfcf,
weighting='uniform')
self.bvis_list = arlexecute.compute(self.bvis_list, sync=True)
assert isinstance(self.bvis_list[0], BlockVisibility)
def test_invert_2d_uniform_nogcfcf(self):
self.actualSetUp(zerow=True)
self.vis_list = weight_list_arlexecute_workflow(self.vis_list, self.model_list)
self._invert_base(context='2d', extra='_uniform', positionthreshold=2.0, check_components=False)
@unittest.skip("Facets need overlap")
def test_invert_facets(self):
self.actualSetUp()
self._invert_base(context='facets', positionthreshold=2.0, check_components=True, facets=8)
@unittest.skip("Facets need overlap")
def test_invert_facets_timeslice(self):
self.actualSetUp()
self._invert_base(context='facets_timeslice', check_components=True, vis_slices=self.ntimes,
positionthreshold=5.0, fluxthreshold=1.0, facets=8)
@unittest.skip("Facets need overlap")
def test_invert_facets_wprojection(self):
self.actualSetUp(makegcfcf=True)
self._invert_base(context='facets', extra='_wprojection', check_components=True,
positionthreshold=2.0, facets=4, gcfcf=self.gcfcf)
@unittest.skip("Facets need overlap")
def test_invert_facets_wstack(self):
self.actualSetUp()
self._invert_base(context='facets_wstack', positionthreshold=1.0, check_components=False, facets=4,
vis_slices=101)
def test_invert_timeslice(self):
self.actualSetUp()
self._invert_base(context='timeslice', positionthreshold=1.0, check_components=True,
vis_slices=self.ntimes)
def test_invert_wsnapshots(self):
self.actualSetUp(makegcfcf=True)
self._invert_base(context='wsnapshots', positionthreshold=1.0,
check_components=True, vis_slices=self.ntimes // 2, gcfcf=self.gcfcf_joint)
def test_invert_wprojection(self):
self.actualSetUp(makegcfcf=True)
self._invert_base(context='2d', extra='_wprojection', positionthreshold=2.0, gcfcf=self.gcfcf)
def test_invert_wprojection_clip(self):
self.actualSetUp(makegcfcf=True)
self._invert_base(context='2d', extra='_wprojection_clipped', positionthreshold=2.0,
gcfcf=self.gcfcf_clipped)
def test_invert_wprojection_wstack(self):
self.actualSetUp(makegcfcf=True)
self._invert_base(context='wstack', extra='_wprojection', positionthreshold=1.0, vis_slices=11,
gcfcf=self.gcfcf_joint)
def test_invert_wstack(self):
self.actualSetUp()
self._invert_base(context='wstack', positionthreshold=1.0, vis_slices=101)
def test_invert_wstack_spectral(self):
self.actualSetUp(dospectral=True)
self._invert_base(context='wstack', extra='_spectral', positionthreshold=2.0,
vis_slices=101)
@unittest.skip("Too much for jenkins")
def test_invert_wstack_spectral_pol(self):
self.actualSetUp(dospectral=True, dopol=True)
self._invert_base(context='wstack', extra='_spectral_pol', positionthreshold=2.0,
vis_slices=101)
def test_zero_list(self):
self.actualSetUp()
centre = self.freqwin // 2
vis_list = zero_list_arlexecute_workflow(self.vis_list)
vis_list = arlexecute.compute(vis_list, sync=True)
assert numpy.max(numpy.abs(vis_list[centre].vis)) < 1e-15, numpy.max(numpy.abs(vis_list[centre].vis))
predicted_vis_list = [arlexecute.execute(predict_skycomponent_visibility)(vis_list[freqwin],
self.components_list[freqwin])
for freqwin, _ in enumerate(self.frequency)]
predicted_vis_list = arlexecute.compute(predicted_vis_list, sync=True)
assert numpy.max(numpy.abs(predicted_vis_list[centre].vis)) > 0.0, \
numpy.max(numpy.abs(predicted_vis_list[centre].vis))
diff_vis_list = subtract_list_arlexecute_workflow(self.vis_list, predicted_vis_list)
diff_vis_list = arlexecute.compute(diff_vis_list, sync=True)
assert numpy.max(numpy.abs(diff_vis_list[centre].vis)) < 1e-15, numpy.max(numpy.abs(diff_vis_list[centre].vis))
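    # The three steps above check, in order, that zeroed visibilities are
    # numerically zero, that predicting the sky components yields non-zero
    # visibilities, and that subtracting that prediction from the original
    # data leaves numerically zero residuals.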
def test_residual_list(self):
self.actualSetUp(zerow=True)
centre = self.freqwin // 2
residual_image_list = residual_list_arlexecute_workflow(self.vis_list, self.model_list, context='2d')
residual_image_list = arlexecute.compute(residual_image_list, sync=True)
qa = qa_image(residual_image_list[centre][0])
assert numpy.abs(qa.data['max'] - 0.35139716991480785) < 1.0, str(qa)
assert numpy.abs(qa.data['min'] + 0.7681701460717593) < 1.0, str(qa)
def test_restored_list(self):
self.actualSetUp(zerow=True)
centre = self.freqwin // 2
psf_image_list = invert_list_arlexecute_workflow(self.vis_list, self.model_list, context='2d', dopsf=True)
residual_image_list = residual_list_arlexecute_workflow(self.vis_list, self.model_list, context='2d')
restored_image_list = restore_list_arlexecute_workflow(self.model_list, psf_image_list, residual_image_list,
psfwidth=1.0)
restored_image_list = arlexecute.compute(restored_image_list, sync=True)
if self.persist: export_image_to_fits(restored_image_list[centre], '%s/test_imaging_invert_%s_restored.fits' %
(self.dir, arlexecute.type()))
qa = qa_image(restored_image_list[centre])
assert numpy.abs(qa.data['max'] - 99.43438263927834) < 1e-7, str(qa)
assert numpy.abs(qa.data['min'] + 0.6328915148563365) < 1e-7, str(qa)
def test_restored_list_noresidual(self):
self.actualSetUp(zerow=True)
centre = self.freqwin // 2
psf_image_list = invert_list_arlexecute_workflow(self.vis_list, self.model_list, context='2d', dopsf=True)
restored_image_list = restore_list_arlexecute_workflow(self.model_list, psf_image_list, psfwidth=1.0)
restored_image_list = arlexecute.compute(restored_image_list, sync=True)
if self.persist: export_image_to_fits(restored_image_list[centre],
'%s/test_imaging_invert_%s_restored_noresidual.fits' %
(self.dir, arlexecute.type()))
qa = qa_image(restored_image_list[centre])
assert numpy.abs(qa.data['max'] - 100.0) < 1e-7, str(qa)
assert numpy.abs(qa.data['min']) < 1e-7, str(qa)
def test_restored_list_facet(self):
self.actualSetUp(zerow=True)
centre = self.freqwin // 2
psf_image_list = invert_list_arlexecute_workflow(self.vis_list, self.model_list, context='2d', dopsf=True)
residual_image_list = residual_list_arlexecute_workflow(self.vis_list, self.model_list, context='2d')
restored_4facets_image_list = restore_list_arlexecute_workflow(self.model_list, psf_image_list,
residual_image_list,
restore_facets=4, psfwidth=1.0)
restored_4facets_image_list = arlexecute.compute(restored_4facets_image_list, sync=True)
restored_1facets_image_list = restore_list_arlexecute_workflow(self.model_list, psf_image_list,
residual_image_list,
restore_facets=1, psfwidth=1.0)
restored_1facets_image_list = arlexecute.compute(restored_1facets_image_list, sync=True)
if self.persist: export_image_to_fits(restored_4facets_image_list[0],
'%s/test_imaging_invert_%s_restored_4facets.fits' %
(self.dir, arlexecute.type()))
qa = qa_image(restored_4facets_image_list[centre])
assert numpy.abs(qa.data['max'] - 99.43438263927833) < 1e-7, str(qa)
assert numpy.abs(qa.data['min'] + 0.6328915148563354) < 1e-7, str(qa)
restored_4facets_image_list[centre].data -= restored_1facets_image_list[centre].data
if self.persist: export_image_to_fits(restored_4facets_image_list[centre],
'%s/test_imaging_invert_%s_restored_4facets_error.fits' %
(self.dir, arlexecute.type()))
qa = qa_image(restored_4facets_image_list[centre])
assert numpy.abs(qa.data['max']) < 1e-10, str(qa)
def test_sum_invert_list(self):
self.actualSetUp(zerow=True)
residual_image_list = residual_list_arlexecute_workflow(self.vis_list, self.model_list, context='2d')
residual_image_list = arlexecute.compute(residual_image_list, sync=True)
route2 = sum_invert_results(residual_image_list)
route1 = sum_invert_results_arlexecute(residual_image_list)
route1 = arlexecute.compute(route1, sync=True)
for r in route1, route2:
assert len(r) == 2
qa = qa_image(r[0])
assert numpy.abs(qa.data['max'] - 0.35139716991480785) < 1.0, str(qa)
assert numpy.abs(qa.data['min'] + 0.7681701460717593) < 1.0, str(qa)
            assert numpy.abs(r[1] - 415950.0) < 1e-7, str(r[1])
if __name__ == '__main__':
unittest.main()
|
SKA-ScienceDataProcessor/algorithm-reference-library
|
tests/workflows/test_imaging_arlexecute.py
|
Python
|
apache-2.0
| 25,025
|
[
"Gaussian"
] |
2abb25253bf4f8dabd7c672a6ab0a1a79ec1f62cbd88d95926082f2ccf78cbaf
|
# Author: Robert McGibbon <rmcgibbo@gmail.com>
# Contributors: Matthew Harrigan <matthew.harrigan@outlook.com>
# Copyright (c) 2015, Stanford University
# All rights reserved.
from __future__ import absolute_import, print_function, division
import sys
import os
import re
import glob
from os.path import join, exists, expanduser
import socket
import getpass
from datetime import datetime
try:
    from collections.abc import Sequence
except ImportError:  # Python 2 / Python < 3.3
    from collections import Sequence
import tables
import mdtraj as md
from mdtraj.core.trajectory import _parse_topology
import numpy as np
from . import version
_PYTABLES_DISABLE_COMPRESSION = tables.Filters(complevel=0)
__all__ = ['dataset']
def dataset(path, mode='r', fmt=None, verbose=False, **kwargs):
"""Open a dataset object
MSMBuilder supports several dataset 'formats' for storing
lists of sequences on disk.
This function can also be used as a context manager.
Parameters
----------
path : str
The path to the dataset on the filesystem
mode : {'r', 'w', 'a'}
Open a dataset for reading, writing, or appending. Note that
some formats only support a subset of these modes.
fmt : {'dir-npy', 'hdf5', 'mdtraj'}
The format of the data on disk
``dir-npy``
A directory of binary numpy files, one file per sequence
``hdf5``
A single hdf5 file with each sequence as an array node
``mdtraj``
A read-only set of trajectory files that can be loaded
with mdtraj
verbose : bool
Whether to print information about the dataset
"""
if mode == 'r' and fmt is None:
fmt = _guess_format(path)
elif mode in 'wa' and fmt is None:
raise ValueError('mode="%s", but no fmt. fmt=%s' % (mode, fmt))
if fmt == 'dir-npy':
return NumpyDirDataset(path, mode=mode, verbose=verbose)
elif fmt == 'mdtraj':
return MDTrajDataset(path, mode=mode, verbose=verbose, **kwargs)
elif fmt == 'hdf5':
return HDF5Dataset(path, mode=mode, verbose=verbose)
elif fmt.endswith("-union"):
raise ValueError("union datasets have been removed. "
"Please use msmbuilder.featurizer.FeatureUnion")
else:
raise NotImplementedError("Unknown format fmt='%s'" % fmt)
def _guess_format(path):
"""Guess the format of a dataset based on its filename / filenames.
"""
if os.path.isdir(path):
return 'dir-npy'
if path.endswith('.h5') or path.endswith('.hdf5'):
# TODO: Check for mdtraj .h5 file
return 'hdf5'
# TODO: What about a list of trajectories, e.g. from command line nargs='+'
return 'mdtraj'
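# Illustrative behaviour: _guess_format('feats/') -> 'dir-npy' (a directory),
# _guess_format('feats.h5') -> 'hdf5', and anything else -> 'mdtraj'.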
class _BaseDataset(Sequence):
_PROVENANCE_TEMPLATE = '''MSMBuilder Dataset:
MSMBuilder:\t{version}
Command:\t{cmdline}
Path:\t\t{path}
Username:\t{user}
Hostname:\t{hostname}
Date:\t\t{date}
Comments:\t\t{comments}
'''
_PREV_TEMPLATE = '''
== Derived from ==
{previous}
'''
def __init__(self, path, mode='r', verbose=False):
self.path = path
self.mode = mode
self.verbose = verbose
if mode not in ('r', 'w', 'a'):
raise ValueError('mode must be one of "r", "w", "a"')
if mode in 'wa':
if mode == 'w' and exists(path):
raise ValueError('File exists: %s' % path)
try:
os.makedirs(path)
except OSError:
pass
self._write_provenance()
def create_derived(self, out_path, comments='', fmt=None):
if fmt is None:
out_dataset = self.__class__(out_path, mode='w',
verbose=self.verbose)
else:
out_dataset = dataset(out_path, mode='w', verbose=self.verbose,
fmt=fmt)
out_dataset._write_provenance(previous=self.provenance,
comments=comments)
return out_dataset
def apply(self, fn):
for key in self.keys():
yield fn(self.get(key))
def _build_provenance(self, previous=None, comments=''):
val = self._PROVENANCE_TEMPLATE.format(
version=version.full_version,
cmdline=' '.join(sys.argv),
user=getpass.getuser(),
hostname=socket.gethostname(),
path=self.path,
comments=comments,
date=datetime.now().strftime("%B %d, %Y %I:%M %p"))
if previous:
val += self._PREV_TEMPLATE.format(previous=previous)
return val
def fit_with(self, estimator):
"""Call the fit method of the estimator on this dataset
Parameters
----------
estimator : BaseEstimator
estimator.fit will be called on this dataset.
Returns
-------
estimator
The fit estimator.
"""
estimator.fit(self)
return estimator
def transform_with(self, estimator, out_ds, fmt=None):
"""Call the partial_transform method of the estimator on this dataset
Parameters
----------
estimator : object with ``partial_fit`` method
This object will be used to transform this dataset into a new
dataset. The estimator should be fitted prior to calling
this method.
out_ds : str or Dataset
This dataset will be transformed and saved into out_ds. If
out_ds is a path, a new dataset will be created at that path.
fmt : str
The type of dataset to create if out_ds is a string.
Returns
-------
out_ds : Dataset
            The transformed dataset.
"""
if isinstance(out_ds, str):
out_ds = self.create_derived(out_ds, fmt=fmt)
elif isinstance(out_ds, _BaseDataset):
err = "Dataset must be opened in write mode."
assert out_ds.mode in ('w', 'a'), err
else:
err = "Please specify a dataset path or an existing dataset."
raise ValueError(err)
for key in self.keys():
out_ds[key] = estimator.partial_transform(self[key])
return out_ds
def fit_transform_with(self, estimator, out_ds, fmt=None):
"""Create a new dataset with the given estimator.
The estimator will be fit by this dataset, and then each trajectory
will be transformed by the estimator.
Parameters
----------
estimator : BaseEstimator
This object will be fit and used to transform this dataset
into a new dataset.
out_ds : str or Dataset
This dataset will be transformed and saved into out_ds. If
out_ds is a path, a new dataset will be created at that path.
fmt : str
The type of dataset to create if out_ds is a string.
Returns
-------
out_ds : Dataset
The transformed dataset.
Examples
--------
diheds = dataset("diheds")
tica = diheds.fit_transform_with(tICA(), 'tica')
kmeans = tica.fit_transform_with(KMeans(), 'kmeans')
msm = kmeans.fit_with(MarkovStateModel())
"""
self.fit_with(estimator)
return self.transform_with(estimator, out_ds, fmt=fmt)
@property
def provenance(self):
raise NotImplementedError('implemented in subclass')
def _write_provenance(self, previous=None, comments=''):
raise NotImplementedError('implemented in subclass')
def __len__(self):
return sum(1 for xx in self.keys())
def __getitem__(self, i):
if isinstance(i, slice):
err = "Please index datasets with explicit indices or ds[:]."
assert i.start is None and i.step is None and i.stop is None, err
            return [self.get(key) for key in self.keys()]
return self.get(i)
def __setitem__(self, i, x):
return self.set(i, x)
def __iter__(self):
for key in self.keys():
yield self.get(key)
def keys(self):
# keys()[i], get(i) and set(i, x) should all follow
# the same ordering convention for the indices / items.
        raise NotImplementedError('implemented in subclass')
def items(self):
for key in self.keys():
yield (key, self.get(key))
def get(self, i):
        raise NotImplementedError('implemented in subclass')
def set(self, i, x):
        raise NotImplementedError('implemented in subclass')
def close(self):
pass
def flush(self):
pass
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
class NumpyDirDataset(_BaseDataset):
"""Mixtape dataset container
Parameters
----------
path : str
mode : {'r', 'w', 'a'}
Read, write, or append. If mode is set to 'a' or 'w',
duplicate keys will be overwritten.
Examples
--------
for X in Dataset('path/to/dataset'):
        print(X)
"""
_ITEM_FORMAT = '%08d.npy'
    _ITEM_RE = re.compile(r'(\d{8})\.npy')
_PROVENANCE_FILE = 'PROVENANCE.txt'
def get(self, i, mmap=False):
mmap_mode = 'r' if mmap else None
filename = join(self.path, self._ITEM_FORMAT % i)
if self.verbose:
            print('[NumpyDirDataset] loading %s' % filename)
try:
return np.load(filename, mmap_mode)
except IOError as e:
raise IndexError(e)
def set(self, i, x):
if self.mode not in 'wa':
raise IOError('Dataset not opened for writing')
filename = join(self.path, self._ITEM_FORMAT % i)
if self.verbose:
            print('[NumpyDirDataset] saving %s' % filename)
return np.save(filename, x)
def keys(self):
for fn in sorted(os.listdir(os.path.expanduser(self.path)),
key=_keynat):
match = self._ITEM_RE.match(fn)
if match:
yield int(match.group(1))
@property
def provenance(self):
try:
with open(join(self.path, self._PROVENANCE_FILE), 'r') as f:
return f.read()
except IOError:
return 'No available provenance'
def _write_provenance(self, previous=None, comments=''):
with open(join(self.path, self._PROVENANCE_FILE), 'w') as f:
p = self._build_provenance(previous=previous, comments=comments)
f.write(p)
class HDF5Dataset(_BaseDataset):
_ITEM_FORMAT = 'arr_%d'
    _ITEM_RE = re.compile(r'arr_(\d+)')
def __init__(self, path, mode='r', verbose=False):
if mode not in ('r', 'w'):
raise ValueError('mode must be one of "r", "w"')
if mode == 'w':
if exists(path):
raise ValueError('File exists: %s' % path)
self._handle = tables.open_file(path, mode=mode,
filters=_PYTABLES_DISABLE_COMPRESSION)
self.path = path
self.mode = mode
self.verbose = verbose
if mode == 'w':
self._write_provenance()
def __getstate__(self):
# pickle does not like to pickle the pytables handle, so...
# self.flush()
return {'path': self.path, 'mode': self.mode, 'verbose': self.verbose}
def __setstate__(self, state):
self.path = state['path']
self.mode = state['mode']
self.verbose = state['verbose']
self._handle = tables.open_file(self.path, mode=self.mode,
filters=_PYTABLES_DISABLE_COMPRESSION)
def get(self, i, mmap=False):
return self._handle.get_node('/', self._ITEM_FORMAT % i)[:]
def keys(self):
nodes = self._handle.list_nodes('/')
for node in sorted(nodes, key=lambda x: _keynat(x.name)):
match = self._ITEM_RE.match(node.name)
if match:
yield int(match.group(1))
def set(self, i, x):
if 'w' not in self.mode:
raise IOError('Dataset not opened for writing')
try:
self._handle.create_carray('/', self._ITEM_FORMAT % i, obj=x)
except tables.exceptions.NodeError:
self._handle.remove_node('/', self._ITEM_FORMAT % i)
self.set(i, x)
@property
def provenance(self):
try:
return self._handle.root._v_attrs['provenance']
except KeyError:
return 'No available provenance'
def _write_provenance(self, previous=None, comments=''):
p = self._build_provenance(previous=previous, comments=comments)
self._handle.root._v_attrs['provenance'] = p
def close(self):
if hasattr(self, '_handle'):
self._handle.close()
def flush(self):
self._handle.flush()
def __del__(self):
self.close()
class MDTrajDataset(_BaseDataset):
_PROVENANCE_TEMPLATE = '''MDTraj dataset:
path:\t\t{path}
topology:\t{topology}
stride:\t{stride}
    atom_indices:\t{atom_indices}
'''
def __init__(self, path, mode='r', topology=None, stride=1,
atom_indices=None, verbose=False):
if mode != 'r':
raise ValueError('mode must be "r"')
self.path = path
self.topology = topology
self.stride = stride
self.atom_indices = atom_indices
self.verbose = verbose
if isinstance(path, list):
self.glob_matches = [expanduser(fn) for fn in path]
else:
self.glob_matches = sorted(glob.glob(expanduser(path)), key=_keynat)
if topology is None:
self._topology = None
else:
self._topology = _parse_topology(os.path.expanduser(topology))
def get(self, i):
if self.verbose:
print('[MDTraj dataset] loading %s' % self.filename(i))
if self._topology is None:
t = md.load(self.filename(i), stride=self.stride,
atom_indices=self.atom_indices)
else:
t = md.load(self.filename(i), stride=self.stride,
atom_indices=self.atom_indices, top=self._topology)
return t
def filename(self, i):
return self.glob_matches[i]
def iterload(self, i, chunk):
if self.verbose:
print('[MDTraj dataset] iterloading %s' % self.filename(i))
if self._topology is None:
return md.iterload(
self.filename(i), chunk=chunk, stride=self.stride,
atom_indices=self.atom_indices)
else:
return md.iterload(
self.filename(i), chunk=chunk, stride=self.stride,
atom_indices=self.atom_indices, top=self._topology)
def keys(self):
return iter(range(len(self.glob_matches)))
@property
def provenance(self):
return self._PROVENANCE_TEMPLATE.format(
path=self.path, topology=self.topology,
atom_indices=self.atom_indices, stride=self.stride)
def _dim_match(arr):
if arr.ndim == 1:
return arr[:, np.newaxis]
return arr
def _keynat(string):
"""A natural sort helper function for sort() and sorted()
without using regular expression.
"""
r = []
for c in string:
if c.isdigit():
if r and isinstance(r[-1], int):
r[-1] = r[-1] * 10 + int(c)
else:
r.append(int(c))
else:
r.append(9 + ord(c))
return r
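# Example (illustrative): sorted(['t10', 't2'], key=_keynat) returns
# ['t2', 't10'], whereas plain lexicographic sorted() returns ['t10', 't2'].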
|
stephenliu1989/msmbuilder
|
msmbuilder/dataset.py
|
Python
|
lgpl-2.1
| 15,521
|
[
"MDTraj"
] |
c5492ca8ab66579c294333342e5e1d6ff42fc2b79b632aa916a7618707e0df7b
|
#!/usr/bin/env python
"""
TextureCoordinates
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.3 $
$Date: 2001-05-31 17:48:55 $
Pearu Peterson
"""
import common
import DataSetAttr
class TextureCoordinates(DataSetAttr.DataSetAttr):
"""Holds VTK Texture Coordinates.
Usage:
TextureCoordinates(<sequence of (1,2, or 3)-sequences> ,name = <string>)
Attributes:
coords
name
Public methods:
get_size()
to_string(format = 'ascii')
"""
def __init__(self,scalars,name=None):
self.name = self._get_name(name)
self.coords = self.get_n_seq_seq(scalars,self.default_value)
if not 1<=len(self.coords[0])<=3:
raise ValueError,'texture coordinates dimension must be 1, 2, or 3 but got %s'%(len(self.coords[0]))
def to_string(self,format='ascii'):
t = self.get_datatype(self.coords)
ret = ['TEXTURE_COORDINATES %s %s %s'%(self.name,len(self.coords[0]),t),
self.seq_to_string(self.coords,format,t)]
return '\n'.join(ret)
def get_size(self):
return len(self.coords)
def texture_coordinates_fromfile(f,n,sl):
assert len(sl)==3
dataname = sl[0].strip()
    dim = int(sl[1])
datatype = sl[2].strip().lower()
assert datatype in ['bit','unsigned_char','char','unsigned_short','short','unsigned_int','int','unsigned_long','long','float','double'],`datatype`
arr = []
while len(arr)<dim*n:
arr += map(eval,common._getline(f).split(' '))
assert len(arr)==dim*n
arr2 = []
for i in range(0,len(arr),dim):
arr2.append(arr[i:i+dim])
return TextureCoordinates(arr2,dataname)
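# Illustrative VTK section handled by the class and reader above
# (name and values hypothetical):
#   TEXTURE_COORDINATES texname 2 float
#   0.0 0.0
#   1.0 0.5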
if __name__ == "__main__":
print TextureCoordinates([[3,3],[4,3],240,3,2]).to_string()
|
chunshen1987/iSS
|
utilities/for_paraview/lib/TextureCoordinates.py
|
Python
|
mit
| 1,968
|
[
"VTK"
] |
2508ba1b4bd66f6160df48534a962ba7955ea494696ef56b1db90d01754bcae3
|
# coding: utf-8
from __future__ import division, unicode_literals
"""
This module implements an interface to enumlib, Gus Hart's excellent Fortran
code for enumerating derivative structures.
This module depends on a compiled enumlib with the executables multienum.x and
makestr.x available in the path. Please download the library at
http://enum.sourceforge.net/ and follow the instructions in the README to
compile these two executables accordingly.
If you use this module, please cite the following:
Gus L. W. Hart and Rodney W. Forcade, "Algorithm for generating derivative
structures," Phys. Rev. B 77 224115 (26 June 2008)
Gus L. W. Hart and Rodney W. Forcade, "Generating derivative structures from
multilattices: Application to hcp alloys," Phys. Rev. B 80 014120 (July 2009)
Gus L. W. Hart, Lance J. Nelson, and Rodney W. Forcade, "Generating
derivative structures at a fixed concentration," Comp. Mat. Sci. 59
101-107 (March 2012)
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 16, 2012"
import re
import math
import subprocess
import itertools
import logging
import numpy as np
from monty.fractions import lcm
import fractions
from six.moves import reduce
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.periodic_table import DummySpecie
from monty.os.path import which
from monty.dev import requires
from monty.tempfile import ScratchDir
logger = logging.getLogger(__name__)
# Favor the use of the newer "enum.x" by Gus Hart instead of the older
# "multienum.x"
enum_cmd = which('multienum.x')
@requires(enum_cmd and which('makestr.x'),
"EnumlibAdaptor requires the executables 'enum.x' or 'multienum.x' "
"and 'makestr.x' to be in the path. Please download the library at"
"http://enum.sourceforge.net/ and follow the instructions in "
"the README to compile these two executables accordingly.")
class EnumlibAdaptor(object):
"""
An adaptor for enumlib.
.. attribute:: structures
List of all enumerated structures.
"""
amount_tol = 1e-5
def __init__(self, structure, min_cell_size=1, max_cell_size=1,
symm_prec=0.1, enum_precision_parameter=0.001,
refine_structure=False, check_ordered_symmetry=True):
"""
Initializes the adapter with a structure and some parameters.
Args:
structure: An input structure.
min_cell_size (int): The minimum cell size wanted. Defaults to 1.
max_cell_size (int): The maximum cell size wanted. Defaults to 1.
symm_prec (float): Symmetry precision. Defaults to 0.1.
enum_precision_parameter (float): Finite precision parameter for
enumlib. Default of 0.001 is usually ok, but you might need to
tweak it for certain cells.
refine_structure (bool): If you are starting from a structure that
has been relaxed via some electronic structure code,
it is usually much better to start with symmetry determination
and then obtain a refined structure. The refined structure have
cell parameters and atomic positions shifted to the expected
symmetry positions, which makes it much less sensitive precision
issues in enumlib. If you are already starting from an
experimental cif, refinement should have already been done and
it is not necessary. Defaults to False.
check_ordered_symmetry (bool): Whether to check the symmetry of
the ordered sites. If the symmetry of the ordered sites is
lower, the lowest symmetry ordered sites is included in the
enumeration. This is important if the ordered sites break
                symmetry in a way that is important for getting possible
structures. But sometimes including ordered sites
slows down enumeration to the point that it cannot be
completed. Switch to False in those cases. Defaults to True.
"""
if refine_structure:
finder = SpacegroupAnalyzer(structure, symm_prec)
self.structure = finder.get_refined_structure()
else:
self.structure = structure
self.min_cell_size = min_cell_size
self.max_cell_size = max_cell_size
self.symm_prec = symm_prec
self.enum_precision_parameter = enum_precision_parameter
self.check_ordered_symmetry = check_ordered_symmetry
def run(self):
"""
Run the enumeration.
"""
#Create a temporary directory for working.
with ScratchDir(".") as d:
logger.debug("Temp dir : {}".format(d))
try:
#Generate input files
self._gen_input_file()
#Perform the actual enumeration
num_structs = self._run_multienum()
#Read in the enumeration output as structures.
if num_structs > 0:
self.structures = self._get_structures(num_structs)
else:
raise ValueError("Unable to enumerate structure.")
except Exception:
import sys
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=10, file=sys.stdout)
def _gen_input_file(self):
"""
Generate the necessary struct_enum.in file for enumlib. See enumlib
documentation for details.
"""
coord_format = "{:.6f} {:.6f} {:.6f}"
# Using symmetry finder, get the symmetrically distinct sites.
fitter = SpacegroupAnalyzer(self.structure, self.symm_prec)
symmetrized_structure = fitter.get_symmetrized_structure()
logger.debug("Spacegroup {} ({}) with {} distinct sites".format(
fitter.get_spacegroup_symbol(),
fitter.get_spacegroup_number(),
len(symmetrized_structure.equivalent_sites))
)
"""
Enumlib doesn"t work when the number of species get too large. To
simplify matters, we generate the input file only with disordered sites
and exclude the ordered sites from the enumeration. The fact that
different disordered sites with the exact same species may belong to
different equivalent sites is dealt with by having determined the
spacegroup earlier and labelling the species differently.
"""
# index_species and index_amounts store mappings between the indices
# used in the enum input file, and the actual species and amounts.
index_species = []
index_amounts = []
#Stores the ordered sites, which are not enumerated.
ordered_sites = []
disordered_sites = []
coord_str = []
for sites in symmetrized_structure.equivalent_sites:
if sites[0].is_ordered:
ordered_sites.append(sites)
else:
sp_label = []
species = {k: v for k, v in sites[0].species_and_occu.items()}
if sum(species.values()) < 1 - EnumlibAdaptor.amount_tol:
                    #Let us first add a dummy element for every single
                    #site whose total occupancies don't sum to 1.
species[DummySpecie("X")] = 1 - sum(species.values())
for sp in species.keys():
if sp not in index_species:
index_species.append(sp)
sp_label.append(len(index_species) - 1)
index_amounts.append(species[sp] * len(sites))
else:
ind = index_species.index(sp)
sp_label.append(ind)
index_amounts[ind] += species[sp] * len(sites)
sp_label = "/".join(["{}".format(i) for i in sorted(sp_label)])
for site in sites:
coord_str.append("{} {}".format(
coord_format.format(*site.coords),
sp_label))
disordered_sites.append(sites)
def get_sg_info(ss):
finder = SpacegroupAnalyzer(Structure.from_sites(ss), self.symm_prec)
sgnum = finder.get_spacegroup_number()
return sgnum
curr_sites = list(itertools.chain.from_iterable(disordered_sites))
min_sgnum = get_sg_info(curr_sites)
logger.debug("Disorderd sites has sgnum %d" % (
min_sgnum))
#It could be that some of the ordered sites has a lower symmetry than
#the disordered sites. So we consider the lowest symmetry sites as
#disordered in our enumeration.
self.ordered_sites = []
to_add = []
if self.check_ordered_symmetry:
for sites in ordered_sites:
temp_sites = list(curr_sites) + sites
sgnum = get_sg_info(temp_sites)
if sgnum < min_sgnum:
logger.debug("Adding {} to sites to be ordered. "
"New sgnum {}"
.format(sites, sgnum))
to_add = sites
min_sgnum = sgnum
for sites in ordered_sites:
if sites == to_add:
index_species.append(sites[0].specie)
index_amounts.append(len(sites))
sp_label = len(index_species) - 1
logger.debug("Lowest symmetry {} sites are included in enum."
.format(sites[0].specie))
for site in sites:
coord_str.append("{} {}".format(
coord_format.format(*site.coords),
sp_label))
disordered_sites.append(sites)
else:
self.ordered_sites.extend(sites)
self.index_species = index_species
lattice = self.structure.lattice
output = [self.structure.formula, "bulk"]
for vec in lattice.matrix:
output.append(coord_format.format(*vec))
output.append("{}".format(len(index_species)))
output.append("{}".format(len(coord_str)))
output.extend(coord_str)
output.append("{} {}".format(self.min_cell_size, self.max_cell_size))
output.append(str(self.enum_precision_parameter))
output.append("partial")
ndisordered = sum([len(s) for s in disordered_sites])
base = int(ndisordered*reduce(lcm,
[f.limit_denominator(
ndisordered *
self.max_cell_size).denominator
for f in map(fractions.Fraction,
index_amounts)]))
#base = ndisordered #10 ** int(math.ceil(math.log10(ndisordered)))
#To get a reasonable number of structures, we fix concentrations to the
#range expected in the original structure.
total_amounts = sum(index_amounts)
for amt in index_amounts:
conc = amt / total_amounts
if abs(conc * base - round(conc * base)) < 1e-5:
output.append("{} {} {}".format(int(round(conc * base)),
int(round(conc * base)),
base))
else:
min_conc = int(math.floor(conc * base))
output.append("{} {} {}".format(min_conc - 1, min_conc + 1,
base))
output.append("")
logger.debug("Generated input file:\n{}".format("\n".join(output)))
with open("struct_enum.in", "w") as f:
f.write("\n".join(output))
def _run_multienum(self):
p = subprocess.Popen([enum_cmd],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE, close_fds=True)
output = p.communicate()[0].decode("utf-8")
count = 0
start_count = False
for line in output.strip().split("\n"):
if line.strip().endswith("RunTot"):
start_count = True
            elif start_count and re.match(r"\d+\s+.*", line.strip()):
count = int(line.split()[-1])
logger.debug("Enumeration resulted in {} structures".format(count))
return count
def _get_structures(self, num_structs):
structs = []
rs = subprocess.Popen(["makestr.x",
"struct_enum.out", str(0),
str(num_structs - 1)],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if len(self.ordered_sites) > 0:
original_latt = self.ordered_sites[0].lattice
# Need to strip sites of site_properties, which would otherwise
# result in an index error. Hence Structure is reconstructed in
# the next step.
ordered_structure = Structure(
original_latt,
[site.species_and_occu for site in self.ordered_sites],
[site.frac_coords for site in self.ordered_sites])
inv_org_latt = np.linalg.inv(original_latt.matrix)
for n in range(1, num_structs + 1):
with open("vasp.{:06d}".format(n)) as f:
data = f.read()
data = re.sub("scale factor", "1", data)
data = re.sub("(\d+)-(\d+)", r"\1 -\2", data)
poscar = Poscar.from_string(data, self.index_species)
sub_structure = poscar.structure
#Enumeration may have resulted in a super lattice. We need to
#find the mapping from the new lattice to the old lattice, and
#perform supercell construction if necessary.
new_latt = sub_structure.lattice
sites = []
if len(self.ordered_sites) > 0:
transformation = np.dot(new_latt.matrix, inv_org_latt)
transformation = [[int(round(cell)) for cell in row]
for row in transformation]
logger.debug("Supercell matrix: {}".format(transformation))
s = Structure.from_sites(ordered_structure)
s.make_supercell(transformation)
sites.extend([site.to_unit_cell for site in s])
super_latt = sites[-1].lattice
else:
super_latt = new_latt
for site in sub_structure:
if site.specie.symbol != "X": # We exclude vacancies.
sites.append(PeriodicSite(site.species_and_occu,
site.frac_coords,
super_latt).to_unit_cell)
structs.append(Structure.from_sites(sorted(sites)))
logger.debug("Read in a total of {} structures.".format(num_structs))
return structs
|
rousseab/pymatgen
|
pymatgen/command_line/enumlib_caller.py
|
Python
|
mit
| 15,703
|
[
"VASP",
"pymatgen"
] |
3856bd60bedee48eee613f75ba3a8aa373501fc96146a37f9dffda6df38d4496
|
#!/usr/bin/env python2.7
# This version is created at Mon Mar 17 12:54:44 CET 2014
# Author: Asli I. Ozen (asli@cbs.dtu.dk)
# License: GPL 3.0 (http://www.gnu.org/licenses/gpl-3.0.txt)
import sys, gzip
import re, string
import argparse
import os
from Bio.Blast import NCBIStandalone
from operator import itemgetter, attrgetter
from datetime import datetime as dt
import time
from os.path import basename
sys.path.append('/home/projects5/pr_53035/people/asli/bin/lib/python2.7/site-packages')
prog= sys.argv[0]
example = "----------------------------------------------------------------------------- \
example usage: \n" + prog + " -it test.blasttab -l test.lengths -v \n" + \
prog + " -id test.blastdecide -s 30 -q 30 -e 0.0001 -sn nosave \
-----------------------------------------------------------------------------"
helpstr = '''
description: This script parses blast/ublast results and filters them based on the given cut-offs.
Blast results should be in -m 0 format or tab separated -m 6 format. With ublast, the results should be
obtained with -blast6out option.
'''
epi="Author: Asli I. Ozen (asli@cbs.dtu.dk)"
class BlastDecision:
def __init__(self):
self.start = time.time()
d_ = dt.today()
self.timestarted = d_.strftime("%d-%m-%Y %H:%M:%S")
self.parseArgs()
def parseArgs(self):
self.parser = argparse.ArgumentParser(description=example + helpstr, epilog = epi, conflict_handler='resolve')
self.parser.add_argument("-id", metavar="bldecidein", help="pre-made blastdecide result FILE as an input back again",nargs=1)
self.parser.add_argument("-it", metavar="bltabin", help="blast tabular result FILE as an input")
self.parser.add_argument("-ib", metavar="blin", help="blast/psi-blast -m 0 result FILE as an input", nargs=1)
self.parser.add_argument("-o", metavar="output", help="Output FILE name (default=inputfile.blastdecide)", nargs=1)
self.parser.add_argument("-bf", metavar="[blast|psiblast]",type=str,default="blast", help="blast -m 0 output file format (default=blast)", nargs=1)
self.parser.add_argument("-l", metavar="lengths", help="Query lengths FILE (required if tabular blast result input(-it) is given)")
self.parser.add_argument("-n", metavar="[savenew|nosave]", type=str, default="nosave", help="save new blastdecide or not (default=savenew) ",nargs=1)
self.parser.add_argument("-s", metavar="INT", default= "50", help="minimum similarity cutoff")
self.parser.add_argument("-q", metavar="INT",default= "50", help="minimum query coverage cutoff")
#self.parser.add_argument("-tc", metavar="targetcoverage", help="minimum target coverage cutoff")
self.parser.add_argument("-e", metavar="FLOAT", default= "1e-10" , help="evalue cutoff i.e. 1e-5 (default=1e-10), decimals allowed i.e. 0.0001")
self.parser.add_argument("-v","--verbose", action="store_true" , help="increase output verbosity")
def read_lengths(self):
fl= open(self.lenfile,"rU")
self.lendict={}
for line in fl:
#print line
query = line.split("\t")[0]
query_name = query.split(" ")[0].strip(">")
length= int(line.split("\t")[1].strip("\n"))
self.lendict[query_name]=length
fl.close()
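    # Expected lengths-file format (tab-separated; illustrative line):
    #   >query1 optional description<TAB>354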
def ReadBlast(self, file, OUT, iszipped = 0, is_psiblast=None):
output= open(OUT, "w")
self.selfhits=[]
if is_psiblast:
print >> sys.stderr, 'Parsing PSI-Blast'
self.parser = NCBIStandalone.PSIBlastParser()
else:
self.parser = NCBIStandalone.BlastParser()
if file[-3:] == '.gz' or iszipped:
handle = gzip.open(file)
else:
handle = open(file)
self.iter = NCBIStandalone.Iterator(handle = handle, parser = self.parser)
self.blastDict = {}
while 1:
try:
rec = self.iter.next()
if not rec: break
except:
sys.stderr.write('Can\'t iterate on blast records anymore. Abort.\n')
import traceback
traceback.print_exc()
return 'Error parsing %s' % file
self.query = rec.query.split(" ")[0] ## blast_record.query.split(" ")[0]
self.length = rec.query_letters
if self.length < self.min_size:
self.printer("Does not meet the minimum length " + str(self.min_size))
break
if is_psiblast: rec = rec.rounds[-1]
# each alignment is one potential hit
for n, alignment in enumerate(rec.alignments):
# to make it easy, skip all alignments with multiple HSPS
hsp = alignment.hsps[0]
alnlength=hsp.align_length
hit = alignment.title
#targetlength = alignment.length
#m = re.search("sp\|([A-Z0-9]+)\|([A-Z0-9_]+) ?(.+)?", alignment.title)
m = re.search("sp\|(.+?)\|(.+?) (.+)?", alignment.title)
if m: # pyphynr blast result
hit_sp_ac = m.group(1)
hit_sp_id = m.group(2)
hit_sp_note = m.group(3)
elif alignment.title[0] == '>': # result from qadditional blast databases
hit_sp_ac = None
hit_sp_id = alignment.title[1:].split()[0]
hit_sp_note = None
else:
hit_sp_ac = None
hit_sp_id = None
hit_sp_note = None
# fix annoying dots in ids
if hit_sp_ac: hit_sp_ac = hit_sp_ac.replace('.','_')
if hit_sp_id: hit_sp_id = hit_sp_id.replace('.','_')
#if not hit_sp_id: print 'XXXXXXX', alignment.title
self.printer(hit_sp_id)
similarity = hsp.positives[0]/float(hsp.positives[1])*100
if float(hsp.expect) <= float(self.HSP_max_evalue):
if float(similarity) >= int(self.HSP_minimal_positives):
coverage = hsp.positives[1]/float(self.length)*100
if float(coverage) >= int(self.HSP_minimal_coverage):
#targetcoverage = hsp.positives[1]/float(targetlength)*100
#if float(targetcoverage) > int(self.HSP_minimal_targetcov):
#self.compatibles.append((hit_sp_ac, hit))
#hitlist = [hit_sp_id, n+1 , hsp.positives[0]/float(hsp.positives[1])*100, hsp.positives[1]/float(self.length)*100, hsp.positives[1]/float(targetlength)*100, hsp.score, hsp.expect]
hitlist = [hit_sp_id, hsp.positives[0]/float(hsp.positives[1])*100, hsp.positives[1]/float(self.length)*100, hsp.score, hsp.expect]
                            if self.cB: self.createblastDict(self.query, hitlist)
output.write("%s\t" % (self.query)),
for element in hitlist:
output.write("%s\t" % element),
output.write("\n")
output.close()
handle.close()
return None
def ReadBlastresultsTab(self, filename, OUT):
if filename[-3:] == '.gz':
fh = gzip.open(filename)
else:
fh= open(filename,"rU")
#hitsdict={}
#hitlist = [hit_sp_id, n+1 , hsp.positives[0]/float(hsp.positives[1])*100, hsp.positives[1]/float(self.length)*100, hsp.score, hsp.expect]
self.blastDict={}
self.selfhits=[]
self.read_lengths()
output= open(OUT, "w")
self.printer(basename(OUT) + " file initiated")
#lines=fh.readlines()
for line in fh:
line = line.strip("\n")
if len(line.split("\t")) > 2:
query = line.split("\t")[0]
#print query
query_name = query.split(" ")[0]
hit_sp_id = line.split("\t")[1]
percent_id = float(line.split("\t")[2])
aln_len=float(line.split("\t")[3])
query_length=self.lendict[query_name]
coverage = 100*int(aln_len)/float(query_length)
bitscore = float(line.split("\t")[11])
evalue = float(line.split("\t")[10])
if float(coverage) > 100 : coverage = 100
if str(query_name) == str(hit_sp_id):
#print "SameSameSame"
self.selfhits.append(query)
else:
if float(evalue) <= float(self.HSP_max_evalue):
if float(percent_id) >= int(self.HSP_minimal_positives):
if float(coverage) >= int(self.HSP_minimal_coverage):
hitlist=[hit_sp_id, percent_id, coverage, bitscore, evalue]
if self.cB: self.createblastDict(query,hitlist)
output.write("%s\t" % (query_name)),
for element in hitlist:
output.write("%s\t" % element),
output.write("\n")
self.printer(basename(OUT) + " file DONE!")
output.close()
fh.close()
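    # Tabular (-m 6 / -blast6out) columns used above: 0=query id, 1=subject id,
    # 2=% identity, 3=alignment length, 10=e-value, 11=bit score.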
def ReadBlastdecide(self, OUT):
#hitsdict={}
#hitlist = [hit_sp_id, n+1 , hsp.positives[0]/float(hsp.positives[1])*100, hsp.positives[1]/float(self.length)*100, hsp.score, hsp.expect]
if self.blastdecide[-3:] == '.gz':
fh = gzip.open(self.blastdecide)
else:
fh= open(self.blastdecide,"rU")
lines=fh.readlines()
output= open(OUT, "w")
        self.blastDict={}
        self.selfhits=[]
for line in lines:
line = line.strip("\n")
if len(line.split("\t")) > 2:
query = line.split("\t")[0]
hit_sp_id = line.split("\t")[1]
#n=float(line.split("\t")[2])
percent_id = float(line.split("\t")[2])
coverage = float(line.split("\t")[3])
#targetcoverage = float(line.split("\t")[5])
bitscore = float(line.split("\t")[4])
evalue = float(line.split("\t")[5])
if str(query) == str(hit_sp_id):
#print "SameSameSame"
self.selfhits.append(query)
else:
if float(evalue) <= float(self.HSP_max_evalue):
if float(percent_id) >= int(self.HSP_minimal_positives):
if float(coverage) >= int(self.HSP_minimal_coverage):
hitlist=[hit_sp_id, percent_id, coverage, bitscore, evalue]
if self.cB == 'savenew':
self.createblastDict(query,hitlist)
self.writeoutput(output,query,hitlist)
else:
self.createblastDict(query,hitlist)
output.close()
if self.cB != 'savenew' and os.path.getsize(OUT) == 0:
os.system("rm " + OUT)
fh.close()
def writeoutput(self, oh, query, hitlist):
oh.write("%s\t" % (query))
for element in hitlist:
oh.write("%s\t" % element),
oh.write("\n")
    def createblastDict(self, query, hitlist):
hit_sp_id=hitlist[0]
if str(query) is not str(hit_sp_id):
#hitlist=[hit_sp_id, n, percent_id, coverage,targetcoverage, bitscore,evalue]
#hitlist=[hit_sp_id, percent_id, coverage, bitscore,evalue]
if query in self.blastDict:
self.blastDict[query].append(hitlist)
else:
self.blastDict[query] = [hitlist]
def mainthing(self):
self.HSP_minimal_positives = self.opts.s
self.HSP_minimal_coverage = self.opts.q
#self.HSP_minimal_targetcov = self.opts.tc
self.HSP_minimal_coverage_length = 20
self.lenfile= self.opts.l
self.HSP_max_evalue = self.opts.e
self.v = self.opts.verbose
self.min_size = 0
self.cB = self.opts.n[0]
if self.opts.id:
self.blastdecide=self.opts.id[0]
if self.opts.o:
output = self.opts.o[0]
else:
newname= str(self.blastdecide).split(".")[0:-1]
output = ".".join(newname) + ".new.blastdecide"
self.ReadBlastdecide(output)
elif self.opts.it:
blasttab = self.opts.it
if self.opts.o:
output = self.opts.o[0]
else:
output = blasttab + ".blastdecide"
self.ReadBlastresultsTab(blasttab,output)
else:
try:
                blastfile = self.opts.ib[0]
                typ = self.opts.bf[0]
if self.opts.o:
output = self.opts.o[0]
else:
output = blastfile + ".blastdecide"
if typ == "psiblast":
self.ReadBlast(blastfile, output, is_psiblast=True)
else:
self.ReadBlast(blastfile, output)
except:
raise IOError('If you dont have Pre-made blastdecide or ublast-tab results, you should provide a normal blast output (-m0)')
#timeused = (time.time() - self.start) / 60
self.timing= (time.time() - self.start) /60
self.printer("### Time used for running: "+str(round(self.timing*60)) + " seconds ("+str(round(self.timing)) + " min)")
timeended= dt.today().strftime("%d-%m-%Y %H:%M:%S")
def printer(self,string):
if self.opts.verbose:
print string
if __name__ == '__main__':
try:
obj = BlastDecision()
obj.opts=obj.parser.parse_args(sys.argv[1:])
obj.printer("\n### " + sys.argv[0] + " initialized at " + obj.timestarted)
obj.printer("### OPTIONS : " + str(obj.opts))
obj.mainthing()
# obj.parser.print_help()
except Exception,e:
print str(e)
import traceback
traceback.print_exc()
#
###############
# INPUT LIST
# blast output in tab format & query lengths file : genecatalogue_vs_uniprot.blasttab OR genecatalogue_vs_genecatalogue.blasttab & genecatalogue.lengths
# blast output in -m 0 format : genecatalogue_vs_uniprot.blastout OR genecatalogue_vs_genecatalogue.blastout
# pre-made blastdecide file : genecatalogue_vs_uniprot.blastdecide
#
# OUTPUT LIST
# new blastdecide file based on given parameters : genecatalogue_vs_uniprot.blastdecide
# if premade blastdecide is given, the blastDict is generated : obj.blastDict
#
# OPTIONS LIST
# '-id', '--blastdecidein', help="pre-made blastdecide output file"
# '-it', '--blasttabin', help="blast tabular output file"
# '-ib', '--blastm0in', help="blast -m 0 output file
# '-bf', type of blast ('blast' or 'psiblast')"
# '-l', '--lengths', help="Query lengths file"
# '-s', '--similarity', default= "50", help="minimum similarity cutoff"
# '-q', '--querycoverage',default= "50", help="minimum query coverage cutoff"
# '-tc', '--targetcoverage', help="minimum target coverage cutoff"
# '-e', '--maxevalue', default= "1e-10" , help="evalue cutoff"
#
#
|
MG-group-tools/MGFunc
|
mgfunc_v2/bldecide.py
|
Python
|
gpl-3.0
| 14,596
|
[
"BLAST"
] |
ecc48efd4a22cad1444640b1e9bda6c7d90ebf0fcb6e1a2f9a2ff5e420b6c99e
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014-2020 OSMC (KodeKarnage)
This file is part of script.module.osmcsetting.pi
SPDX-License-Identifier: GPL-2.0-or-later
See LICENSES/GPL-2.0-or-later for more information.
"""
import os
import subprocess
import traceback
import xbmcaddon
import xbmcgui
from osmccommon import osmc_setting
from osmccommon.osmc_logging import StandardLogger
from .. import osmc_reparser
ADDON_ID = "script.module.osmcsetting.pi"
DIALOG = xbmcgui.Dialog()
log = StandardLogger(ADDON_ID, os.path.basename(__file__)).log
class OSMCSettingClass(osmc_setting.OSMCSettingClass):
def __init__(self):
super(OSMCSettingClass, self).__init__()
self.addon_id = ADDON_ID
self.short_name = 'Pi Config'
self.short_name_i18n = 32054
self.description = 'The Raspberry Pi doesn\'t have a conventional BIOS. ' \
'System configuration parameters are stored in a "config-user.txt" file. ' \
'For more detail, visit http://elinux.org/RPiconfig[CR]This settings ' \
'module allows you to edit your config-user.txt from within OSMC using a ' \
'graphical interface.[CR][CR]The module includes:' \
'[CR]- display rotation[CR]- hdmi_safe & hdmi_boost' \
'[CR]- hdmi_group & hdmi_mode[CR]- function to save edid to file[CR]' \
'- sdtv_mode & sdtv_aspect[CR]- GPU memory split[CR]' \
'- MPG2 & WVC1 licences (including status)[CR]' \
'- your Pi\'s serial number[CR][CR]Finally, there is a Config Editor ' \
'that will allow you to quickly add, edit, or delete lines in your ' \
'config-user.txt.[CR][CR]Overclock settings are set using the ' \
'Pi Overclock module.'
self.description_i18n = 32055
self.config_location = '/boot/config-user.txt'
self.populate_misc_info()
try:
self.clean_user_config()
except Exception:
log('Error cleaning users config')
log(traceback.format_exc())
def run(self):
# read the config-user.txt file every time the settings are opened. This is unavoidable because
# it is possible for the user to have made manual changes to the config-user.txt while
# OSG is active.
config = osmc_reparser.read_config_file(self.config_location)
extracted_settings = osmc_reparser.config_to_kodi(osmc_reparser.MASTER_SETTINGS, config)
# load the settings into kodi
log('Settings extracted from the config-user.txt')
for k, v in extracted_settings.items():
log("%s : %s" % (k, v))
self.me.setSetting(k, str(v))
# open the settings GUI and let the user monkey about with the controls
self.me.openSettings()
# retrieve the new settings from kodi
new_settings = self.settings_retriever_xml()
log('New settings applied to the config-user.txt')
for k, v in new_settings.items():
log("%s : %s" % (k, v))
# read the config into a list of lines again
config = osmc_reparser.read_config_file(self.config_location)
# construct the new set of config lines using the protocols and the new settings
new_settings = osmc_reparser.kodi_to_config(osmc_reparser.MASTER_SETTINGS,
config, new_settings)
# write the new lines to the temporary config file
osmc_reparser.write_config_file('/var/tmp/config-user.txt', new_settings)
# copy over the temp config-user.txt to /boot/ as superuser
subprocess.call(["sudo", "mv", '/var/tmp/config-user.txt', self.config_location])
DIALOG.notification(self.lang(32095), self.lang(32096))
def settings_retriever_xml(self):
latest_settings = {}
addon = xbmcaddon.Addon(self.addon_id)
for key in osmc_reparser.MASTER_SETTINGS.keys():
latest_settings[key] = addon.getSetting(key)
return latest_settings
def populate_misc_info(self):
# grab the Pi serial number and check to see whether the codec licences are enabled
mpg = subprocess.check_output(["/opt/vc/bin/vcgencmd", "codec_enabled", "MPG2"])
wvc = subprocess.check_output(["/opt/vc/bin/vcgencmd", "codec_enabled", "WVC1"])
serial_raw = subprocess.check_output(["cat", "/proc/cpuinfo"])
if isinstance(mpg, (bytes, bytearray)):
mpg = mpg.decode('utf-8', 'ignore')
if isinstance(wvc, (bytes, bytearray)):
wvc = wvc.decode('utf-8', 'ignore')
if isinstance(serial_raw, (bytes, bytearray)):
serial_raw = serial_raw.decode('utf-8', 'ignore')
# grab just the serial number
serial = serial_raw[serial_raw.index('Serial') + len('Serial'):].replace('\n', '') \
.replace(':', '').replace(' ', '').replace('\t', '')
# load the values into the settings gui
self.me.setSetting('codec_check', mpg.replace('\n', '') + ', ' + wvc.replace('\n', ''))
self.me.setSetting('serial', serial)
def clean_user_config(self):
""" Comment out problematic lines in the users config-user.txt """
patterns = [
r".*=.*\[remove\].*",
r".*=remove",
]
config = osmc_reparser.read_config_file(self.config_location)
new_config = osmc_reparser.clean_config(config, patterns)
# write the new lines to the temporary config file
osmc_reparser.write_config_file('/var/tmp/config-user.txt', new_config)
# copy over the temp config-user.txt to /boot/ as superuser
subprocess.call(["sudo", "mv", '/var/tmp/config-user.txt', self.config_location])
|
osmc/osmc
|
package/mediacenter-addon-osmc/src/script.module.osmcsetting.pi/resources/lib/osmcpi/osmc/osmc_setting.py
|
Python
|
gpl-2.0
| 5,950
|
[
"VisIt"
] |
4a8e4c86dd6a5c4fc49468a5298bb9e2544c177f395d94f20b189cc26c2d7543
|
"""Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
pick weighted random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* The random() method is implemented in C, executes in a single Python step,
and is, therefore, threadsafe.
"""
from warnings import warn as _warn
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from _collections_abc import Set as _Set, Sequence as _Sequence
from hashlib import sha512 as _sha512
import itertools as _itertools
import bisect as _bisect
import os as _os
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
"randrange","shuffle","normalvariate","lognormvariate",
"expovariate","vonmisesvariate","gammavariate","triangular",
"gauss","betavariate","paretovariate","weibullvariate",
"getstate","setstate", "getrandbits", "choices",
"SystemRandom"]
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)
BPF = 53 # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.
import _random
class Random(_random.Random):
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), and setstate().
Optionally, implement a getrandbits() method so that randrange()
can cover arbitrarily large ranges.
"""
VERSION = 3 # used by getstate/setstate
def __init__(self, x=None):
"""Initialize an instance.
Optional argument x controls seeding, as for Random.seed().
"""
self.seed(x)
self.gauss_next = None
def seed(self, a=None, version=2):
"""Initialize internal state from hashable object.
None or no argument seeds from current time or from an operating
system specific randomness source if available.
If *a* is an int, all bits are used.
For version 2 (the default), all of the bits are used if *a* is a str,
bytes, or bytearray. For version 1 (provided for reproducing random
sequences from older versions of Python), the algorithm for str and
bytes generates a narrower range of seeds.
"""
if version == 1 and isinstance(a, (str, bytes)):
a = a.decode('latin-1') if isinstance(a, bytes) else a
x = ord(a[0]) << 7 if a else 0
for c in map(ord, a):
x = ((1000003 * x) ^ c) & 0xFFFFFFFFFFFFFFFF
x ^= len(a)
a = -2 if x == -1 else x
if version == 2 and isinstance(a, (str, bytes, bytearray)):
if isinstance(a, str):
a = a.encode()
a += _sha512(a).digest()
a = int.from_bytes(a, 'big')
super().seed(a)
self.gauss_next = None
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self.VERSION, super().getstate(), self.gauss_next
def setstate(self, state):
"""Restore internal state from object returned by getstate()."""
version = state[0]
if version == 3:
version, internalstate, self.gauss_next = state
super().setstate(internalstate)
elif version == 2:
version, internalstate, self.gauss_next = state
# In version 2, the state was saved as signed ints, which causes
# inconsistencies between 32/64-bit systems. The state is
# really unsigned 32-bit ints, so we convert negative ints from
# version 2 to positive longs for version 3.
try:
internalstate = tuple(x % (2**32) for x in internalstate)
except ValueError as e:
raise TypeError from e
super().setstate(internalstate)
else:
raise ValueError("state with version %s passed to "
"Random.setstate() of version %s" %
(version, self.VERSION))
## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.
## -------------------- pickle support -------------------
# Issue 17489: Since __reduce__ was defined to fix #759889 this is no
# longer called; we leave it here because it has been here since random was
# rewritten back in 2001 and why risk breaking something.
def __getstate__(self): # for pickle
return self.getstate()
def __setstate__(self, state): # for pickle
self.setstate(state)
def __reduce__(self):
return self.__class__, (), self.getstate()
## -------------------- integer methods -------------------
def randrange(self, start, stop=None, step=1, _int=int):
"""Choose a random item from range(start, stop[, step]).
This fixes the problem with randint() which includes the
endpoint; in Python this is usually not what you want.
"""
# This code is a bit messy to make it fast for the
# common case while still doing adequate error checking.
istart = _int(start)
if istart != start:
raise ValueError("non-integer arg 1 for randrange()")
if stop is None:
if istart > 0:
return self._randbelow(istart)
raise ValueError("empty range for randrange()")
# stop argument supplied.
istop = _int(stop)
if istop != stop:
raise ValueError("non-integer stop for randrange()")
width = istop - istart
if step == 1 and width > 0:
return istart + self._randbelow(width)
if step == 1:
raise ValueError("empty range for randrange() (%d,%d, %d)" % (istart, istop, width))
# Non-unit step argument supplied.
istep = _int(step)
if istep != step:
raise ValueError("non-integer step for randrange()")
if istep > 0:
n = (width + istep - 1) // istep
elif istep < 0:
n = (width + istep + 1) // istep
else:
raise ValueError("zero step for randrange()")
if n <= 0:
raise ValueError("empty range for randrange()")
return istart + istep*self._randbelow(n)
def randint(self, a, b):
"""Return random integer in range [a, b], including both end points.
"""
return self.randrange(a, b+1)
def _randbelow(self, n, int=int, maxsize=1<<BPF, type=type,
Method=_MethodType, BuiltinMethod=_BuiltinMethodType):
"Return a random int in the range [0,n). Raises ValueError if n==0."
random = self.random
getrandbits = self.getrandbits
# Only call self.getrandbits if the original random() builtin method
# has not been overridden or if a new getrandbits() was supplied.
if type(random) is BuiltinMethod or type(getrandbits) is Method:
k = n.bit_length() # don't use (n-1) here because n can be 1
r = getrandbits(k) # 0 <= r < 2**k
while r >= n:
r = getrandbits(k)
return r
# There's an overridden random() method but no new getrandbits() method,
# so we can only use random() from here.
if n >= maxsize:
_warn("Underlying random() generator does not supply \n"
"enough bits to choose from a population range this large.\n"
"To remove the range limitation, add a getrandbits() method.")
return int(random() * n)
if n == 0:
raise ValueError("Boundary cannot be zero")
rem = maxsize % n
limit = (maxsize - rem) / maxsize # int(limit * maxsize) % n == 0
r = random()
while r >= limit:
r = random()
return int(r*maxsize) % n
## -------------------- sequence methods -------------------
def choice(self, seq):
"""Choose a random element from a non-empty sequence."""
try:
i = self._randbelow(len(seq))
except ValueError:
raise IndexError('Cannot choose from an empty sequence') from None
return seq[i]
def shuffle(self, x, random=None):
"""Shuffle list x in place, and return None.
Optional argument random is a 0-argument function returning a
random float in [0.0, 1.0); if it is the default None, the
standard random.random will be used.
"""
if random is None:
randbelow = self._randbelow
for i in reversed(range(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
j = randbelow(i+1)
x[i], x[j] = x[j], x[i]
else:
_int = int
for i in reversed(range(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
j = _int(random() * (i+1))
x[i], x[j] = x[j], x[i]
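    # The loops above implement a Fisher-Yates shuffle: position i is swapped
    # with a uniformly chosen j in [0, i], so (given a uniform core generator)
    # every permutation of x is equally likely.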
def sample(self, population, k):
"""Chooses k unique random elements from a population sequence or set.
Returns a new list containing elements from the population while
leaving the original population unchanged. The resulting list is
in selection order so that all sub-slices will also be valid random
samples. This allows raffle winners (the sample) to be partitioned
into grand prize and second place winners (the subslices).
Members of the population need not be hashable or unique. If the
population contains repeats, then each occurrence is a possible
selection in the sample.
To choose a sample in a range of integers, use range as an argument.
This is especially fast and space efficient for sampling from a
large population: sample(range(10000000), 60)
"""
# Sampling without replacement entails tracking either potential
# selections (the pool) in a list or previous selections in a set.
# When the number of selections is small compared to the
# population, then tracking selections is efficient, requiring
# only a small set and an occasional reselection. For
# a larger number of selections, the pool tracking method is
# preferred since the list takes less space than the
# set and it doesn't suffer from frequent reselections.
if isinstance(population, _Set):
population = tuple(population)
if not isinstance(population, _Sequence):
raise TypeError("Population must be a sequence or set. For dicts, use list(d).")
randbelow = self._randbelow
n = len(population)
if not 0 <= k <= n:
raise ValueError("Sample larger than population or is negative")
result = [None] * k
setsize = 21 # size of a small set minus size of an empty list
if k > 5:
setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
if n <= setsize:
# An n-length list is smaller than a k-length set
pool = list(population)
for i in range(k): # invariant: non-selected at [0,n-i)
j = randbelow(n-i)
result[i] = pool[j]
pool[j] = pool[n-i-1] # move non-selected item into vacancy
else:
selected = set()
selected_add = selected.add
for i in range(k):
j = randbelow(n)
while j in selected:
j = randbelow(n)
selected_add(j)
result[i] = population[j]
return result
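    # Concrete illustration of the two strategies above (not part of CPython):
    # sample(range(10000000), 60) takes the set-tracking branch and stores only
    # the 60 chosen indices, while sample('abcdef', 5) takes the pool branch,
    # copying the 6-element population and shrinking it in place.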
def choices(self, population, weights=None, *, cum_weights=None, k=1):
"""Return a k sized list of population elements chosen with replacement.
If the relative weights or cumulative weights are not specified,
the selections are made with equal probability.
"""
random = self.random
if cum_weights is None:
if weights is None:
_int = int
total = len(population)
return [population[_int(random() * total)] for i in range(k)]
cum_weights = list(_itertools.accumulate(weights))
elif weights is not None:
raise TypeError('Cannot specify both weights and cumulative weights')
if len(cum_weights) != len(population):
raise ValueError('The number of weights does not match the population')
bisect = _bisect.bisect
total = cum_weights[-1]
return [population[bisect(cum_weights, random() * total)] for i in range(k)]
## -------------------- real-valued distributions -------------------
## -------------------- uniform distribution -------------------
def uniform(self, a, b):
"Get a random number in the range [a, b) or [a, b] depending on rounding."
return a + (b-a) * self.random()
## -------------------- triangular --------------------
def triangular(self, low=0.0, high=1.0, mode=None):
"""Triangular distribution.
Continuous distribution bounded by given lower and upper limits,
and having a given mode value in-between.
http://en.wikipedia.org/wiki/Triangular_distribution
"""
u = self.random()
try:
c = 0.5 if mode is None else (mode - low) / (high - low)
except ZeroDivisionError:
return low
if u > c:
u = 1.0 - u
c = 1.0 - c
low, high = high, low
return low + (high - low) * _sqrt(u * c)
## -------------------- normal distribution --------------------
def normalvariate(self, mu, sigma):
"""Normal distribution.
mu is the mean, and sigma is the standard deviation.
"""
# mu = mean, sigma = standard deviation
# Uses Kinderman and Monahan method. Reference: Kinderman,
# A.J. and Monahan, J.F., "Computer generation of random
# variables using the ratio of uniform deviates", ACM Trans
# Math Software, 3, (1977), pp257-260.
random = self.random
while 1:
u1 = random()
u2 = 1.0 - random()
z = NV_MAGICCONST*(u1-0.5)/u2
zz = z*z/4.0
if zz <= -_log(u2):
break
return mu + z*sigma
## -------------------- lognormal distribution --------------------
def lognormvariate(self, mu, sigma):
"""Log normal distribution.
If you take the natural logarithm of this distribution, you'll get a
normal distribution with mean mu and standard deviation sigma.
mu can have any value, and sigma must be greater than zero.
"""
return _exp(self.normalvariate(mu, sigma))
## -------------------- exponential distribution --------------------
def expovariate(self, lambd):
"""Exponential distribution.
lambd is 1.0 divided by the desired mean. It should be
nonzero. (The parameter would be called "lambda", but that is
a reserved word in Python.) Returned values range from 0 to
positive infinity if lambd is positive, and from negative
infinity to 0 if lambd is negative.
"""
# lambd: rate lambd = 1/mean
# ('lambda' is a Python reserved word)
# we use 1-random() instead of random() to preclude the
# possibility of taking the log of zero.
return -_log(1.0 - self.random())/lambd
## -------------------- von Mises distribution --------------------
def vonmisesvariate(self, mu, kappa):
"""Circular data distribution.
mu is the mean angle, expressed in radians between 0 and 2*pi, and
kappa is the concentration parameter, which must be greater than or
equal to zero. If kappa is equal to zero, this distribution reduces
to a uniform random angle over the range 0 to 2*pi.
"""
# mu: mean angle (in radians between 0 and 2*pi)
# kappa: concentration parameter kappa (>= 0)
# if kappa = 0 generate uniform random angle
# Based upon an algorithm published in: Fisher, N.I.,
# "Statistical Analysis of Circular Data", Cambridge
# University Press, 1993.
# Thanks to Magnus Kessler for a correction to the
# implementation of step 4.
random = self.random
if kappa <= 1e-6:
return TWOPI * random()
s = 0.5 / kappa
r = s + _sqrt(1.0 + s * s)
while 1:
u1 = random()
z = _cos(_pi * u1)
d = z / (r + z)
u2 = random()
if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
break
q = 1.0 / r
f = (q + z) / (1.0 + q * z)
u3 = random()
if u3 > 0.5:
theta = (mu + _acos(f)) % TWOPI
else:
theta = (mu - _acos(f)) % TWOPI
return theta
## -------------------- gamma distribution --------------------
def gammavariate(self, alpha, beta):
"""Gamma distribution. Not the gamma function!
Conditions on the parameters are alpha > 0 and beta > 0.
The probability distribution function is:
x ** (alpha - 1) * math.exp(-x / beta)
pdf(x) = --------------------------------------
math.gamma(alpha) * beta ** alpha
"""
# alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
# Warning: a few older sources define the gamma distribution in terms
# of alpha > -1.0
if alpha <= 0.0 or beta <= 0.0:
raise ValueError('gammavariate: alpha and beta must be > 0.0')
random = self.random
if alpha > 1.0:
# Uses R.C.H. Cheng, "The generation of Gamma
# variables with non-integral shape parameters",
# Applied Statistics, (1977), 26, No. 1, p71-74
ainv = _sqrt(2.0 * alpha - 1.0)
bbb = alpha - LOG4
ccc = alpha + ainv
while 1:
u1 = random()
if not 1e-7 < u1 < .9999999:
continue
u2 = 1.0 - random()
v = _log(u1/(1.0-u1))/ainv
x = alpha*_exp(v)
z = u1*u1*u2
r = bbb+ccc*v-x
if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
return x * beta
elif alpha == 1.0:
# expovariate(1/beta)
u = random()
while u <= 1e-7:
u = random()
return -_log(u) * beta
else: # alpha is between 0 and 1 (exclusive)
# Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
while 1:
u = random()
b = (_e + alpha)/_e
p = b*u
if p <= 1.0:
x = p ** (1.0/alpha)
else:
x = -_log((b-p)/alpha)
u1 = random()
if p > 1.0:
if u1 <= x ** (alpha - 1.0):
break
elif u1 <= _exp(-x):
break
return x * beta
## -------------------- Gauss (faster alternative) --------------------
def gauss(self, mu, sigma):
"""Gaussian distribution.
mu is the mean, and sigma is the standard deviation. This is
slightly faster than the normalvariate() function.
Not thread-safe without a lock around calls.
"""
# When x and y are two variables from [0, 1), uniformly
# distributed, then
#
# cos(2*pi*x)*sqrt(-2*log(1-y))
# sin(2*pi*x)*sqrt(-2*log(1-y))
#
# are two *independent* variables with normal distribution
# (mu = 0, sigma = 1).
# (Lambert Meertens)
# (corrected version; bug discovered by Mike Miller, fixed by LM)
# Multithreading note: When two threads call this function
# simultaneously, it is possible that they will receive the
# same return value. The window is very small though. To
# avoid this, you have to use a lock around all calls. (I
# didn't want to slow this down in the serial case by using a
# lock here.)
random = self.random
z = self.gauss_next
self.gauss_next = None
if z is None:
x2pi = random() * TWOPI
g2rad = _sqrt(-2.0 * _log(1.0 - random()))
z = _cos(x2pi) * g2rad
self.gauss_next = _sin(x2pi) * g2rad
return mu + z*sigma
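    # Note on the pairing above (illustration, not part of CPython): the
    # Box-Muller transform yields two independent deviates per (cos, sin)
    # computation, so consecutive calls alternate between computing a fresh
    # pair and returning the cached sin deviate -- one log/sqrt is amortized
    # over two samples, which is the speed edge over normalvariate().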
## -------------------- beta --------------------
## See
## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
## for Ivan Frohne's insightful analysis of why the original implementation:
##
## def betavariate(self, alpha, beta):
## # Discrete Event Simulation in C, pp 87-88.
##
## y = self.expovariate(alpha)
## z = self.expovariate(1.0/beta)
## return z/(y+z)
##
## was dead wrong, and how it probably got that way.
def betavariate(self, alpha, beta):
"""Beta distribution.
Conditions on the parameters are alpha > 0 and beta > 0.
Returned values range between 0 and 1.
"""
# This version due to Janne Sinkkonen, and matches all the std
# texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
y = self.gammavariate(alpha, 1.0)
if y == 0:
return 0.0
else:
return y / (y + self.gammavariate(beta, 1.0))
## -------------------- Pareto --------------------
def paretovariate(self, alpha):
"""Pareto distribution. alpha is the shape parameter."""
# Jain, pg. 495
u = 1.0 - self.random()
return 1.0 / u ** (1.0/alpha)
## -------------------- Weibull --------------------
def weibullvariate(self, alpha, beta):
"""Weibull distribution.
alpha is the scale parameter and beta is the shape parameter.
"""
# Jain, pg. 499; bug fix courtesy Bill Arms
u = 1.0 - self.random()
return alpha * (-_log(u)) ** (1.0/beta)
## --------------- Operating System Random Source ------------------
class SystemRandom(Random):
"""Alternate random number generator using sources provided
by the operating system (such as /dev/urandom on Unix or
CryptGenRandom on Windows).
Not available on all systems (see os.urandom() for details).
"""
def random(self):
"""Get the next random number in the range [0.0, 1.0)."""
return (int.from_bytes(_urandom(7), 'big') >> 3) * RECIP_BPF
def getrandbits(self, k):
"""getrandbits(k) -> x. Generates an int with k random bits."""
if k <= 0:
raise ValueError('number of bits must be greater than zero')
if k != int(k):
raise TypeError('number of bits should be an integer')
numbytes = (k + 7) // 8 # bits / 8 and rounded up
x = int.from_bytes(_urandom(numbytes), 'big')
return x >> (numbytes * 8 - k) # trim excess bits
def seed(self, *args, **kwds):
"Stub method. Not used for a system random number generator."
return None
def _notimplemented(self, *args, **kwds):
"Method should not be called for a system random number generator."
raise NotImplementedError('System entropy source does not have state.')
getstate = setstate = _notimplemented
## -------------------- test program --------------------
def _test_generator(n, func, args):
import time
print(n, 'times', func.__name__)
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t0 = time.time()
for i in range(n):
x = func(*args)
total += x
sqsum = sqsum + x*x
smallest = min(x, smallest)
largest = max(x, largest)
t1 = time.time()
print(round(t1-t0, 3), 'sec,', end=' ')
avg = total/n
stddev = _sqrt(sqsum/n - avg*avg)
print('avg %g, stddev %g, min %g, max %g\n' % \
(avg, stddev, smallest, largest))
def _test(N=2000):
_test_generator(N, random, ())
_test_generator(N, normalvariate, (0.0, 1.0))
_test_generator(N, lognormvariate, (0.0, 1.0))
_test_generator(N, vonmisesvariate, (0.0, 1.0))
_test_generator(N, gammavariate, (0.01, 1.0))
_test_generator(N, gammavariate, (0.1, 1.0))
_test_generator(N, gammavariate, (0.1, 2.0))
_test_generator(N, gammavariate, (0.5, 1.0))
_test_generator(N, gammavariate, (0.9, 1.0))
_test_generator(N, gammavariate, (1.0, 1.0))
_test_generator(N, gammavariate, (2.0, 1.0))
_test_generator(N, gammavariate, (20.0, 1.0))
_test_generator(N, gammavariate, (200.0, 1.0))
_test_generator(N, gauss, (0.0, 1.0))
_test_generator(N, betavariate, (3.0, 3.0))
_test_generator(N, triangular, (0.0, 1.0, 1.0/3.0))
# Create one instance, seeded from current time, and export its methods
# as module-level functions. The functions share state across all uses
# (both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
choices = _inst.choices
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
getrandbits = _inst.getrandbits
if hasattr(_os, "fork"):
_os.register_at_fork(after_in_child=_inst.seed)
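# A minimal usage sketch of the module-level API exported above (not part of
# CPython's random.py; the seed value is arbitrary):
def _usage_demo():
    seed(12345)
    print(randrange(0, 100, 7))                    # a multiple of 7 below 100
    print(randint(1, 6))                           # inclusive, like a die roll
    print(sample(range(10), 3))                    # 3 distinct values
    print(choices('abc', weights=[5, 3, 2], k=4))  # weighted, with replacement
    print(gauss(0.0, 1.0))                         # one standard normal deviate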
if __name__ == '__main__':
_test()
|
kenshay/ImageScript
|
Script_Runner/PYTHON/Lib/random.py
|
Python
|
gpl-3.0
| 27,484
|
[
"Gaussian"
] |
c83be042e9a868d29d7db05e68dd8e10b567a44697a76b702e6dae2d5ba5591e
|
from ase.dft.kpoints import monkhorst_pack
assert [0, 0, 0] in monkhorst_pack((1, 3, 5)).tolist()
assert [0, 0, 0] not in monkhorst_pack((1, 3, 6)).tolist()
assert len(monkhorst_pack((3, 4, 6))) == 3 * 4 * 6
from ase.units import Hartree, Bohr, kJ, mol, kcal, kB, fs
print Hartree, Bohr, kJ/mol, kcal/mol, kB*300, fs, 1/fs
from ase.lattice import bulk
hcp = bulk('X', 'hcp', a=1) * (2, 2, 1)
assert abs(hcp.get_distance(0, 3, mic=True) - 1) < 1e-12
assert abs(hcp.get_distance(0, 4, mic=True) - 1) < 1e-12
assert abs(hcp.get_distance(2, 5, mic=True) - 1) < 1e-12
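# A minimal sketch of why the monkhorst_pack asserts above hold (appended,
# not in the original test): along one axis a Monkhorst-Pack grid samples
# (2*r - n - 1) / (2*n) for r = 1..n, so the Gamma point (0) is present
# exactly when every n is odd.
def mp_axis(n):
    return [(2.0 * r - n - 1) / (2 * n) for r in range(1, n + 1)]
assert 0.0 in mp_axis(5)
assert 0.0 not in mp_axis(6)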
|
grhawk/ASE
|
tools/ase/test/things.py
|
Python
|
gpl-2.0
| 568
|
[
"ASE"
] |
369f8022bca68d3f2357ea7eaf3949ed32b858c88daed152063c7e21ea6b773d
|
#! /usr/bin/env python
from StringIO import StringIO
import numpy as np
###
def loadstate(logfn, dumpfn):
f = open(logfn)
l = f.readline()
while len(l.split()) == 0 or l.split()[0] != 'Step':
l = f.readline()
l = f.readline().split()
ect, ecoul, elong, lx = map(float, [l[4], l[6], l[5], l[8]])
f.close()
types, charges = np.loadtxt(dumpfn, skiprows=9, usecols=[1, 5], unpack=True)
return lx, ect, ecoul, elong, types, charges
###
M = 1.74756459463318219 # Madelung constant of Na-Cl
nat = 64 # Number of atoms
X = 1.084
U = 5.0
V = 5.0
p = 2
verbose = False
###
for a0 in [3.0, 4.0, 5.0, 7.0, 10.0]:
if verbose:
print '=== log.lammps.{}, dump.custom.{} ==='.format(a0, a0)
lx, ect, ecoul, elong, types, charges = loadstate('log.lammps.{}'.format(a0), 'dump.custom.{}'.format(a0))
charges1 = charges[types==1]
charges2 = charges[types==2]
# Charges should have equal magnitude but opposite sign
assert np.all(np.abs(charges1+charges2) < 1e-6)
# Check Coulomb energy
r0 = lx/2 # Lattice constant
charge = np.mean(charges1-charges2)/2
ecoul_check = -charge**2/r0 * M * nat
if verbose:
print 'Coulomb energy error:', (ecoul+elong-ecoul_check)/ecoul_check
assert abs(ecoul+elong-ecoul_check) < 1e-5
# Check energy from charge-transfer model
ect_check = -X*charge
ect_check += 0.5*U*charge**2
ect_check += 0.5*V*charge**p
ect_check *= nat
if verbose:
print 'CT energy error:', (ect-ect_check)/ect_check
assert abs(ect-ect_check) < 1e-6
# Check charges
# Total energy: -q**2*M/r0 - X*q + 0.5*(U+V)*q**2
# Derivative: -2*q*M/r0 - X + (U+V)*q
# Equilibrium at
charge_check = X/(U+V-2*M/r0)
if verbose:
print 'Charge error:', abs(charge-charge_check)/charge_check
assert abs(charge-charge_check) < 1e-6
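# A quick analytic check of charge_check (appended, not in the original test):
# it is the stationary point of the per-atom model energy
#   E(q) = -M*q**2/r0 - X*q + 0.5*(U + V)*q**2,
# so the derivative must vanish there (uses r0 from the last loop pass).
dE = lambda q: -2 * q * M / r0 - X + (U + V) * q
assert abs(dE(X / (U + V - 2 * M / r0))) < 1e-12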
|
Atomistica/lammps-bsct
|
tests/TEST_NaCl/eval.py
|
Python
|
gpl-2.0
| 2,001
|
[
"LAMMPS"
] |
11d47cdc3ddc85c18c8d5753a37868c446196045fd5c6136244858bc54121c0d
|
# cell.py ---
#
# Filename: cell.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Fri Mar 9 23:17:17 2012 (+0530)
# Version:
# Last-Updated: Fri Jul 10 15:56:23 2015 (+0530)
# By: subha
# Update #: 692
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
# 2012-03-09 23:17:24 (+0530) Subha started porting the cell.py file
# from old moose to dh_branch.
#
# Code:
import csv
import numpy as np
from collections import defaultdict
import moose
import config
from config import logger
import nachans
import kchans
import archan
import cachans
import capool
from channelinit import init_chanlib
channel_types = ['ar',
'cad',
'cal',
'cat',
'k2',
'ka',
'kahp',
'kc',
'kdr',
'km',
'naf',
'naf2',
'nap',
'napf']
channel_type_dict = {
'cad': ['CaPool'],
'km': ['KM'],
'ar': ['AR'],
'cal': ['CaL'],
'cat':['CaT', 'CaT_A'],
'k2': ['K2'],
'ka': ['KA', 'KA_IB'],
'kahp': ['KAHP', 'KAHP_DP','KAHP_SLOWER'],
'kc': ['KC', 'KC_FAST'],
'kdr': ['KDR', 'KDR_FS'],
'nap':['NaP'],
'naf': ['NaF', 'NaF_TCR'],
'napf': ['NaPF', 'NaPF_SS','NaPF_TCR'],
'naf2': ['NaF2', 'NaF2_nRT']}
def read_keyvals(filename):
"""Read the mapping between key value pairs from file.
The file filename should have two columns:
key value
"""
ret = defaultdict(set)
try:
with(open(filename, 'r')) as level_file:
for line in level_file:
tokens = line.split()
if not tokens:
continue
if len(tokens) != 2:
print filename, ' - Tokens: ', tokens, len(tokens)
return None
ret[tokens[1]].add(tokens[0])
except IOError:
config.logger.info('No such file %s' % (filename))
return ret
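def _demo_read_keyvals():
    """A minimal sketch (not used by the model) showing the inversion that
    read_keyvals performs on a hypothetical two-column file."""
    import os
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.levels', delete=False) as tf:
        tf.write('comp_1 1\ncomp_2 1\ncomp_3 2\n')
    mapping = read_keyvals(tf.name)
    os.remove(tf.name)
    # Values map to the set of keys that carried them.
    assert mapping['1'] == set(['comp_1', 'comp_2'])
    assert mapping['2'] == set(['comp_3'])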
def adjust_chanlib(cdict):
"""Update the revarsal potentials for channels. Set the initial X
value for AR channel. Set the tau for Ca pool."""
channel_dict = init_chanlib()
for ch in channel_dict.values():
config.logger.info('adjusting properties of %s' % (ch.path))
if isinstance(ch, kchans.KChannel):
ch.Ek = cdict['EK']
elif isinstance(ch, nachans.NaChannel):
ch.Ek = cdict['ENa']
elif isinstance(ch, cachans.CaChannel):
ch.Ek = cdict['ECa']
elif isinstance(ch, archan.AR):
ch.Ek = cdict['EAR']
if 'X_AR' in cdict:
ch.X = cdict['X_AR']
elif isinstance(ch, moose.CaConc):
ch.tau = cdict['TauCa']
if isinstance(ch, moose.HHChannel):
config.logger.debug('%s.Ek = %g' % (ch.path, ch.Ek))
def read_prototype(celltype, cdict):
"""Read the cell prototype file for the specified class. The
channel properties are updated using values in cdict."""
filename = '%s/%s.p' % (config.modelSettings.protodir, celltype)
logger.debug('Reading prototype file %s' % (filename))
adjust_chanlib(cdict)
cellpath = '%s/%s' % (config.modelSettings.libpath, celltype)
if moose.exists(cellpath):
return moose.element(cellpath)
for handler in logger.handlers:
handler.flush()
proto = moose.loadModel(filename, cellpath)
    # If prototype files do not have absolute compartment positions,
    # set the compartment positions to the origin. This avoids
    # incorrect assignment of positions when the x/y/z values in the
    # prototype file are only there to set the compartment length.
if not config.modelSettings.morph_has_postion:
for comp in moose.wildcardFind('%s/#[TYPE=Compartment]' % (proto.path)):
comp.x = 0.0
comp.y = 0.0
comp.z = 0.0
leveldict = read_keyvals('%s/%s.levels' % (config.modelSettings.protodir, celltype))
depths = read_keyvals('%s/%s.depths' % (config.modelSettings.protodir, celltype))
depthdict = {}
for level, depthset in depths.items():
if len(depthset) != 1:
raise Exception('Depth set must have only one entry.')
depthdict[level] = depthset.pop()
assign_depths(proto, depthdict, leveldict)
config.logger.debug('Read %s with %d compartments' % (celltype, len(moose.wildcardFind('%s/#[TYPE=Compartment]' % (proto.path)))))
return proto
def assign_depths(cell, depthdict, leveldict):
"""Assign depths to the compartments in the cell. The original
model assigns sets of compartments to particular levels and a
depth is specified for each level. This should not be required if
we have the z value in prototype file.
cell : (prototype) cell instance
depth : dict mapping level no. to physical depth
level : dict mapping level no. to compartment nos. belonging to
that level.
"""
if not depthdict:
return
for level, depth in depthdict.items():
z = float(depth)
complist = leveldict[level]
for comp_number in complist:
comp = moose.element('%s/comp_%s' % (cell.path, comp_number))
comp.z = z
class CellMeta(type):
def __new__(cls, name, bases, cdict):
if name != 'CellBase':
proto = read_prototype(name, cdict)
annotation = None
if 'annotation' in cdict:
annotation = cdict['annotation']
else:
for base in bases:
if hasattr(base, 'annotation'):
annotation = base.annotation
break
if annotation is not None:
info = moose.Annotator('%s/info' % (proto.path))
info.notes = '\n'.join('"%s": "%s"' % kv for kv in annotation.items())
if 'soma_tauCa' in cdict:
moose.element(proto.path + '/comp_1/CaPool').tau = cdict['soma_tauCa']
cdict['prototype'] = proto
return type.__new__(cls, name, bases, cdict)
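# A standalone sketch of the pattern CellMeta implements (not used by the
# model): a metaclass __new__ runs once per *subclass definition*, so every
# concrete cell class gets its prototype attached at class-creation time.
# The string below merely stands in for read_prototype().
class _DemoMeta(type):
    def __new__(cls, name, bases, cdict):
        if name != '_DemoBase':
            cdict['prototype'] = 'proto:' + name
        return type.__new__(cls, name, bases, cdict)
class _DemoBase(object):
    __metaclass__ = _DemoMeta
class _DemoCell(_DemoBase):
    pass
assert _DemoCell.prototype == 'proto:_DemoCell'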
class CellBase(moose.Neuron):
__metaclass__ = CellMeta
annotation = {'cno': 'cno_0000020'}
def __init__(self, path):
if not moose.exists(path):
path_tokens = path.rpartition('/')
moose.copy(self.prototype, path_tokens[0], path_tokens[-1])
moose.Neutral.__init__(self, path)
        self.solver = moose.HSolve('{}/solver'.format(path))
self.solver.target = path
self.solver.dt = config.simulationSettings.simulationDt
def comp(self, number):
path = '%s/comp_%d' % (self.path, number)
return moose.element(path)
@property
def soma(self):
return self.comp(1)
@property
def presynaptic(self):
"""Presynaptic compartment. Each subclass should define
_presynaptic as the index of this compartment."""
return self.comp(self.__class__._presynaptic)
def dump_cell(self, file_path):
"""Dump the cell information compartment by compartment for
comparison with NEURON in csv format. All parameters are
converted to SI units."""
with open(file_path, 'w') as dump_file:
fieldnames = ["comp", "len", "dia", "sarea", "xarea", "Em", "Cm","Rm","Ra"]
for chtype in channel_types:
if chtype != 'cad':
fieldnames += ['e_' + chtype, 'gbar_' + chtype]
else:
fieldnames += ['tau_' + chtype, 'beta_' + chtype]
# print fieldnames
writer = csv.DictWriter(dump_file, fieldnames=fieldnames, delimiter=',')
writer.writeheader()
comps = moose.wildcardFind('%s/##[TYPE=Compartment]' % (self.path))
comps = sorted(comps, key=lambda x: int(x.name[0].rpartition('_')[-1]))
for comp_e in comps:
comp = moose.element(comp_e)
row = {}
row['comp'] = comp.name
row['len'] = comp.length
row['dia'] = comp.diameter
row['sarea'] = comp.length * comp.diameter * np.pi
row['xarea'] = comp.diameter * comp.diameter * np.pi/4
row['Em'] = comp.Em
row['Cm'] = comp.Cm
row['Rm'] = comp.Rm
row['Ra'] = comp.Ra
if moose.exists(comp.path + '/CaPool'):
ca_pool = moose.CaConc(comp.path + '/CaPool')
for chtype in channel_types:
found = False
for chname in channel_type_dict[chtype]:
chpath = comp.path + '/' + chname
if moose.exists(chpath):
found = True
channel = moose.element(chpath)
if channel.className == 'HHChannel':
row['e_'+chtype] = channel.Ek
row['gbar_'+chtype] = channel.Gbar
elif channel.className == 'CaConc':
row['tau_cad'] = channel.tau
row['beta_cad'] = channel.B
break
if not found:
if chtype != 'cad':
row['e_'+chtype] = 0.0
row['gbar_'+chtype] = 0.0
else:
row['tau_cad'] = 0.0
row['beta_cad'] = 0.0
writer.writerow(row)
class SupPyrRS(CellBase):
_presynaptic = 72
ENa = 50e-3
EK = -95e-3
ECa = 125e-3
EAR = -35e-3
EGABA = -81e-3
TauCa = 20e-3
soma_tauCa = 100e-3
def __init__(self, path):
CellBase.__init__(self, path)
class SupPyrFRB(CellBase):
_presynaptic = 72
ENa = 50e-3
EK = -95e-3
EAR = -35e-3
ECa = 125e-3
EGABA = -81e-3
TauCa = 20e-3
soma_tauCa = 100e-3
def __init__(self, path):
CellBase.__init__(self, path)
class SupLTS(CellBase):
_presynaptic = 59
ENa = 50e-3
EK = -100e-3
ECa = 125e-3
EAR = -40e-3 # dummy to set things back to original
EGABA = -75e-3 # Sanchez-Vives et al. 1997
TauCa = 20e-3
X_AR = 0.25
soma_tauCa = 50e-3
def __init__(self, path):
CellBase.__init__(self, path)
class SupAxoaxonic(CellBase):
_presynaptic = 59
ENa = 50e-3
EK = -100e-3
ECa = 125e-3
EAR = -40e-3
EGABA = -75e-3
X_AR = 0.0
TauCa = 20e-3
soma_tauCa = 50e-3
def __init__(self, path):
CellBase.__init__(self, path)
class SupBasket(CellBase):
_presynaptic = 59
ENa = 50e-3
EK = -100e-3
EAR = -40e-3
ECa = 125e-3
EGABA = -75e-3 # Sanchez-Vives et al. 1997
TauCa = 20e-3
X_AR = 0.0
soma_tauCa = 50e-3
def __init__(self, path):
CellBase.__init__(self, path)
class SpinyStellate(CellBase):
"""Spiny-stellate cells of layer 4."""
_presynaptic = 57
ENa = 50e-3
EK = -100e-3
EAR = -40e-3
ECa = 125e-3
EGABA = -75e-3
TauCa = 20e-3
X_AR = 0.0
soma_tauCa = 50e-3
def __init__(self, path):
CellBase.__init__(self, path)
class NontuftedRS(CellBase):
_presynaptic = 48
ENa = 50e-3
EK = -95e-3
EAR = -35e-3
ECa = 125e-3
EGABA = -75e-3 # Sanchez-Vives et al. 1997
TauCa = 20e-3
X_AR = 0.25
soma_tauCa = 100e-3
def __init__(self, path):
CellBase.__init__(self, path)
class TuftedIB(CellBase):
_presynaptic = 60
ENa = 50e-3
EK = -95e-3
EAR = -35e-3
ECa = 125e-3
EGABA = -75e-3 # Sanchez-Vives et al. 1997
TauCa = 1e-3/0.075
X_AR = 0.25
soma_tauCa = 100e-3
def __init__(self, path):
CellBase.__init__(self, path)
    # Compartments in level 2 (comp_2, comp_5 and comp_6) use tauCa = 1e-3/0.02.
@classmethod
def post_init(cls):
moose.element(cls.prototype.path + '/comp_2/CaPool').tau = 1e-3/0.02
moose.element(cls.prototype.path + '/comp_5/CaPool').tau = 1e-3/0.02
moose.element(cls.prototype.path + '/comp_6/CaPool').tau = 1e-3/0.02
TuftedIB.post_init()
class TuftedRS(CellBase):
_presynaptic = 60
ENa = 50e-3
EK = -95e-3
EAR = -35e-3
ECa = 125e-3
EGABA = -75e-3 # Sanchez-Vives et al. 1997
TauCa = 1e-3/0.075
X_AR = 0.25
soma_tauCa = 100e-3
def __init__(self, path):
CellBase.__init__(self, path)
@classmethod
def post_init(cls):
moose.element(cls.prototype.path + '/comp_2/CaPool').tau = 1e-3/0.02
moose.element(cls.prototype.path + '/comp_5/CaPool').tau = 1e-3/0.02
moose.element(cls.prototype.path + '/comp_6/CaPool').tau = 1e-3/0.02
TuftedRS.post_init()
class DeepLTS(CellBase):
_presynaptic = 59
ENa = 50e-3
EK = -100e-3
EAR = -40e-3
ECa = 125e-3
EGABA = -75e-3 # Sanchez-Vives et al. 1997
TauCa = 20e-3
X_AR = 0.25
soma_tauCa = 50e-3
def __init__(self, path):
CellBase.__init__(self, path)
class DeepAxoaxonic(CellBase):
_presynaptic = 59
ENa = 50e-3
EK = -100e-3
ECa = 125e-3
EAR = -40e-3
EGABA = -75e-3
X_AR = 0.0
TauCa = 20e-3
soma_tauCa = 50e-3
def __init__(self, path):
CellBase.__init__(self, path)
class DeepBasket(CellBase):
_presynaptic = 59
ENa = 50e-3
EK = -100e-3
EAR = -40e-3
ECa = 125e-3
EGABA = -75e-3 # Sanchez-Vives et al. 1997
TauCa = 20e-3
X_AR = 0.25
soma_tauCa = 50e-3
def __init__(self, path):
CellBase.__init__(self, path)
class TCR(CellBase):
_presynaptic = 135
ENa = 50e-3
EK = -95e-3
EAR = -35e-3
ECa = 125e-3
EGABA = -81e-3
TauCa = 20e-3
X_AR = 0.25
soma_tauCa = 50e-3
def __init__(self, path):
CellBase.__init__(self, path)
class nRT(CellBase):
_presynaptic = 59
ENa = 50e-3
EK = -100e-3
EAR = -40e-3
ECa = 125e-3
EGABA = -75e-3 # Sanchez-Vives et al. 1997
TauCa = 20e-3
X_AR = 0.0
soma_tauCa = 50e-3
def __init__(self, path):
CellBase.__init__(self, path)
_cellprototypes = {}
def init_prototypes():
global _cellprototypes
if _cellprototypes:
return _cellprototypes
_cellprototypes = {
'SupPyrRS': SupPyrRS(SupPyrRS.prototype.path),
'SupPyrFRB': SupPyrFRB(SupPyrFRB.prototype.path),
'SupLTS': SupLTS(SupLTS.prototype.path),
'SupAxoaxonic': SupAxoaxonic(SupAxoaxonic.prototype.path),
'SupBasket': SupBasket(SupBasket.prototype.path),
'SpinyStellate': SpinyStellate(SpinyStellate.prototype.path),
'NontuftedRS': NontuftedRS(NontuftedRS.prototype.path),
'TuftedIB': TuftedIB(TuftedIB.prototype.path),
'TuftedRS': TuftedRS(TuftedRS.prototype.path),
'DeepLTS': DeepLTS(DeepLTS.prototype.path),
'DeepAxoaxonic': DeepAxoaxonic(DeepAxoaxonic.prototype.path),
'DeepBasket': DeepBasket(DeepBasket.prototype.path),
'TCR': TCR(TCR.prototype.path),
'nRT': nRT(nRT.prototype.path),
}
return _cellprototypes
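# A minimal usage sketch (requires a working MOOSE setup and the prototype
# files referenced by config.modelSettings):
#   protos = init_prototypes()            # build every prototype exactly once
#   cell = SupPyrRS('/model/SupPyrRS_0')  # copies the prototype into place
#   cell.soma                             # comp_1 of the new cell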
#
# cells.py ends here
|
dilawar/moose-full
|
moose-examples/traub_2005/py/cells.py
|
Python
|
gpl-2.0
| 15,472
|
[
"MOOSE",
"NEURON"
] |
0b7e127598697b75c3745ebe7ec6965699b0cdc279e29611edbc34508abacf9d
|
"""network2.py
~~~~~~~~~~~~~~
An improved version of network.py, implementing the stochastic
gradient descent learning algorithm for a feedforward neural network.
Improvements include the addition of the cross-entropy cost function,
regularization, and better initialization of network weights. Note
that I have focused on making the code simple, easily readable, and
easily modifiable. It is not optimized, and omits many desirable
features.
"""
#### Libraries
# Standard library
import json
import random
import sys
# Third-party libraries
import numpy as np
#### Define the quadratic and cross-entropy cost functions
class QuadraticCost(object):
@staticmethod
def fn(a, y):
"""Return the cost associated with an output ``a`` and desired output
``y``.
"""
return 0.5*np.linalg.norm(a-y)**2
@staticmethod
def delta(z, a, y):
"""Return the error delta from the output layer."""
return (a-y) * sigmoid_prime(z)
class CrossEntropyCost(object):
@staticmethod
def fn(a, y):
"""Return the cost associated with an output ``a`` and desired output
``y``. Note that np.nan_to_num is used to ensure numerical
stability. In particular, if both ``a`` and ``y`` have a 1.0
in the same slot, then the expression (1-y)*np.log(1-a)
returns nan. The np.nan_to_num ensures that that is converted
to the correct value (0.0).
"""
return np.sum(np.nan_to_num(-y*np.log(a)-(1-y)*np.log(1-a)))
@staticmethod
def delta(z, a, y):
"""Return the error delta from the output layer. Note that the
parameter ``z`` is not used by the method. It is included in
the method's parameters in order to make the interface
consistent with the delta method for other cost classes.
"""
return (a-y)
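# A minimal numerical sketch (not in the original file): a finite-difference
# check that each cost's ``delta`` really is dC/dz at the output layer. The
# step h is an assumed value; sigmoid/sigmoid_prime are defined at the end of
# this file and are resolved when the function is called.
def _check_cost_deltas(h=1e-6):
    z, y = np.array([[0.3]]), np.array([[1.0]])
    a = sigmoid(z)
    for cost in (QuadraticCost, CrossEntropyCost):
        numeric = (cost.fn(sigmoid(z + h), y) -
                   cost.fn(sigmoid(z - h), y)) / (2 * h)
        assert abs(cost.delta(z, a, y).item() - float(numeric)) < 1e-6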
#### Main Network class
class Network(object):
def __init__(self, sizes, cost=CrossEntropyCost):
"""The list ``sizes`` contains the number of neurons in the respective
layers of the network. For example, if the list was [2, 3, 1]
then it would be a three-layer network, with the first layer
containing 2 neurons, the second layer 3 neurons, and the
third layer 1 neuron. The biases and weights for the network
are initialized randomly, using
``self.default_weight_initializer`` (see docstring for that
method).
"""
self.num_layers = len(sizes)
self.sizes = sizes
self.default_weight_initializer()
self.cost=cost
def default_weight_initializer(self):
"""Initialize each weight using a Gaussian distribution with mean 0
and standard deviation 1 over the square root of the number of
weights connecting to the same neuron. Initialize the biases
using a Gaussian distribution with mean 0 and standard
deviation 1.
Note that the first layer is assumed to be an input layer, and
by convention we won't set any biases for those neurons, since
biases are only ever used in computing the outputs from later
layers.
"""
self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
self.weights = [np.random.randn(y, x)/np.sqrt(x)
for x, y in zip(self.sizes[:-1], self.sizes[1:])]
def large_weight_initializer(self):
"""Initialize the weights using a Gaussian distribution with mean 0
and standard deviation 1. Initialize the biases using a
Gaussian distribution with mean 0 and standard deviation 1.
Note that the first layer is assumed to be an input layer, and
by convention we won't set any biases for those neurons, since
biases are only ever used in computing the outputs from later
layers.
This weight and bias initializer uses the same approach as in
Chapter 1, and is included for purposes of comparison. It
will usually be better to use the default weight initializer
instead.
"""
self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
self.weights = [np.random.randn(y, x)
for x, y in zip(self.sizes[:-1], self.sizes[1:])]
def feedforward(self, a):
"""Return the output of the network if ``a`` is input."""
for b, w in zip(self.biases, self.weights):
a = sigmoid(np.dot(w, a)+b)
return a
def SGD(self, training_data, epochs, mini_batch_size, eta,
lmbda = 0.0,
evaluation_data=None,
monitor_evaluation_cost=False,
monitor_evaluation_accuracy=False,
monitor_training_cost=False,
monitor_training_accuracy=False):
"""Train the neural network using mini-batch stochastic gradient
descent. The ``training_data`` is a list of tuples ``(x, y)``
representing the training inputs and the desired outputs. The
other non-optional parameters are self-explanatory, as is the
regularization parameter ``lmbda``. The method also accepts
``evaluation_data``, usually either the validation or test
data. We can monitor the cost and accuracy on either the
evaluation data or the training data, by setting the
appropriate flags. The method returns a tuple containing four
lists: the (per-epoch) costs on the evaluation data, the
accuracies on the evaluation data, the costs on the training
data, and the accuracies on the training data. All values are
evaluated at the end of each training epoch. So, for example,
if we train for 30 epochs, then the first element of the tuple
will be a 30-element list containing the cost on the
evaluation data at the end of each epoch. Note that the lists
are empty if the corresponding flag is not set.
"""
if evaluation_data: n_data = len(evaluation_data)
n = len(training_data)
evaluation_cost, evaluation_accuracy = [], []
training_cost, training_accuracy = [], []
for j in range(epochs):
random.shuffle(training_data)
mini_batches = [
training_data[k:k+mini_batch_size]
for k in range(0, n, mini_batch_size)]
for m, mini_batch in enumerate(mini_batches):
                # Print up to 50 progress dots per epoch; integer division
                # avoids the float modulo of the original line.
                if len(mini_batches) >= 50 and m % (len(mini_batches) // 50) == 0:
                    print('.', end='')
self.update_mini_batch(
mini_batch, eta, lmbda, len(training_data))
print()
print("Epoch %s training complete" % j)
if monitor_training_cost:
cost = self.total_cost(training_data, lmbda)
training_cost.append(cost)
print("Cost on training data: {}".format(cost))
if monitor_training_accuracy:
accuracy = self.accuracy(training_data, convert=True)
training_accuracy.append(accuracy)
print("Accuracy on training data: {} / {}".format(
accuracy, n))
if monitor_evaluation_cost:
cost = self.total_cost(evaluation_data, lmbda, convert=True)
evaluation_cost.append(cost)
print("Cost on evaluation data: {}".format(cost))
if monitor_evaluation_accuracy:
accuracy = self.accuracy(evaluation_data)
evaluation_accuracy.append(accuracy)
print("Accuracy on evaluation data: {} / {}".format(
self.accuracy(evaluation_data), n_data))
print()
return evaluation_cost, evaluation_accuracy, \
training_cost, training_accuracy
def update_mini_batch(self, mini_batch, eta, lmbda, n):
"""Update the network's weights and biases by applying gradient
descent using backpropagation to a single mini batch. The
``mini_batch`` is a list of tuples ``(x, y)``, ``eta`` is the
learning rate, ``lmbda`` is the regularization parameter, and
``n`` is the total size of the training data set.
"""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
for x, y in mini_batch:
delta_nabla_b, delta_nabla_w = self.backprop(x, y)
nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
self.weights = [(1-eta*(lmbda/n))*w-(eta/len(mini_batch))*nw
for w, nw in zip(self.weights, nabla_w)]
self.biases = [b-(eta/len(mini_batch))*nb
for b, nb in zip(self.biases, nabla_b)]
def backprop(self, x, y):
"""Return a tuple ``(nabla_b, nabla_w)`` representing the
gradient for the cost function C_x. ``nabla_b`` and
``nabla_w`` are layer-by-layer lists of numpy arrays, similar
to ``self.biases`` and ``self.weights``."""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
# feedforward
activation = x
activations = [x] # list to store all the activations, layer by layer
zs = [] # list to store all the z vectors, layer by layer
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation)+b
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
# backward pass
delta = (self.cost).delta(zs[-1], activations[-1], y)
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
# Note that the variable l in the loop below is used a little
# differently to the notation in Chapter 2 of the book. Here,
# l = 1 means the last layer of neurons, l = 2 is the
# second-last layer, and so on. It's a renumbering of the
# scheme in the book, used here to take advantage of the fact
# that Python can use negative indices in lists.
for l in range(2, self.num_layers):
z = zs[-l]
sp = sigmoid_prime(z)
delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
nabla_b[-l] = delta
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
return (nabla_b, nabla_w)
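    def _backprop_check(self, h=1e-5):
        """A minimal gradient-checking sketch (not in the original file):
        central-difference check that backprop's gradient for one weight
        matches a numerical gradient on a random example; h is an assumed
        step size."""
        x = np.random.randn(self.sizes[0], 1)
        y = np.zeros((self.sizes[-1], 1))
        y[0] = 1.0
        nabla_b, nabla_w = self.backprop(x, y)
        w = self.weights[0]
        w[0, 0] += h
        c_plus = self.cost.fn(self.feedforward(x), y)
        w[0, 0] -= 2 * h
        c_minus = self.cost.fn(self.feedforward(x), y)
        w[0, 0] += h  # restore the weight
        assert abs((c_plus - c_minus) / (2 * h) - nabla_w[0][0, 0]) < 1e-6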
def accuracy(self, data, convert=False):
"""Return the number of inputs in ``data`` for which the neural
network outputs the correct result. The neural network's
output is assumed to be the index of whichever neuron in the
final layer has the highest activation.
The flag ``convert`` should be set to False if the data set is
validation or test data (the usual case), and to True if the
data set is the training data. The need for this flag arises
due to differences in the way the results ``y`` are
represented in the different data sets. In particular, it
flags whether we need to convert between the different
representations. It may seem strange to use different
representations for the different data sets. Why not use the
same representation for all three data sets? It's done for
efficiency reasons -- the program usually evaluates the cost
on the training data and the accuracy on other data sets.
These are different types of computations, and using different
representations speeds things up. More details on the
representations can be found in
mnist_loader.load_data_wrapper.
"""
if convert:
results = [(np.argmax(self.feedforward(x)), np.argmax(y))
for (x, y) in data]
else:
results = [(np.argmax(self.feedforward(x)), y)
for (x, y) in data]
return sum(int(x == y) for (x, y) in results)
def total_cost(self, data, lmbda, convert=False):
"""Return the total cost for the data set ``data``. The flag
``convert`` should be set to False if the data set is the
training data (the usual case), and to True if the data set is
the validation or test data. See comments on the similar (but
reversed) convention for the ``accuracy`` method, above.
"""
cost = 0.0
for x, y in data:
a = self.feedforward(x)
if convert: y = vectorized_result(y)
cost += self.cost.fn(a, y)/len(data)
cost += 0.5*(lmbda/len(data))*sum(
np.linalg.norm(w)**2 for w in self.weights)
return cost
def save(self, filename):
"""Save the neural network to the file ``filename``."""
data = {"sizes": self.sizes,
"weights": [w.tolist() for w in self.weights],
"biases": [b.tolist() for b in self.biases],
"cost": str(self.cost.__name__)}
f = open(filename, "w")
json.dump(data, f)
f.close()
def evaluate(self, test_data):
"""Return the number of test inputs for which the neural
network outputs the correct result. Note that the neural
network's output is assumed to be the index of whichever
neuron in the final layer has the highest activation."""
test_results = [(np.argmax(self.feedforward(x)), y)
for (x, y) in test_data]
return sum(int(x == y) for (x, y) in test_results)
#### Loading a Network
def load(filename):
"""Load a neural network from the file ``filename``. Returns an
instance of Network.
"""
f = open(filename, "r")
data = json.load(f)
f.close()
cost = getattr(sys.modules[__name__], data["cost"])
net = Network(data["sizes"], cost=cost)
net.weights = [np.array(w) for w in data["weights"]]
net.biases = [np.array(b) for b in data["biases"]]
return net
#### Miscellaneous functions
def vectorized_result(j):
"""Return a 10-dimensional unit vector with a 1.0 in the j'th position
and zeroes elsewhere. This is used to convert a digit (0...9)
into a corresponding desired output from the neural network.
"""
e = np.zeros((10, 1))
e[j] = 1.0
return e
def sigmoid(z):
"""The sigmoid function."""
return 1.0/(1.0+np.exp(-z))
def sigmoid_prime(z):
"""Derivative of the sigmoid function."""
return sigmoid(z)*(1-sigmoid(z))
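# A minimal smoke test of the classes above (appended; the hyperparameters
# are assumed, not tuned): trains on a four-point AND-gate dataset.
if __name__ == '__main__':
    data = [(np.array([[float(a)], [float(b)]]), np.array([[float(a and b)]]))
            for a in (0, 1) for b in (0, 1)]
    net = Network([2, 2, 1])
    net.SGD(data, epochs=5, mini_batch_size=4, eta=3.0,
            monitor_training_cost=True)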
|
haphaeu/yoshimi
|
neural_networks/mnist/network2.py
|
Python
|
lgpl-3.0
| 14,535
|
[
"Gaussian",
"NEURON"
] |
752a1ce561192cd8f9ee6c53defb463420c20d9388312d368fc81974a2333ed7
|
import re
import numpy as np
import scipy.sparse
import pytest
from sklearn.datasets import load_digits, load_iris
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.naive_bayes import MultinomialNB, ComplementNB
from sklearn.naive_bayes import CategoricalNB
DISCRETE_NAIVE_BAYES_CLASSES = [
BernoulliNB, CategoricalNB, ComplementNB, MultinomialNB]
ALL_NAIVE_BAYES_CLASSES = DISCRETE_NAIVE_BAYES_CLASSES + [GaussianNB]
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
with pytest.raises(
ValueError,
match="The target label.* in y do not exist in the initial classes"
):
GaussianNB().partial_fit(X, y, classes=[0, 1])
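# A minimal by-hand sketch (hypothetical helper, not sklearn API) of what
# test_gnb exercises: Gaussian NB prediction from per-class means and
# variances, ignoring sklearn's var_smoothing. Expects numpy arrays; e.g.
# _gnb_by_hand(X, y, np.array([-2, -1])) returns 1.
def _gnb_by_hand(X_train, y_train, x_query):
    classes = np.unique(y_train)
    scores = []
    for c in classes:
        Xc = X_train[y_train == c]
        log_prior = np.log(Xc.shape[0] / X_train.shape[0])
        mu, var = Xc.mean(axis=0), Xc.var(axis=0)
        log_lik = -0.5 * np.sum(np.log(2 * np.pi * var)
                                + (x_query - mu) ** 2 / var)
        scores.append(log_prior + log_lik)
    return classes[int(np.argmax(scores))]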
# TODO remove in 1.2 once sigma_ attribute is removed (GH #18842)
def test_gnb_var():
clf = GaussianNB()
clf.fit(X, y)
with pytest.warns(FutureWarning, match="Attribute sigma_ was deprecated"):
assert_array_equal(clf.sigma_, clf.var_)
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf = GaussianNB().fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.var_, clf_sw.var_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.var_, clf2.var_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.var_, clf_sw.var_)
def test_gnb_neg_priors():
"""Test whether an error is raised in case of negative priors"""
clf = GaussianNB(priors=np.array([-1., 2.]))
msg = 'Priors must be non-negative'
with pytest.raises(ValueError, match=msg):
clf.fit(X, y)
def test_gnb_priors():
"""Test whether the class prior override is properly used"""
clf = GaussianNB(priors=np.array([0.3, 0.7])).fit(X, y)
assert_array_almost_equal(clf.predict_proba([[-0.1, -0.1]]),
np.array([[0.825303662161683,
0.174696337838317]]), 8)
assert_array_almost_equal(clf.class_prior_, np.array([0.3, 0.7]))
def test_gnb_priors_sum_isclose():
    # Test whether the class prior sum is properly validated.
X = np.array([[-1, -1], [-2, -1], [-3, -2], [-4, -5], [-5, -4],
[1, 1], [2, 1], [3, 2], [4, 4], [5, 5]])
priors = np.array([0.08, 0.14, 0.03, 0.16, 0.11, 0.16, 0.07, 0.14,
0.11, 0.0])
Y = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
clf = GaussianNB(priors=priors)
# smoke test for issue #9633
clf.fit(X, Y)
def test_gnb_wrong_nb_priors():
""" Test whether an error is raised if the number of prior is different
from the number of class"""
clf = GaussianNB(priors=np.array([.25, .25, .25, .25]))
msg = 'Number of priors must match number of classes'
with pytest.raises(ValueError, match=msg):
clf.fit(X, y)
def test_gnb_prior_greater_one():
"""Test if an error is raised if the sum of prior greater than one"""
clf = GaussianNB(priors=np.array([2., 1.]))
msg = 'The sum of the priors should be 1'
with pytest.raises(ValueError, match=msg):
clf.fit(X, y)
def test_gnb_prior_large_bias():
"""Test if good prediction when class prior favor largely one class"""
clf = GaussianNB(priors=np.array([0.01, 0.99]))
clf.fit(X, y)
assert clf.predict([[-0.1, -0.1]]) == np.array([2])
def test_gnb_check_update_with_no_data():
""" Test when the partial fit is called without any data"""
# Create an empty array
prev_points = 100
mean = 0.
var = 1.
x_empty = np.empty((0, X.shape[1]))
tmean, tvar = GaussianNB._update_mean_variance(prev_points, mean,
var, x_empty)
assert tmean == mean
assert tvar == var
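# A standalone sketch (not the sklearn implementation) of the pooled
# mean/variance update that GaussianNB._update_mean_variance performs,
# following the Chan, Golub & LeVeque combining formula.
def _pooled_mean_var(n_past, mu, var, X_new):
    n_new = X_new.shape[0]
    if n_new == 0:
        return mu, var
    mu_new, var_new = X_new.mean(axis=0), X_new.var(axis=0)
    n_total = n_past + n_new
    mu_total = (n_past * mu + n_new * mu_new) / n_total
    # Combine sums of squared deviations, then renormalize.
    ssd_total = (n_past * var + n_new * var_new
                 + (n_past * n_new / n_total) * (mu - mu_new) ** 2)
    return mu_total, ssd_total / n_total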
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.var_, clf_pf.var_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.var_, clf_pf2.var_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_gnb_naive_bayes_scale_invariance():
# Scaling the data should not change the prediction results
iris = load_iris()
X, y = iris.data, iris.target
labels = [GaussianNB().fit(f * X, y).predict(f * X)
for f in [1E-10, 1, 1E10]]
assert_array_equal(labels[0], labels[1])
assert_array_equal(labels[1], labels[2])
# TODO: Remove in version 1.1
@pytest.mark.parametrize('DiscreteNaiveBayes', DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_deprecated_coef_intercept(DiscreteNaiveBayes):
est = DiscreteNaiveBayes().fit(X2, y2)
for att in ["coef_", "intercept_"]:
with pytest.warns(FutureWarning):
hasattr(est, att)
@pytest.mark.parametrize('DiscreteNaiveBayes', DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_prior(DiscreteNaiveBayes):
# Test whether class priors are properly set.
clf = DiscreteNaiveBayes().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
@pytest.mark.parametrize('DiscreteNaiveBayes', DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_partial_fit(DiscreteNaiveBayes):
clf1 = DiscreteNaiveBayes()
clf1.fit([[0, 1], [1, 0], [1, 1]], [0, 1, 1])
clf2 = DiscreteNaiveBayes()
clf2.partial_fit([[0, 1], [1, 0], [1, 1]], [0, 1, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
if DiscreteNaiveBayes is CategoricalNB:
for i in range(len(clf1.category_count_)):
assert_array_equal(clf1.category_count_[i],
clf2.category_count_[i])
else:
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = DiscreteNaiveBayes()
# all categories have to appear in the first partial fit
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
clf3.partial_fit([[1, 1]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
if DiscreteNaiveBayes is CategoricalNB:
# the categories for each feature of CategoricalNB are mapped to an
# index chronologically with each call of partial fit and therefore
# the category_count matrices cannot be compared for equality
for i in range(len(clf1.category_count_)):
assert_array_equal(clf1.category_count_[i].shape,
clf3.category_count_[i].shape)
assert_array_equal(np.sum(clf1.category_count_[i], axis=1),
np.sum(clf3.category_count_[i], axis=1))
# assert category 0 occurs 1x in the first class and 0x in the 2nd
# class
assert_array_equal(clf1.category_count_[0][0], np.array([1, 0]))
# assert category 1 occurs 0x in the first class and 2x in the 2nd
# class
assert_array_equal(clf1.category_count_[0][1], np.array([0, 2]))
# assert category 0 occurs 0x in the first class and 1x in the 2nd
# class
assert_array_equal(clf1.category_count_[1][0], np.array([0, 1]))
# assert category 1 occurs 1x in the first class and 1x in the 2nd
# class
assert_array_equal(clf1.category_count_[1][1], np.array([1, 1]))
else:
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
@pytest.mark.parametrize('NaiveBayes', ALL_NAIVE_BAYES_CLASSES)
def test_NB_partial_fit_no_first_classes(NaiveBayes):
# classes is required for first call to partial fit
with pytest.raises(
ValueError,
match="classes must be passed on the first call to partial_fit."
):
NaiveBayes().partial_fit(X2, y2)
# check consistency of consecutive classes values
clf = NaiveBayes()
clf.partial_fit(X2, y2, classes=np.unique(y2))
with pytest.raises(
ValueError,
match="is not the same as on last call to partial_fit"
):
clf.partial_fit(X2, y2, classes=np.arange(42))
# TODO: Remove in version 1.1
@ignore_warnings(category=FutureWarning)
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for DiscreteNaiveBayes, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = DiscreteNaiveBayes().fit(X, y)
assert clf.predict(X[-1:]) == 2
assert clf.predict_proba([X[0]]).shape == (1, 2)
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for DiscreteNaiveBayes, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = DiscreteNaiveBayes().fit(X, y)
assert clf.predict_proba(X[0:1]).shape == (1, 3)
assert clf.predict_proba(X[:2]).shape == (2, 3)
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
@pytest.mark.parametrize('DiscreteNaiveBayes', DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_uniform_prior(DiscreteNaiveBayes):
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
clf = DiscreteNaiveBayes()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_almost_equal(prior, np.array([.5, .5]))
@pytest.mark.parametrize('DiscreteNaiveBayes', DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_provide_prior(DiscreteNaiveBayes):
# Test whether discrete NB classes use provided prior
clf = DiscreteNaiveBayes(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_almost_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
msg = 'Number of priors must match number of classes'
with pytest.raises(ValueError, match=msg):
clf.fit([[0], [1], [2]], [0, 1, 2])
msg = 'is not the same as on last call to partial_fit'
with pytest.raises(ValueError, match=msg):
clf.partial_fit([[0], [1]], [0, 1], classes=[0, 1, 1])
@pytest.mark.parametrize('DiscreteNaiveBayes', DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_provide_prior_with_partial_fit(DiscreteNaiveBayes):
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = DiscreteNaiveBayes(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = DiscreteNaiveBayes(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
@pytest.mark.parametrize('DiscreteNaiveBayes', DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_sample_weight_multiclass(DiscreteNaiveBayes):
# check shape consistency for number of samples at fit time
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float64)
sample_weight /= sample_weight.sum()
clf = DiscreteNaiveBayes().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = DiscreteNaiveBayes()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# TODO: Remove in version 1.1
@ignore_warnings(category=FutureWarning)
@pytest.mark.parametrize('DiscreteNaiveBayes', [BernoulliNB, ComplementNB,
MultinomialNB])
def test_discretenb_coef_intercept_shape(DiscreteNaiveBayes):
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
clf = DiscreteNaiveBayes()
clf.fit(X, y)
assert clf.coef_.shape == (1, 3)
assert clf.intercept_.shape == (1,)
@pytest.mark.parametrize('DiscreteNaiveBayes', DISCRETE_NAIVE_BAYES_CLASSES)
@pytest.mark.parametrize('use_partial_fit', [False, True])
@pytest.mark.parametrize('train_on_single_class_y', [False, True])
def test_discretenb_degenerate_one_class_case(
DiscreteNaiveBayes,
use_partial_fit,
train_on_single_class_y,
):
# Most array attributes of a discrete naive Bayes classifier should have a
# first-axis length equal to the number of classes. Exceptions include:
# ComplementNB.feature_all_, CategoricalNB.n_categories_.
# Confirm that this is the case for binary problems and the degenerate
# case of a single class in the training set, when fitting with `fit` or
# `partial_fit`.
# Non-regression test for handling degenerate one-class case:
# https://github.com/scikit-learn/scikit-learn/issues/18974
X = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
y = [1, 1, 2]
if train_on_single_class_y:
X = X[:-1]
y = y[:-1]
classes = sorted(list(set(y)))
num_classes = len(classes)
clf = DiscreteNaiveBayes()
if use_partial_fit:
clf.partial_fit(X, y, classes=classes)
else:
clf.fit(X, y)
assert clf.predict(X[:1]) == y[0]
# Check that attributes have expected first-axis lengths
attribute_names = [
'classes_',
'class_count_',
'class_log_prior_',
'feature_count_',
'feature_log_prob_',
]
for attribute_name in attribute_names:
attribute = getattr(clf, attribute_name, None)
if attribute is None:
# CategoricalNB has no feature_count_ attribute
continue
if isinstance(attribute, np.ndarray):
assert attribute.shape[0] == num_classes
else:
# CategoricalNB.feature_log_prob_ is a list of arrays
for element in attribute:
assert element.shape[0] == num_classes
@pytest.mark.parametrize('kind', ('dense', 'sparse'))
def test_mnnb(kind):
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
if kind == 'dense':
X = X2
elif kind == 'sparse':
X = scipy.sparse.csr_matrix(X2)
# Check the ability to predict the learning set.
clf = MultinomialNB()
msg = 'Negative values in data passed to'
with pytest.raises(ValueError, match=msg):
clf.fit(-X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
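# Hedged aside (not part of the original suite): the very first call to
# partial_fit must receive `classes`; assuming the error message mentions
# "classes", a minimal check is:
def test_mnnb_partial_fit_requires_classes():
    clf = MultinomialNB()
    with pytest.raises(ValueError, match='classes'):
        clf.partial_fit(X2[:2], y2[:2])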
def test_mnb_prior_unobserved_targets():
# test smoothing of prior for yet unobserved targets
# Create toy training data
X = np.array([[0, 1], [1, 0]])
y = np.array([0, 1])
clf = MultinomialNB()
with pytest.warns(None) as record:
clf.partial_fit(X, y, classes=[0, 1, 2])
assert len(record) == 0
assert clf.predict([[0, 1]]) == 0
assert clf.predict([[1, 0]]) == 1
assert clf.predict([[1, 1]]) == 0
# add a training example with previously unobserved class
with pytest.warns(None) as record:
clf.partial_fit([[1, 1]], [2])
assert len(record) == 0
assert clf.predict([[0, 1]]) == 0
assert clf.predict([[1, 0]]) == 1
assert clf.predict([[1, 1]]) == 2
# TODO: Remove in version 1.1
@ignore_warnings(category=FutureWarning)
def test_mnb_sample_weight():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
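# Hedged aside (not part of the original suite): with sample_weight
# [1, 1, 4] the weighted class counts are 2 (class 0) and 4 (class 1),
# so the fitted priors should be 2/6 and 4/6. Assuming class_count_
# stores weighted counts, a minimal re-derivation is:
def test_mnb_sample_weight_prior_arithmetic():
    clf = MultinomialNB()
    clf.fit([[1, 2], [1, 2], [1, 0]],
            [0, 0, 1],
            sample_weight=[1, 1, 4])
    assert_array_almost_equal(clf.class_count_, [2, 4])
    assert_array_almost_equal(np.exp(clf.class_log_prior_), [2 / 6, 4 / 6])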
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
    # Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1 / 3.0, 2 / 3.0, 2 / 3.0, 1 / 3.0, 1 / 3.0,
2 / 3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
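# Hedged aside (not part of the original suite): the unnormalized
# posteriors asserted above can be reproduced by hand from the Bernoulli
# model, where each class score is prior * prod(p if feature present
# else 1 - p) over the six features:
def test_bnb_unnormalized_posteriors_by_hand():
    x = np.array([0, 1, 1, 0, 0, 1])  # Chinese Chinese Chinese Tokyo Japan
    prior = np.array([0.75, 0.25])
    p = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
                  [1 / 3, 2 / 3, 2 / 3, 1 / 3, 1 / 3, 2 / 3]])
    scores = prior * np.prod(np.where(x == 1, p, 1 - p), axis=1)
    assert_array_almost_equal(scores, [0.005183999999999999,
                                       0.02194787379972565])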
def test_bnb_feature_log_prob():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_almost_equal(clf.feature_log_prob_, (num - denom))
def test_cnb():
# Tests ComplementNB when alpha=1.0 for the toy example in Manning,
# Raghavan, and Schuetze's "Introduction to Information Retrieval" book:
# https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo.
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1).
Y = np.array([0, 0, 0, 1])
# Check that weights are correct. See steps 4-6 in Table 4 of
# Rennie et al. (2003).
theta = np.array([
[
(0 + 1) / (3 + 6),
(1 + 1) / (3 + 6),
(1 + 1) / (3 + 6),
(0 + 1) / (3 + 6),
(0 + 1) / (3 + 6),
(1 + 1) / (3 + 6)
],
[
(1 + 1) / (6 + 6),
(3 + 1) / (6 + 6),
(0 + 1) / (6 + 6),
(1 + 1) / (6 + 6),
(1 + 1) / (6 + 6),
(0 + 1) / (6 + 6)
]])
weights = np.zeros(theta.shape)
normed_weights = np.zeros(theta.shape)
for i in range(2):
weights[i] = -np.log(theta[i])
normed_weights[i] = weights[i] / weights[i].sum()
# Verify inputs are nonnegative.
clf = ComplementNB(alpha=1.0)
msg = re.escape('Negative values in data passed to ComplementNB (input X)')
with pytest.raises(ValueError, match=msg):
clf.fit(-X, Y)
clf.fit(X, Y)
# Check that counts/weights are correct.
feature_count = np.array([[1, 3, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1]])
assert_array_equal(clf.feature_count_, feature_count)
class_count = np.array([3, 1])
assert_array_equal(clf.class_count_, class_count)
feature_all = np.array([1, 4, 1, 1, 1, 1])
assert_array_equal(clf.feature_all_, feature_all)
assert_array_almost_equal(clf.feature_log_prob_, weights)
clf = ComplementNB(alpha=1.0, norm=True)
clf.fit(X, Y)
assert_array_almost_equal(clf.feature_log_prob_, normed_weights)
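# Hedged aside (not part of the original suite): from the arrays asserted
# above, feature_all_ appears to be the per-feature total count over all
# classes, i.e. the column sums of feature_count_; a minimal consistency
# check on the same toy data:
def test_cnb_feature_all_matches_column_sums():
    X = np.array([[1, 1, 0, 0, 0, 0],
                  [0, 1, 0, 0, 1, 0],
                  [0, 1, 0, 1, 0, 0],
                  [0, 1, 1, 0, 0, 1]])
    Y = np.array([0, 0, 0, 1])
    clf = ComplementNB(alpha=1.0)
    clf.fit(X, Y)
    assert_array_equal(clf.feature_all_, clf.feature_count_.sum(axis=0))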
def test_categoricalnb():
# Check the ability to predict the training set.
clf = CategoricalNB()
y_pred = clf.fit(X2, y2).predict(X2)
assert_array_equal(y_pred, y2)
X3 = np.array([[1, 4], [2, 5]])
y3 = np.array([1, 2])
clf = CategoricalNB(alpha=1, fit_prior=False)
clf.fit(X3, y3)
assert_array_equal(clf.n_categories_, np.array([3, 6]))
# Check error is raised for X with negative entries
X = np.array([[0, -1]])
y = np.array([1])
error_msg = re.escape(
"Negative values in data passed to CategoricalNB (input X)"
)
with pytest.raises(ValueError, match=error_msg):
clf.predict(X)
with pytest.raises(ValueError, match=error_msg):
clf.fit(X, y)
# Test alpha
X3_test = np.array([[2, 5]])
# alpha=1 increases the count of all categories by one so the final
# probability for each category is not 50/50 but 1/3 to 2/3
bayes_numerator = np.array([[1/3*1/3, 2/3*2/3]])
bayes_denominator = bayes_numerator.sum()
assert_array_almost_equal(clf.predict_proba(X3_test),
bayes_numerator / bayes_denominator)
# Assert category_count has counted all features
assert len(clf.category_count_) == X3.shape[1]
# Check sample_weight
X = np.array([[0, 0], [0, 1], [0, 0], [1, 1]])
y = np.array([1, 1, 2, 2])
clf = CategoricalNB(alpha=1, fit_prior=False)
clf.fit(X, y)
assert_array_equal(clf.predict(np.array([[0, 0]])), np.array([1]))
assert_array_equal(clf.n_categories_, np.array([2, 2]))
for factor in [1., 0.3, 5, 0.0001]:
X = np.array([[0, 0], [0, 1], [0, 0], [1, 1]])
y = np.array([1, 1, 2, 2])
sample_weight = np.array([1, 1, 10, 0.1]) * factor
clf = CategoricalNB(alpha=1, fit_prior=False)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(np.array([[0, 0]])), np.array([2]))
assert_array_equal(clf.n_categories_, np.array([2, 2]))
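# Hedged aside (not part of the original suite): for factor=1 the weighted
# counts explain the flipped prediction. Class 2 sees [0, 0] with weight 10
# and [1, 1] with weight 0.1, so P(x_i=0 | y=2) = (10 + 1)/(10.1 + 2) per
# feature, which dominates class 1's smoothed (2 + 1)/(2 + 2) and
# (1 + 1)/(2 + 2). Assuming category_count_ holds one
# (n_classes, n_categories) array of weighted counts per feature:
def test_categoricalnb_sample_weight_counts():
    X = np.array([[0, 0], [0, 1], [0, 0], [1, 1]])
    y = np.array([1, 1, 2, 2])
    sample_weight = np.array([1, 1, 10, 0.1])
    clf = CategoricalNB(alpha=1, fit_prior=False)
    clf.fit(X, y, sample_weight=sample_weight)
    assert_array_almost_equal(clf.category_count_[0],
                              np.array([[2, 0], [10, 0.1]]))
    assert_array_almost_equal(clf.category_count_[1],
                              np.array([[1, 1], [10, 0.1]]))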
@pytest.mark.parametrize(
"min_categories, exp_X1_count, exp_X2_count, new_X, exp_n_categories_",
[
# check min_categories with int > observed categories
(3, np.array([[2, 0, 0], [1, 1, 0]]), np.array([[1, 1, 0], [1, 1, 0]]),
np.array([[0, 2]]), np.array([3, 3]),
),
# check with list input
([3, 4], np.array([[2, 0, 0], [1, 1, 0]]),
np.array([[1, 1, 0, 0], [1, 1, 0, 0]]), np.array([[0, 3]]),
np.array([3, 4]),
),
        # check min_categories with min less than actual
        (1, np.array([[2, 0], [1, 1]]), np.array([[1, 1], [1, 1]]),
         np.array([[0, 1]]), np.array([2, 2]),
         ),
]
)
def test_categoricalnb_with_min_categories(min_categories, exp_X1_count,
exp_X2_count, new_X,
exp_n_categories_):
X_n_categories = np.array([[0, 0], [0, 1], [0, 0], [1, 1]])
y_n_categories = np.array([1, 1, 2, 2])
expected_prediction = np.array([1])
clf = CategoricalNB(alpha=1, fit_prior=False,
min_categories=min_categories)
clf.fit(X_n_categories, y_n_categories)
X1_count, X2_count = clf.category_count_
assert_array_equal(X1_count, exp_X1_count)
assert_array_equal(X2_count, exp_X2_count)
predictions = clf.predict(new_X)
assert_array_equal(predictions, expected_prediction)
assert_array_equal(clf.n_categories_, exp_n_categories_)
@pytest.mark.parametrize(
"min_categories, error_msg",
[
('bad_arg', "'min_categories' should have integral"),
([[3, 2], [2, 4]], "'min_categories' should have shape"),
(1., "'min_categories' should have integral"),
]
)
def test_categoricalnb_min_categories_errors(min_categories, error_msg):
X = np.array([[0, 0], [0, 1], [0, 0], [1, 1]])
y = np.array([1, 1, 2, 2])
clf = CategoricalNB(alpha=1, fit_prior=False,
min_categories=min_categories)
with pytest.raises(ValueError, match=error_msg):
clf.fit(X, y)
def test_alpha():
# Setting alpha=0 should not output nan results when p(x_i|y_j)=0 is a case
X = np.array([[1, 0], [1, 1]])
y = np.array([0, 1])
nb = BernoulliNB(alpha=0.)
msg = (
"alpha too small will result in numeric errors,"
" setting alpha = 1.0e-10"
)
with pytest.warns(UserWarning, match=msg):
nb.partial_fit(X, y, classes=[0, 1])
with pytest.warns(UserWarning, match=msg):
nb.fit(X, y)
prob = np.array([[1, 0], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
nb = MultinomialNB(alpha=0.)
with pytest.warns(UserWarning, match=msg):
nb.partial_fit(X, y, classes=[0, 1])
with pytest.warns(UserWarning, match=msg):
nb.fit(X, y)
prob = np.array([[2. / 3, 1. / 3], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
nb = CategoricalNB(alpha=0.)
with pytest.warns(UserWarning, match=msg):
nb.fit(X, y)
prob = np.array([[1., 0.], [0., 1.]])
assert_array_almost_equal(nb.predict_proba(X), prob)
# Test sparse X
X = scipy.sparse.csr_matrix(X)
nb = BernoulliNB(alpha=0.)
with pytest.warns(UserWarning, match=msg):
nb.fit(X, y)
prob = np.array([[1, 0], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
nb = MultinomialNB(alpha=0.)
with pytest.warns(UserWarning, match=msg):
nb.fit(X, y)
prob = np.array([[2. / 3, 1. / 3], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
# Test for alpha < 0
X = np.array([[1, 0], [1, 1]])
y = np.array([0, 1])
expected_msg = re.escape(
'Smoothing parameter alpha = -1.0e-01. alpha should be > 0.'
)
b_nb = BernoulliNB(alpha=-0.1)
m_nb = MultinomialNB(alpha=-0.1)
c_nb = CategoricalNB(alpha=-0.1)
with pytest.raises(ValueError, match=expected_msg):
b_nb.fit(X, y)
with pytest.raises(ValueError, match=expected_msg):
m_nb.fit(X, y)
with pytest.raises(ValueError, match=expected_msg):
c_nb.fit(X, y)
b_nb = BernoulliNB(alpha=-0.1)
m_nb = MultinomialNB(alpha=-0.1)
with pytest.raises(ValueError, match=expected_msg):
b_nb.partial_fit(X, y, classes=[0, 1])
with pytest.raises(ValueError, match=expected_msg):
m_nb.partial_fit(X, y, classes=[0, 1])
def test_alpha_vector():
X = np.array([[1, 0], [1, 1]])
y = np.array([0, 1])
# Setting alpha=np.array with same length
# as number of features should be fine
alpha = np.array([1, 2])
nb = MultinomialNB(alpha=alpha)
nb.partial_fit(X, y, classes=[0, 1])
    # Test that feature probabilities use the pseudo-counts (alpha):
    # class 0 has counts [1, 0] (total 1), so theta = [1 + 1, 0 + 2] / (1 + 3)
    # = [1/2, 1/2]; class 1 has counts [1, 1] (total 2), so theta = [2/5, 3/5]
    feature_prob = np.array([[1 / 2, 1 / 2], [2 / 5, 3 / 5]])
assert_array_almost_equal(nb.feature_log_prob_, np.log(feature_prob))
# Test predictions
prob = np.array([[5 / 9, 4 / 9], [25 / 49, 24 / 49]])
assert_array_almost_equal(nb.predict_proba(X), prob)
# Test alpha non-negative
alpha = np.array([1., -0.1])
m_nb = MultinomialNB(alpha=alpha)
expected_msg = (
'Smoothing parameter alpha = -1.0e-01. alpha should be > 0.'
)
with pytest.raises(ValueError, match=expected_msg):
m_nb.fit(X, y)
# Test that too small pseudo-counts are replaced
ALPHA_MIN = 1e-10
alpha = np.array([ALPHA_MIN / 2, 0.5])
m_nb = MultinomialNB(alpha=alpha)
m_nb.partial_fit(X, y, classes=[0, 1])
assert_array_almost_equal(m_nb._check_alpha(),
[ALPHA_MIN, 0.5],
decimal=12)
# Test correct dimensions
alpha = np.array([1., 2., 3.])
m_nb = MultinomialNB(alpha=alpha)
expected_msg = re.escape(
'alpha should be a scalar or a numpy array with shape [n_features]'
)
with pytest.raises(ValueError, match=expected_msg):
m_nb.fit(X, y)
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
X, y = load_digits(return_X_y=True)
binary_3v8 = np.logical_or(y == 3, y == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert scores.mean() > 0.86
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert scores.mean() > 0.94
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert scores.mean() > 0.83
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert scores.mean() > 0.92
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert scores.mean() > 0.77
scores = cross_val_score(GaussianNB(var_smoothing=0.1), X, y, cv=10)
assert scores.mean() > 0.89
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert scores.mean() > 0.86
# FIXME: remove in 1.2
@pytest.mark.parametrize("Estimator", DISCRETE_NAIVE_BAYES_CLASSES)
def test_n_features_deprecation(Estimator):
# Check that we raise the proper deprecation warning if accessing
# `n_features_`.
X = np.array([[1, 2], [3, 4]])
y = np.array([1, 0])
est = Estimator().fit(X, y)
with pytest.warns(FutureWarning, match="n_features_ was deprecated"):
est.n_features_
|
glemaitre/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
Python
|
bsd-3-clause
| 35,392
|
[
"Gaussian"
] |
87a61ce688e515e1431cb005839a0f3d350ac2b796f37a418a73976181c6b255
|
import os
import random
import scipy.integrate
import scipy.interpolate
import scipy.signal
import string
import numpy
import astropy.io.fits as pyfits
class SpectrumError( Exception ):
def __init__(self, value, errmsg):
'''
SpectrumError
        Raised on exceptions within Spectrum objects
Error definitions are as follows:
0 | Failure loading Raw Data!!
1 | Failure loading Processed Data!!
2 | Failure calculating Equivalent Widths!
3 | Failure Convolving Spectrum!
4 | Failure Calculating Difference Spectrum!
'''
self.value = value
self.message = {}
self.message[0] = "Failure loading Raw Data!! %s" % errmsg
self.message[1] = "Failure loading Processed Data!! %s" % errmsg
self.message[2] = "Failure calculating Equivalent Widths! %s" % errmsg
self.message[3] = "Failure Convolving Spectrum! %s" % errmsg
self.message[4] = "Failure Calculating Difference Spectrum! %s" % errmsg
def __str__(self):
return repr(self.message[self.value])
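# Hedged usage sketch (not part of the original module): SpectrumError
# carries an integer code that selects one of the messages defined above,
# plus a free-form detail string. For example, a convolution failure
# could be reported as:
#
#     try:
#         raise SpectrumError(3, "kernel is wider than the spectrum")
#     except SpectrumError as err:
#         print(err)   # 'Failure Convolving Spectrum! kernel is wider...'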
class Spectrum( object ):
def __init__(self, wl=None, I=None, dI=None, Q=None, U=None, V=None,
continuum=None, header=pyfits.Header(), spectrum_type=None,
filename=None, ext=None, preserve=False, label=None):
"""
        Spectrum.__init__(wl=None, I=None, dI=None, Q=None, U=None, V=None,
            continuum=None, header=pyfits.Header(), spectrum_type=None,
            filename=None, ext=None, preserve=False, label=None)
        Creates a Spectrum object from arrays
        wl = numpy array of wavelength values (should be microns?)
        I = numpy array of Stokes I flux values
        dI = numpy array of Stokes I flux uncertainties
        Q = numpy array of Stokes Q flux values
U = numpy array of Stokes U flux values
V = numpy array of Stokes V flux values
continuum = numpy array of continuum values. Can/should be omitted
if the flux values are normalized.
header = pyfits Header object containing the FITS header to be
saved with the Spectrum when it is saved
spectrum_type = 'RAW', 'INTERPOLATED', 'DISK INTEGRATED',
'CONVOLVED', 'BLENDED', 'MERGED', 'ROTATED', 'DIFFERENCE',
'SCALED', 'MOOG DISK INTEGRATED', 'MOOG EMERGENT'
filename = Filename for saving (or reading)
        ext = For FITS files with multiple parts, ext is the extension number
preserve = When True, prepares the fluxes and wavelengths for saving into
a FITS table.
"""
self.wl = wl
self.flux_I = I
self.dflux_I = dI
self.flux_Q = Q
self.flux_U = U
self.flux_V = V
self.continuum = continuum
self.header = header
self.filename = filename
self.ext = ext
self.label = label
if spectrum_type != None:
self.addHistory(spectrum_type=spectrum_type)
        if preserve:
            self.preserve(I=I is not None, dI=dI is not None,
                          Q=Q is not None, U=U is not None,
                          V=V is not None, continuum=continuum is not None)
    @classmethod
    def from_file(cls, header=None, data=None, filename=None, ext=None, label=None):
        """
        Spectrum.from_file(header=None, data=None, filename=None, ext=None)
        Creates a Spectrum object by reading previously saved data from a file
        header = pyfits Header object to be used in place of any existing FITS
            header.
        data = object containing pyfits datafields (i.e. pyfits.getdata())
        filename = full path of file containing data
        ext = if multiple spectra are contained in the same file, ext
            specifies which extension to grab.
        """
        if (data is None) and (filename is None) and (ext is None):
            errmsg = "Neither data nor a filename/extension was provided!"
            raise SpectrumError(0, errmsg)
        retval = cls(wl=None, I=None, dI=None, Q=None, U=None, V=None,
                     continuum=None, header=header, filename=filename,
                     ext=ext, label=label)
        if data is not None:
            retval.extractData(data)
        return retval
def addHistory(self, spectrum_type=""):
"""
Spectrum.addHistory(spectrum_type="")
addHistory allows the user to keep track of the provenance of the data
contained inside. If an operation changes the type of data (say by
convolving to a different resolution), this routine stores the
"parent's" SPECTRUM ID in the history. Then, the spectrum is given
a new SPECTRUM_ID by generating a random string of ascii characters.
"""
        if 'SPECTRUM_ID' in self.header:
self.header.add_history(self.header.get('SPECTRUM_TYPE')+
' - '+self.header.get('SPECTRUM_ID'))
self.header.set('SPECTRUM_ID', ''.join(random.choice(string.ascii_letters)
for _ in range(10)))
self.header.set('SPECTRUM_TYPE', spectrum_type)
def addLabel(self, label=None):
self.label = label
def extractData(self, data, plainFits=False):
"""
Spectrum.extractData(data)
This routine extracts spectrum data from a FITS binary table.
data = pyfits binary table
Note: the data object must contain a 'Wavelength' field.
"""
if plainFits:
self.wl = data[0]
self.flux_I = data[1]
self.flux_Q = None
self.flux_U = None
self.flux_V = None
self.continuum = None
if len(data) == 3:
self.dflux_I = data[2]
else:
self.dflux_I = None
else:
try:
self.wl = data.field('Wavelength')
except:
raise SpectrumError(0, "Spectrum data must contain WL data")
try:
self.flux_I = data.field('Stokes_I')
except:
self.flux_I = None
try:
self.dflux_I = data.field('dStokes_I')
except:
self.dflux_I = None
try:
self.flux_Q = data.field('Stokes_Q')
except:
self.flux_Q = None
try:
self.flux_U = data.field('Stokes_U')
except:
self.flux_U = None
try:
self.flux_V = data.field('Stokes_V')
except:
self.flux_V = None
try:
self.continuum = data.field('Continuum')
except:
self.continuum = None
def loadData(self, plainFits=False):
"""
Spectrum.loadData()
Loads the data related to a spectrum stored in the self.filename.
Once the data is loaded from the file, the file is closed, and
removed from memory. Then, the data is extracted and loaded into the
Spectrum object
"""
try:
datafile = open(self.filename, 'rb')
data = pyfits.getdata(datafile, ext=self.ext, memmap=False)
datafile.close()
del(datafile)
except:
raise SpectrumError(0, "Error reading extension %d from %s" %
(self.ext, self.filename))
self.extractData(data, plainFits=plainFits)
if plainFits:
self.header = pyfits.getheader(self.filename)
del(data)
def preserve(self, prepareColumns=True, I=True, dI=False, Q=False, U=False, V=True, continuum=True):
"""
Spectrum.preserve(prepareColumns=True, I=True, Q=False, U=False, V=True, continuum=True)
prepares the spectrum for saving in a FITS binary table by
creating the columns object (a pyfits.ColDefs object) from
the Spectrum data.
prepareColumns [Boolean] =
True - self.columns is created
False - Nothing happens
I [Boolean] =
True = a Stokes_I column will be added to the ColDefs object
False = Stokes_I column is left out of the ColDefs object
Q [Boolean] =
True = a Stokes_Q column will be added to the ColDefs object
False = Stokes_Q column is left out of the ColDefs object
U [Boolean] =
True = a Stokes_U column will be added to the ColDefs object
False = Stokes_U column is left out of the ColDefs object
V [Boolean] =
True = a Stokes_V column will be added to the ColDefs object
False = Stokes_V column is left out of the ColDefs object
continuum [Boolean] =
True = a Continuum column will be added to the ColDefs object
False = Continuum column is left out of the ColDefs object
"""
self.wl = numpy.array(self.wl)
if prepareColumns:
coldefs = []
wave = pyfits.Column(name='Wavelength', format='D', array=self.wl)
coldefs.append(wave)
if I:
flux_I = pyfits.Column(name='Stokes_I', format='D',
array=numpy.array(self.flux_I))
coldefs.append(flux_I)
if dI:
dflux_I = pyfits.Column(name='dStokes_I', format='D',
array=numpy.array(self.dflux_I))
coldefs.append(dflux_I)
if Q:
flux_Q = pyfits.Column(name='Stokes_Q', format='D',
array=numpy.array(self.flux_Q))
coldefs.append(flux_Q)
if U:
flux_U = pyfits.Column(name='Stokes_U', format='D',
array=numpy.array(self.flux_U))
coldefs.append(flux_U)
if V:
flux_V = pyfits.Column(name='Stokes_V', format='D',
array=numpy.array(self.flux_V))
coldefs.append(flux_V)
if continuum:
continuum = pyfits.Column(name='Continuum', format='D',
array=numpy.array(self.continuum))
coldefs.append(continuum)
self.columns = pyfits.ColDefs(coldefs)
def savePlainFits(self, I=True, dI=False, Q=False, U=False, V=False, continuum=False, outfileName='Output.fits'):
data = numpy.array([self.wl, self.flux_I])
saved_header = self.header.copy()
for card in self.label.Melody.header.cards[4:]:
saved_header.append(card)
hdu = pyfits.PrimaryHDU(data, header=saved_header)
hdu.writeto(outfileName, clobber=True)
def copy(self):
return Spectrum(wl=self.wl, I=self.flux_I, Q=self.flux_Q, U=self.flux_U,continuum=self.continuum,
V=self.flux_V, header=self.header, spectrum_type='CONVOLVED')
def plot(self, I=True, Q=False, U=False, V=False,
continuum=False, ax=None, **kwargs):
"""
Spectrum.plot(I=True, Q=False, U=False, V=False, continuum=False,
ax=pyplot.axis, **kwargs)
plot allows a simple way to plot the contents of the spectrum.
I [Boolean] = signifying whether or not Stokes_I is plotted
Q [Boolean] = signifying whether or not Stokes_Q is plotted
U [Boolean] = signifying whether or not Stokes_U is plotted
V [Boolean] = signifying whether or not Stokes_V is plotted
continuum [Boolean] = signifying whether or not the continuum is plotted
ax [matplotlib.pyplot.axis object]
**kwargs = arguments to pass to the plot command
"""
plotLabel = self.header.get('EXTNAME')
if I:
if self.dflux_I != None:
ax.errorbar(self.wl, self.flux_I, yerr=self.dflux_I,
label=plotLabel, **kwargs)
else:
ax.plot(self.wl, self.flux_I, label=plotLabel, **kwargs)
if Q:
ax.plot(self.wl, self.flux_Q, label=plotLabel, **kwargs)
if U:
ax.plot(self.wl, self.flux_U, label=plotLabel, **kwargs)
if V:
ax.plot(self.wl, self.flux_V, label=plotLabel, **kwargs)
if continuum:
ax.plot(self.wl, self.continuum, label=plotLabel, **kwargs)
def nyquistSample(self, R=0.0):
nyquistWl = []
deltaWl = min(self.wl)/(2.0*R)
nyquistWl.append(min(self.wl) + deltaWl)
while True:
deltaWl = nyquistWl[-1]/(2.0*R)
if nyquistWl[-1]+deltaWl > self.wl[-1]:
break
nyquistWl.append(nyquistWl[-1]+deltaWl)
nyquistWl = numpy.array(nyquistWl)
self.bin(nyquistWl)
def resample(self, R=0.0, nyquist=False, observedWl=None, pad=None):
"""
Spectrum.resample(R=0.0, nyquist=False, observedWl=None, pad=None)
resample convolves the spectrum to a resolution R, and optionally
nyquist samples it, or re-bins it to a different wavelength range,
padding the beginning and end of the wavelength range.
R [float] = Desired resolving power (Lambda/dLambda)
nyquist [Boolean] = If true, returns a nyquist-sampled spectrum
observedWl [numpy.array] wavelength points to which the spectrum
will be rebinned/interpolated
pad [None/float] If the spectrum is to be rebinned, pad contains
the value to be stored in the flux points which are not spanned
by a complete bin.
"""
subsample = 16.0
newWl = [self.wl[0]]
while True:
stepsize = newWl[-1]/(R*subsample)
if newWl[-1]+stepsize > self.wl[-1]:
break
newWl.append(newWl[-1]+stepsize)
if self.flux_I != None:
I = scipy.interpolate.interpolate.interp1d(self.wl,
self.flux_I, bounds_error=False, fill_value=1.0)
newI = I(newWl)
if self.flux_V != None:
V = scipy.interpolate.interpolate.interp1d(self.wl,
self.flux_V, bounds_error=False, fill_value=1.0)
newV = V(newWl)
if self.flux_Q != None:
Q = scipy.interpolate.interpolate.interp1d(self.wl,
self.flux_Q, bounds_error=False, fill_value=1.0)
newQ = Q(newWl)
if self.flux_U != None:
U = scipy.interpolate.interpolate.interp1d(self.wl,
self.flux_U, bounds_error=False, fill_value=1.0)
newU = U(newWl)
if self.continuum != None:
continuum = scipy.interpolate.interpolate.interp1d(self.wl,
self.continuum, bounds_error=False, fill_value=1.0)
newContinuum = continuum(newWl)
const = numpy.ones(len(newWl))
xk = numpy.array(range(int(4.0*subsample)))
yk = numpy.exp(-(xk-(2.0*subsample))**2.0/(subsample**2.0/(4.0*numpy.log(2.0))))
newWl = numpy.array(newWl[int(len(xk)/2.0):-int(len(xk)/2.0)])
normal = scipy.signal.convolve(const, yk, mode = 'same')
if self.flux_I != None:
result_I = scipy.signal.convolve(newI, yk, mode ='same')/normal
flux_I = numpy.array(result_I[int(len(xk)/2.0):-int(len(xk)/2.0)])
else:
flux_I = None
if self.flux_V != None:
result_V = scipy.signal.convolve(newV, yk, mode ='same')/normal
flux_V = numpy.array(result_V[int(len(xk)/2.0):-int(len(xk)/2.0)])
else:
flux_V = None
if self.flux_Q != None:
result_Q = scipy.signal.convolve(newQ, yk, mode ='same')/normal
flux_Q = numpy.array(result_Q[int(len(xk)/2.0):-int(len(xk)/2.0)])
else:
flux_Q = None
if self.flux_U != None:
result_U = scipy.signal.convolve(newU, yk, mode ='same')/normal
flux_U = numpy.array(result_U[int(len(xk)/2.0):-int(len(xk)/2.0)])
else:
flux_U = None
if self.continuum != None:
result_Cont = scipy.signal.convolve(newContinuum, yk, mode ='same')/normal
continuum = numpy.array(result_Cont[int(len(xk)/2.0):-int(len(xk)/2.0)])
else:
continuum = None
header = self.header.copy()
header.set('RESOLVING_POWER', R)
processed = Spectrum(wl=newWl, I=flux_I, Q=flux_Q, U=flux_U, continuum=continuum,
V=flux_V, header=header, spectrum_type='CONVOLVED')
if nyquist:
nyquistWl = []
deltaWl = min(self.wl)/(2.0*R)
nyquistWl.append(min(self.wl) + deltaWl)
while True:
deltaWl = nyquistWl[-1]/(2.0*R)
if nyquistWl[-1]+deltaWl > self.wl[-1]:
break
nyquistWl.append(nyquistWl[-1]+deltaWl)
nyquistWl = numpy.array(nyquistWl)
processed.bin(nyquistWl)
if observedWl == None:
return processed
else:
processed.bin(observedWl, pad=pad)
return processed
def rv(self, rv=0.0):
"""
Spectrum.rv(rv=0.0)
This routine simulates the effect of a radial velocity on the target spectrum
rv = radial velocity in km/s
"""
        beta = rv/299792.458
        # Relativistic Doppler shift: lambda_obs = lambda_emit*(1 + beta)/sqrt(1 - beta**2)
        self.wl = self.wl * (1.0 + beta)/numpy.sqrt(1.0 - beta**2.0)
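        # Worked example (illustrative): for rv = +30 km/s, beta ~ 1.0e-4,
        # so a line at 6563 Angstroms shifts redward by roughly
        # 6563*beta ~ 0.66 Angstroms; the sqrt(1 - beta**2) correction is
        # of order 5e-9 and negligible at such velocities.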
def bin(self, newWl, pad=None, subsample=3.0):
"""
Spectrum.bin(newWl=[], pad=None)
This routine simulates the binning of a synthetic spectra due to
the discrete nature of detector pixels.
newWl [numpy.array] the new wavelengths to which the spectrum
should be binned.
pad [None/float] pad contains the value to be stored in the flux points
which are not spanned by a complete bin.
"""
if subsample == 0:
if not(self.flux_I is None):
I = scipy.interpolate.splrep(self.wl, self.flux_I)
newSpec_I = scipy.interpolate.splev(newWl, I, ext=1)
else:
newSpec_I = None
if not(self.flux_Q is None):
Q = scipy.interpolate.splrep(self.wl, self.flux_Q)
newSpec_Q = scipy.interpolate.splev(newWl, Q, ext=1)
else:
newSpec_Q = None
if not(self.flux_U is None):
U = scipy.interpolate.splrep(self.wl, self.flux_U)
newSpec_U = scipy.interpolate.splev(newWl, U, ext=1)
else:
newSpec_U = None
if not(self.flux_V is None):
V = scipy.interpolate.splrep(self.wl, self.flux_V)
newSpec_V = scipy.interpolate.splev(newWl, V, ext=1)
else:
newSpec_V = None
if not(self.continuum is None):
continuum = scipy.interpolate.splrep(self.wl, self.continuum)
newSpec_continuum = scipy.interpolate.splev(newWl, continuum, ext=1)
else:
newSpec_continuum = None
self.wl = newWl
if not(self.flux_I is None):
self.flux_I = numpy.array(newSpec_I)
if not(self.flux_Q is None):
self.flux_Q = numpy.array(newSpec_Q)
if not(self.flux_U is None):
self.flux_U = numpy.array(newSpec_U)
if not(self.flux_V is None):
self.flux_V = numpy.array(newSpec_V)
if not(self.continuum is None):
self.continuum = numpy.array(newSpec_continuum)
else:
factor = subsample
deltaWl = numpy.median(numpy.diff(newWl))/factor
if pad is None:
#interpWl = numpy.arange(self.wl[0], self.wl[-1], deltaWl)
npts = int((self.wl[-1]-self.wl[0])/deltaWl)
interpWl = numpy.linspace(self.wl[0], self.wl[-1], num=npts)
else:
#interpWl = numpy.arange(newWl[0], newWl[-1], deltaWl)
npts = int((newWl[-1]-newWl[0])/deltaWl)
interpWl = numpy.linspace(newWl[0], newWl[-1], num=npts)
newWave = []
if not(self.flux_I is None):
I = scipy.interpolate.splrep(self.wl, self.flux_I)
I_interp = scipy.interpolate.splev(interpWl, I, ext=1)
if not(pad is None):
I_interp[I_interp==0]=1.0
newSpec_I = []
if not(self.flux_Q is None):
Q = scipy.interpolate.splrep(self.wl, self.flux_Q)
Q_interp = scipy.interpolate.splev(interpWl, Q, ext=1)
                newSpec_Q = []  # list, so every branch below can append
if not(self.flux_U is None):
U = scipy.interpolate.splrep(self.wl, self.flux_U)
U_interp = scipy.interpolate.splev(interpWl, U, ext=1)
                newSpec_U = []  # list, so every branch below can append
if not(self.flux_V is None):
V = scipy.interpolate.splrep(self.wl, self.flux_V)
V_interp = scipy.interpolate.splev(interpWl, V, ext=1)
if not(pad is None):
V_interp[V_interp==0]=0.0
newSpec_V = []
if not(self.continuum is None):
continuum = scipy.interpolate.splrep(self.wl, self.continuum)
cont_interp = scipy.interpolate.splev(interpWl, continuum, ext=1)
                newSpec_continuum = []  # list, so every branch below can append
for i in range(len(newWl)):
if i==0:
lowerBound = newWl[0]-deltaWl*(factor/2.0)
else:
lowerBound = (newWl[i-1]+newWl[i])/2.0
if i==len(newWl)-1:
upperBound = newWl[-1]+deltaWl*(factor/2.0)
else:
upperBound = (newWl[i]+newWl[i+1])/2.0
inBin = scipy.where( (interpWl > lowerBound) & (
interpWl <= upperBound))[0]
if (len(inBin) > 1):
newWave.append(newWl[i])
denom = interpWl[inBin][-1] - interpWl[inBin][0]
if not(self.flux_I is None):
num=scipy.integrate.simps(I_interp[inBin],
x=interpWl[inBin])
newSpec_I.append(num/denom)
if not(self.flux_Q is None):
num=scipy.integrate.simps(Q_interp[inBin],
x=interpWl[inBin])
                newSpec_Q.append(num/denom)
if not(self.flux_U is None):
num=scipy.integrate.simps(U_interp[inBin],
x=interpWl[inBin])
                newSpec_U.append(num/denom)
if not(self.flux_V is None):
num=scipy.integrate.simps(V_interp[inBin],
x=interpWl[inBin])
newSpec_V.append(num/denom)
if not(self.continuum is None):
num=scipy.integrate.simps(cont_interp[inBin],
x=interpWl[inBin])
                newSpec_continuum.append(num/denom)
elif (len(inBin) == 1):
newWave.append(newWl[i])
if not(self.flux_I is None):
newSpec_I.append(I_interp[inBin][0])
if not(self.flux_Q is None):
newSpec_Q.append(Q_interp[inBin][0])
if not(self.flux_U is None):
newSpec_U.append(U_interp[inBin][0])
if not(self.flux_V is None):
newSpec_V.append(V_interp[inBin][0])
if not(self.continuum is None):
newSpec_continuum.append(cont_interp[inBin][0])
else:
newWave.append(newWl[i])
if not(self.flux_I is None):
newSpec_I.append(scipy.interpolate.splev(newWl[i], I, ext=1))
if not(self.flux_Q is None):
newSpec_Q.append(scipy.interpolate.splev(newWl[i], Q, ext=1))
if not(self.flux_U is None):
newSpec_U.append(scipy.interpolate.splev(newWl[i], U, ext=1))
if not(self.flux_V is None):
newSpec_V.append(scipy.interpolate.splev(newWl[i], V, ext=1))
if not(self.continuum is None):
newSpec_continuum.append(scipy.interpolate.splev(newWl[i], continuum, ext=1))
#print "ERROR!!! newWave is not appended!"
#raw_input()
self.wl = numpy.array(newWave)
if not(self.flux_I is None):
self.flux_I = numpy.array(newSpec_I)
if not(self.flux_Q is None):
self.flux_Q = numpy.array(newSpec_Q)
if not(self.flux_U is None):
self.flux_U = numpy.array(newSpec_U)
if not(self.flux_V is None):
self.flux_V = numpy.array(newSpec_V)
if not(self.continuum is None):
self.continuum = numpy.array(newSpec_continuum)
def rotate(self, angle=0.0, wlPoint = None):
"""
Spectrum.rotate(angle=0.0, wlPoint = None)
This routine rotates the spectra, to mimic errors in the continuum
determination.
angle [rad] = arctan(rise/run).
Units are continuum/angstrom
"""
I = None
Q = None
U = None
V = None
continuum = None
if wlPoint == None:
wlPoint = (self.wl[0]+self.wl[-1])/2.0
offset = numpy.tan(angle)*(self.wl-wlPoint)
if (self.flux_I != None):
I = self.flux_I + offset
if (self.flux_Q != None):
Q = self.flux_Q + offset
if (self.flux_U != None):
U = self.flux_U + offset
if (self.flux_V != None):
V = self.flux_V + offset
if (self.continuum != None):
continuum = self.continuum + offset
return Spectrum(wl=self.wl, I=I, Q=Q, U=U, V=V,
continuum=continuum, header=self.header,
spectrum_type="ROTATED")
def __sub__(self, other):
'''
Spectrum.__sub__(other)
__sub__ overloads the subtraction operator. It subtracts one spectrum
from the other
subtracted = Spectrum - other
'''
overlap_start = numpy.max([numpy.min(self.wl), numpy.min(other.wl)])
overlap_stop = numpy.min([numpy.max(self.wl), numpy.max(other.wl)])
overlap = scipy.where((self.wl >= overlap_start) & (self.wl <= overlap_stop))
if (self.flux_I != None) & (other.flux_I != None):
I = scipy.interpolate.splrep(other.wl, other.flux_I)
retval_I = numpy.zeros(len(self.wl))
retval_I[overlap] = self.flux_I[overlap] - scipy.interpolate.splev(self.wl[overlap], I)
else:
retval_I = None
if (self.flux_Q != None) & (other.flux_Q != None):
Q = scipy.interpolate.splrep(other.wl, other.flux_Q)
retval_Q = numpy.zeros(len(self.wl))
retval_Q[overlap] = self.flux_Q[overlap] - scipy.interpolate.splev(self.wl[overlap], Q)
else:
retval_Q = None
if (self.flux_U != None) & (other.flux_U != None):
U = scipy.interpolate.splrep(other.wl, other.flux_U)
retval_U = numpy.zeros(len(self.wl))
retval_U[overlap] = self.flux_U[overlap] - scipy.interpolate.splev(self.wl[overlap], U)
else:
retval_U = None
if (self.flux_V != None) & (other.flux_V != None):
V = scipy.interpolate.splrep(other.wl, other.flux_V)
retval_V = numpy.zeros(len(self.wl))
retval_V[overlap] = self.flux_V[overlap] - scipy.interpolate.splev(self.wl[overlap], V)
else:
retval_V = None
if (self.continuum != None) & (other.continuum != None):
continuum = scipy.interpolate.splrep(other.wl, other.continuum)
retval_continuum = numpy.zeros(len(self.wl))
retval_continuum[overlap] = self.continuum[overlap] - scipy.interpolate.splev(self.wl[overlap], continuum)
else:
retval_continuum = None
return Spectrum(wl=self.wl, I=retval_I, Q=retval_Q, U=retval_U, V=retval_V,
continuum=retval_continuum, header=self.header,
spectrum_type="DIFFERENCE")
def __add__(self, other):
'''
        Spectrum.__add__(other)
        __add__ overloads the addition operator. It adds one spectrum
to the other
added = Spectrum + other
'''
overlap_start = numpy.max([numpy.min(self.wl), numpy.min(other.wl)])
overlap_stop = numpy.min([numpy.max(self.wl), numpy.max(other.wl)])
overlap = scipy.where((self.wl >= overlap_start) & (self.wl <= overlap_stop))
if (self.flux_I != None) & (other.flux_I != None):
I = scipy.interpolate.splrep(other.wl, other.flux_I)
retval_I = numpy.zeros(len(self.wl))
retval_I[overlap] = self.flux_I[overlap] + scipy.interpolate.splev(self.wl[overlap], I)
else:
retval_I = None
if (self.flux_Q != None) & (other.flux_Q != None):
Q = scipy.interpolate.splrep(other.wl, other.flux_Q)
retval_Q = numpy.zeros(len(self.wl))
retval_Q[overlap] = self.flux_Q[overlap] + scipy.interpolate.splev(self.wl[overlap], Q)
else:
retval_Q = None
if (self.flux_U != None) & (other.flux_U != None):
U = scipy.interpolate.splrep(other.wl, other.flux_U)
retval_U = numpy.zeros(len(self.wl))
retval_U[overlap] = self.flux_U[overlap] + scipy.interpolate.splev(self.wl[overlap], U)
else:
retval_U = None
if (self.flux_V != None) & (other.flux_V != None):
V = scipy.interpolate.splrep(other.wl, other.flux_V)
retval_V = numpy.zeros(len(self.wl))
retval_V[overlap] = self.flux_V[overlap] + scipy.interpolate.splev(self.wl[overlap], V)
else:
retval_V = None
if (self.continuum != None) & (other.continuum != None):
continuum = scipy.interpolate.splrep(other.wl, other.continuum)
retval_continuum = numpy.zeros(len(self.wl))
retval_continuum[overlap] = self.continuum[overlap] + scipy.interpolate.splev(self.wl[overlap], continuum)
else:
retval_continuum = None
return Spectrum(wl=self.wl, I=retval_I, Q=retval_Q, U=retval_U, V=retval_V,
continuum=retval_continuum, header=self.header,
spectrum_type="DIFFERENCE")
def __mul__(self, factor):
if (self.flux_I != None):
I = self.flux_I*factor
else:
I = None
if (self.flux_Q != None):
Q = self.flux_Q*factor
else:
Q = None
if (self.flux_U != None):
U = self.flux_U*factor
else:
U = None
if (self.flux_V != None):
V = self.flux_V*factor
else:
V = None
if (self.continuum != None):
continuum = self.continuum*factor
else:
continuum = None
return Spectrum(wl=self.wl, I=I, Q=Q, U=U, V=V, continuum=continuum,
header=self.header, spectrum_type="SCALED")
def __div__(self, factor):
"""
Spectrum.__div__(factor)
__div__ overloads the division operator. If factor is a scalar, div
returns a spectrum object divided by the scalar factor. If
factor is instead another Spectrum object, div returns a
Spectrum object of one spectrum divided by the other.
divided = Spectrum/factor
"""
if isinstance(factor, float):
I = None
Q = None
U = None
V = None
continuum = None
if (self.flux_I != None):
I = self.flux_I/factor
if (self.flux_Q != None):
Q = self.flux_Q/factor
if (self.flux_U != None):
U = self.flux_U/factor
if (self.flux_V != None):
V = self.flux_V/factor
if (self.continuum != None):
continuum = self.continuum/factor
return Spectrum(wl=self.wl, I=I, Q=Q, U=U, V=V, continuum=continuum,
header=self.header, spectrum_type="SCALED")
elif isinstance(factor, Spectrum):
overlap_start = numpy.max([numpy.min(self.wl), numpy.min(factor.wl)])
overlap_stop = numpy.min([numpy.max(self.wl), numpy.max(factor.wl)])
overlap = scipy.where((self.wl >= overlap_start) & (self.wl <= overlap_stop))
if (self.flux_I != None) & (factor.flux_I != None):
I = scipy.interpolate.splrep(factor.wl, factor.flux_I)
retval_I = numpy.zeros(len(self.wl))
retval_I[overlap] = self.flux_I[overlap]/scipy.interpolate.splev(self.wl[overlap],I)
else:
retval_I = None
if (self.flux_Q != None) & (factor.flux_Q != None):
Q = scipy.interpolate.splrep(factor.wl, factor.flux_Q)
retval_Q = numpy.zeros(len(self.wl))
retval_Q[overlap] = self.flux_Q[overlap]/scipy.interpolate.splev(self.wl[overlap],Q)
else:
retval_Q = None
if (self.flux_U != None) & (factor.flux_U != None):
U = scipy.interpolate.splrep(factor.wl, factor.flux_U)
retval_U = numpy.zeros(len(self.wl))
retval_U[overlap] = self.flux_U[overlap]/scipy.interpolate.splev(self.wl[overlap],U)
else:
retval_U = None
if (self.flux_V != None) & (factor.flux_V != None):
V = scipy.interpolate.splrep(factor.wl, factor.flux_V)
retval_V = numpy.zeros(len(self.wl))
retval_V[overlap] = self.flux_V[overlap]/scipy.interpolate.splev(self.wl[overlap],V)
else:
retval_V = None
if (self.continuum != None) & (factor.continuum != None):
continuum = scipy.interpolate.splrep(factor.wl, factor.continuum)
retval_continuum = numpy.zeros(len(self.wl))
retval_continuum[overlap] = self.continuum[overlap]/scipy.interpolate.splev(self.wl[overlap],continuum)
else:
retval_continuum = None
return Spectrum(wl=self.wl, I=retval_I, Q=retval_Q, U=retval_U, V=retval_V,
continuum=retval_continuum, header=self.header,
spectrum_type="DIVIDED")
def diff_spectra(self, other, pad=False):
'''
Spectrum.diff_spectrum(other, pad=False)
diff_spectrum computes the difference between this spectrum and another
other = Spectrum object containing the comparison spectrum
pad = Boolean determining whether or not the difference spectrum should
be zero-padded
'''
        if self.header.get('RESOLVING_POWER') != other.header.get('RESOLVING_POWER'):
errmsg = "The resolutions of the two spectra are not compatible!"
raise SpectrumError(4, errmsg)
overlap_start = numpy.max([numpy.min(self.wl), numpy.min(other.wl)])
overlap_stop = numpy.min([numpy.max(self.wl), numpy.max(other.wl)])
overlap = scipy.where((self.wl >= overlap_start) & (self.wl <= overlap_stop))
"""
if (self.flux_I != None) & (other.flux_I != None):
I = scipy.interpolate.splrep(other.wl, other.flux_I)
if (self.flux_Q != None) & (other.flux_Q != None):
Q = scipy.interpolate.splrep(other.wl, other.flux_Q)
if (self.flux_U != None) & (other.flux_U != None):
U = scipy.interpolate.splrep(other.wl, other.flux_U)
if (self.flux_V != None) & (other.flux_V != None):
V = scipy.interpolate.splrep(other.wl, other.flux_V)
if (self.continuum != None) & (other.continuum != None):
continuum = scipy.interpolate.splrep(other.wl, other.continuum)
"""
if pad:
if (self.flux_I != None) & (other.flux_I != None):
I = scipy.interpolate.splrep(other.wl, other.flux_I)
retval_I = numpy.zeros(len(self.wl))
retval_I[overlap] = self.flux_I[overlap] - scipy.interpolate.splev(self.wl[overlap],I)
else:
retval_I = None
if (self.flux_Q != None) & (other.flux_Q != None):
Q = scipy.interpolate.splrep(other.wl, other.flux_Q)
retval_Q = numpy.zeros(len(self.wl))
retval_Q[overlap] = self.flux_Q[overlap] - scipy.interpolate.splev(self.wl[overlap],Q)
else:
retval_Q = None
if (self.flux_U != None) & (other.flux_U != None):
U = scipy.interpolate.splrep(other.wl, other.flux_U)
retval_U = numpy.zeros(len(self.wl))
retval_U[overlap] = self.flux_U[overlap] - scipy.interpolate.splev(self.wl[overlap],U)
else:
retval_U = None
if (self.flux_V != None) & (other.flux_V != None):
V = scipy.interpolate.splrep(other.wl, other.flux_V)
retval_V = numpy.zeros(len(self.wl))
retval_V[overlap] = self.flux_V[overlap] - scipy.interpolate.splev(self.wl[overlap],V)
else:
retval_V = None
if (self.continuum != None) & (other.continuum != None):
continuum = scipy.interpolate.splrep(other.wl, other.continuum)
retval_continuum = numpy.zeros(len(self.wl))
retval_continuum[overlap] = self.continuum[overlap] - scipy.interpolate.splev(self.wl[overlap],continuum)
else:
retval_continuum = None
return Spectrum(wl=self.wl, I=retval_I, Q=retval_Q, U=retval_U,
V=retval_V, continuum=retval_continuum, header=self.header,
spectrum_type='DIFFERENCE')
else:
if (self.flux_I != None) & (other.flux_I != None):
I = scipy.interpolate.splrep(other.wl, other.flux_I)
retval_I = scipy.interpolate.splev(self.wl[overlap], I)
else:
retval_I = None
if (self.flux_Q != None) & (other.flux_Q != None):
Q = scipy.interpolate.splrep(other.wl, other.flux_Q)
retval_Q = scipy.interpolate.splev(self.wl[overlap], Q)
else:
retval_Q = None
if (self.flux_U != None) & (other.flux_U != None):
U = scipy.interpolate.splrep(other.wl, other.flux_U)
retval_U = scipy.interpolate.splev(self.wl[overlap], U)
else:
retval_U = None
if (self.flux_V != None) & (other.flux_V != None):
V = scipy.interpolate.splrep(other.wl, other.flux_V)
retval_V = scipy.interpolate.splev(self.wl[overlap], V)
else:
retval_V = None
if (self.continuum != None) & (other.continuum != None):
continuum = scipy.interpolate.splrep(other.wl, other.continuum)
retval_continuum = scipy.interpolate.splev(self.wl[overlap], continuum)
else:
retval_continuum = None
return Spectrum(wl=self.wl[overlap], I=retval_I, Q=retval_Q, U=retval_U, V=retval_V, continuum=retval_continuum, header=self.header,
spectrum_type="DIFFERENCE")
def blend(self, other, fraction, wlRange=None):
"""
blended = Spectrum.blend(other, fraction)
Spectrum.blend returns a linear blend of the current spectrum with the other
Spectrum object weighted by the scalar fraction
other : [Spectrum] - the other spectrum
fraction : [float] - the ratio of blending, obeying the limits:
0 - all other spectrum
0.5 - equal blend
1 - all this spectrum
The function returns a Spectrum object containing the blended spectrum
"""
if wlRange == None:
wlRange = [0.0, numpy.inf]
overlap_start = numpy.max([numpy.min(self.wl), numpy.min(other.wl), wlRange[0]])
overlap_stop = numpy.min([numpy.max(self.wl), numpy.max(other.wl), wlRange[1]])
overlap = scipy.where((self.wl >= overlap_start) & (self.wl <= overlap_stop))
newWl = self.wl[overlap]
newI = None
newQ = None
newU = None
newV = None
newCont = None
if (self.flux_I != None) & (other.flux_I != None):
I = scipy.interpolate.splrep(other.wl, other.flux_I)
newI = self.flux_I[overlap]*fraction + scipy.interpolate.splev(newWl, I)*(1.0-fraction)
if (self.flux_Q != None) & (other.flux_Q != None):
Q = scipy.interpolate.splrep(other.wl, other.flux_Q)
newQ = self.flux_Q[overlap]*fraction + scipy.interpolate.splev(newWl, Q)*(1.0-fraction)
if (self.flux_U != None) & (other.flux_U != None):
U = scipy.interpolate.splrep(other.wl, other.flux_U)
newU = self.flux_U[overlap]*fraction + scipy.interpolate.splev(newWl, U)*(1.0-fraction)
if (self.flux_V != None) & (other.flux_V != None):
V = scipy.interpolate.splrep(other.wl, other.flux_V)
newV = self.flux_V[overlap]*fraction + scipy.interpolate.splev(newWl, V)*(1.0-fraction)
if (self.continuum != None) & (other.continuum != None):
            continuum = scipy.interpolate.splrep(other.wl, other.continuum)
newCont = self.continuum[overlap]*fraction + scipy.interpolate.splev(newWl, continuum)*(1.0-fraction)
return Spectrum(wl=newWl, I=newI, Q=newQ, U=newU, V=newV, continuum=newCont, header=self.header,
spectrum_type="BLENDED")
def trim(self, wlStart, wlStop):
if (wlStart > self.wl[-1]) or (wlStop < self.wl[0]) or (wlStart < self.wl[0]) or (wlStop > self.wl[-1]):
raise SpectrumError(2, 'Requested region falls outside wavelength bounds!')
bm = scipy.where( (self.wl > wlStart) & (self.wl < wlStop))[0]
self.wl = self.wl[bm]
if self.flux_I != None:
self.flux_I = self.flux_I[bm]
if self.flux_Q != None:
self.flux_Q = self.flux_Q[bm]
if self.flux_U != None:
self.flux_U = self.flux_U[bm]
if self.flux_V != None:
self.flux_V = self.flux_V[bm]
if self.continuum != None:
self.continuum = self.continuum[bm]
def calc_EW(self, wlStart, wlStop, findContinuum=False):
"""
EW = Spectrum.calc_EW(wlStart, wlStop, findContinuum=False)
calc_EW calculates the equivalent width of the Spectrum object between the
given start and stop wavelengths.
wlStart [float] = start of the EW interval. Must be same units as the
Spectrum.wl array
wlStop [float] = stop of the EW interval. Must be same units as the
Spectrum.wl array
findContinuum [Boolean] = Whether or not to attempt to automatically find
the continuum. Should probably only be used if Spectrum.flux_I is
not normalized
"""
if (wlStart > self.wl[-1]) or (wlStop < self.wl[0]):
raise SpectrumError(2, 'Wavelength Regions do not overlap!')
bm = scipy.where( (self.wl > wlStart) & (self.wl < wlStop) )[0]
cont = numpy.ones(len(bm))
if findContinuum:
cont *= numpy.median(self.flux_I[bm])
print "%.4f - continuum level" % numpy.median(self.flux_I[bm])
num = scipy.integrate.simps(self.flux_I[bm], self.wl[bm])
denom = scipy.integrate.simps(cont, self.wl[bm])
return (denom-num)
def mergeSpectra(self, second=None):
"""
merged = Spectrum.mergeSpectra(second=None)
Spectrum.mergeSpectra merges the spectrum with another spectrum which
covers a different spectral region.
second [Spectrum] = Spectrum object to be merged
"""
if second == None:
return self
x1 = self.wl
x2 = second.wl
overlap_start = numpy.max([numpy.min(x1), numpy.min(x2)])
overlap_stop = numpy.min([numpy.max(x1), numpy.max(x2)])
overlap = scipy.where((x1 >= overlap_start) & (x1 <= overlap_stop))
if (len(overlap[0]) > 1):
unique1 = scipy.where((x1 < overlap_start) | (x1 > overlap_stop))
unique2 = scipy.where((x2 < overlap_start) | (x2 > overlap_stop))
new_x = numpy.append(x1, x2[unique2])
if (self.flux_I != None) & (second.flux_I != None):
I1 = self.flux_I
I2 = second.flux_I
I1[numpy.isnan(I1)] = 0.0
I2[numpy.isnan(I2)] = 0.0
I = scipy.interpolate.splrep(x2, I2)
Iinterp = scipy.interpolate.splev(x1[overlap], I)
if (self.dflux_I != None) & (second.dflux_I != None):
dI1 = self.dflux_I
dI2 = second.dflux_I
dI1[numpy.isnan(dI1)] = 1000000.0
dI2[numpy.isnan(dI2)] = 1000000.0
dI1[I1==0.0] = 100000000.0
dI2[I2==0.0] = 100000000.0
dI = scipy.interpolate.splrep(x2, dI2)
dIinterp = scipy.interpolate.splev(x1[overlap], dI)
"""
import matplotlib.pyplot as pyplot
fig = pyplot.figure(1)
fig.clear()
ax1 = fig.add_axes([0.1, 0.1, 0.8, 0.8])
#print dIinterp
print overlap_start, overlap_stop
#raw_input()
#"""
mergedI = (I1[overlap]/dI1[overlap] + Iinterp/dIinterp)/(1.0/dI1[overlap]+1.0/dIinterp)
merged_dI = 1.0/(1.0/dI1[overlap] + 1.0/dIinterp)
new_dI = numpy.append(numpy.append(dI1[unique1], merged_dI), dI2[unique2])
"""
ax1.errorbar(x1[overlap], I1[overlap], yerr=dI1[overlap], color = 'r')
ax1.errorbar(x2, I2, yerr=dI2, color = 'b')
ax1.errorbar(x1[overlap], mergedI, yerr=merged_dI, color = 'g')
fig.show()
raw_input()
#"""
else:
mergedI = (I1[overlap] + Iinterp)/2.0
new_dI = None
new_I = numpy.append(numpy.append(I1[unique1], mergedI), I2[unique2])
else:
new_dI = None
new_I = None
if (self.flux_Q != None) & (second.flux_Q != None):
Q1 = self.flux_Q
Q2 = second.flux_Q
Q1[numpy.isnan(Q1)] = 0.0
Q2[numpy.isnan(Q2)] = 0.0
Q = scipy.interpolate.splrep(x2, Q2)
Qinterp = scipy.interpolate.splev(x1[overlap], Q)
mergedQ = (Q1[overlap] + Qinterp)/2.0
new_Q = numpy.append(numpy.append(Q1[unique1], mergedQ), Q2[unique2])
else:
new_Q = None
if (self.flux_U != None) & (second.flux_U != None):
U1 = self.flux_U
U2 = second.flux_U
U1[numpy.isnan(U1)] = 0.0
U2[numpy.isnan(U2)] = 0.0
U = scipy.interpolate.splrep(x2, U2)
Uinterp = scipy.interpolate.splev(x1[overlap], U)
mergedU = (U1[overlap] + Uinterp)/2.0
new_U = numpy.append(numpy.append(U1[unique1], mergedU), U2[unique2])
else:
new_U = None
if (self.flux_V != None) & (second.flux_V != None):
V1 = self.flux_V
V2 = second.flux_V
V1[numpy.isnan(V1)] = 0.0
V2[numpy.isnan(V2)] = 0.0
V = scipy.interpolate.splrep(x2, V2)
Vinterp = scipy.interpolate.splev(x1[overlap], V)
mergedV = (V1[overlap] + Vinterp)/2.0
new_V = numpy.append(numpy.append(V1[unique1], mergedV), V2[unique2])
else:
new_V = None
if (self.continuum != None) & (second.continuum != None):
C1 = self.continuum
C2 = second.continuum
C1[numpy.isnan(C1)] = 0.0
C2[numpy.isnan(C2)] = 0.0
C = scipy.interpolate.splrep(x2, C2)
Cinterp = scipy.interpolate.splev(x1[overlap], C)
mergedC = (C1[overlap] + Cinterp)/2.0
new_C = numpy.append(numpy.append(C1[unique1], mergedC), C2[unique2])
else:
new_C = None
else:
new_x = numpy.append(x1, x2)
if (self.flux_I != None) & (second.flux_I != None):
I1 = self.flux_I
I2 = second.flux_I
I1[numpy.isnan(I1)] = 0.0
I2[numpy.isnan(I2)] = 0.0
new_I = numpy.append(I1, I2)
if (self.dflux_I != None) & (second.dflux_I != None):
dI1 = self.dflux_I
dI2 = second.dflux_I
dI1[numpy.isnan(dI1)] = 0.0
dI2[numpy.isnan(dI2)] = 0.0
new_dI = numpy.append(dI1, dI2)
else:
new_dI = None
else:
new_I = None
new_dI = None
if (self.flux_Q != None) & (second.flux_Q != None):
Q1 = self.flux_Q
Q2 = second.flux_Q
Q1[numpy.isnan(Q1)] = 0.0
Q2[numpy.isnan(Q2)] = 0.0
new_Q = numpy.append(Q1, Q2)
else:
new_Q = None
if (self.flux_U != None) & (second.flux_U != None):
U1 = self.flux_U
U2 = second.flux_U
U1[numpy.isnan(U1)] = 0.0
U2[numpy.isnan(U2)] = 0.0
new_U = numpy.append(U1, U2)
else:
new_U = None
if (self.flux_V != None) & (second.flux_V != None):
V1 = self.flux_V
V2 = second.flux_V
V1[numpy.isnan(V1)] = 0.0
V2[numpy.isnan(V2)] = 0.0
new_V = numpy.append(V1, V2)
else:
new_V = None
if (self.continuum != None) & (second.continuum != None):
C1 = self.continuum
C2 = second.continuum
C1[numpy.isnan(C1)] = 0.0
C2[numpy.isnan(C2)] = 0.0
new_C = numpy.append(C1, C2)
else:
new_C = None
header = self.header.copy()
header.set("WLSTART", numpy.min([header.get("WLSTART"),
second.header.get("WLSTART")]))
header.set("WLSTOP", numpy.max([header.get("WLSTOP"),
second.header.get("WLSTOP")]))
retval = Spectrum(wl=new_x, I = new_I, dI = new_dI, Q = new_Q, U = new_U, V = new_V,
continuum = new_C, spectrum_type='MERGED', header=header, filename=self.filename)
return retval
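# Hedged usage sketch (not part of the original module): exercising the
# Spectrum class on a synthetic Gaussian absorption line. For a line of
# depth d and standard deviation s (in the wavelength units of wl), the
# equivalent width should approach the analytic value d*s*sqrt(2*pi).
# Assumes the module-level imports above and an era-appropriate scipy that
# still provides scipy.where and scipy.integrate.simps.
def _example_spectrum_usage():
    wl = numpy.linspace(5990.0, 6010.0, 2001)
    depth, sigma, center = 0.4, 0.5, 6000.0
    flux = 1.0 - depth*numpy.exp(-(wl - center)**2.0/(2.0*sigma**2.0))
    spec = Spectrum(wl=wl, I=flux, spectrum_type='RAW')
    ew = spec.calc_EW(5995.0, 6005.0)
    analytic = depth*sigma*numpy.sqrt(2.0*numpy.pi)   # ~0.5013
    assert abs(ew - analytic) < 1.0e-3
    return ew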
class ObservedSpectrum ( object ):
def __init__(self, observed=None):
self.observed = observed # Should be a Spectrum object
def yank(self, **kwargs):
return self.observed
class Integrator( object ):
def __init__(self, parent=None, deltav = 0.1, limb_darkening=None):
"""
Integrator(parent=None, deltav=0.1, limb_darkening=None)
An Integrator object handles the interpolating, integrating, and
convolving of raw data produced by MoogStokes
Input:
parent [Moog960.SyntheticPhrase] = reference to the Integrator's parent
deltav [float] = velocity resolution used for interpolating (km/s)
limb_darkening [numpy.array(float)] = custom limb darkening coefficients for
the disk integration routines. There should be as many coefficients as
there are unique emergent spectral elements.
Contents:
Integrator.parent = reference to Integrator's parent. Necessary to find the
location of the raw/processed data
Integrator.deltav = velocity resolution used for interpolating (km/s)
Integrator.interpolatedData = reference to interpolated data
Integrator.integratedData = reference to integrated data
Integrator.convolvedData = reference to convolved data
Integrator.limb_darkening = custom limb darkening coefficients for disk integration
routines
"""
self.parent = parent
self.deltav = deltav
self.interpolated = parent.interpolatedData # interpolated to uniform wl grid
for interp in self.interpolated:
interp.loadData()
self.integrated = parent.integratedData # vsin i - needs interpolated
for integ in self.integrated:
integ.loadData()
self.convolved = parent.convolvedData # R - needs integrated
for convol in self.convolved:
            if convol.wl is None:
convol.loadData()
self.limb_darkening = limb_darkening
class TennisBall( Integrator ):
"""
TennisBall - inherits from Integrator
TennisBall is used to handle data processing from the normal, non-magnetic
Moog. Tennis balls are all one color (I know, I know, except for the seam)
and the scalar version of Moog produces disk-averaged spectra. There is
    no need to actually process the output of Moog, so the diskInt and
    resample methods below simply pass the data through.
    """
def loadData(self):
"""
TennisBall.loadData()
placeholder function. Since no data processing is necessary,
nothing is done.
"""
return
def diskInt(self):
"""
TennisBall.diskInt()
Copies the first entry in the parent.rawData list to the list of
integrated spectra.
Since the TennisBall object is used with Moog-produced disk-averaged
spectra, no processing is performed.
"""
self.integrated.append(self.parent.rawData[0])
def resample(self, R=0, observedWl=None):
"""
TennisBall.resample(R=0, observedWl=None)
Resamples the first entry in the list of integrated spectra and
saves the result in the list of convolved spectra.
"""
self.convolved.append(self.integrated[0].resample(R=R, observedWl=observedWl))
def yank(self, vsini=0.0, R=0.0, observedWl = None, keySignature="CONVOLVED"):
"""
        TennisBall.yank(vsini=0.0, R=0.0, observedWl=None, keySignature="CONVOLVED")
        Returns the stored spectrum matching keySignature, one of
        "RAW", "INTEGRATED", or "CONVOLVED".
"""
if keySignature == "CONVOLVED":
return self.convolved[0]
if keySignature == "RAW":
return self.parent.rawData[0]
if keySignature == "INTEGRATED":
return self.integrated[0]
class BeachBall( Integrator ):
"""
A BeachBall object is an extension of the Integrator class and is used to
handle MoogStokes output generated with the MoogStokes parameter diskflag=1
    The BeachBall disk integration algorithm divides the visible stellar
    surface into N cells, each with its own emergent spectrum, limb angle
    (mu), and azimuthal angle (phi); each cell is weighted by a
    limb-darkening law and the weighted spectra are summed into a
    disk-integrated spectrum.
    """
def loadData(self):
"""
BeachBall.loadData()
Loads raw data from the parent Moog960.SyntheticPhrase. Then,
interpolates each emergent spectra to the wavelength spacing denoted
by the velocity resolution (BeachBall.deltav), weighting each
slice by a limb darkening coefficient. The interpolated and weighted
emergent fluxes are then stored in the interpolated list
"""
c = 3e5 #km/s
phi = []
mu = []
cell = []
newWl = []
newWl.append(self.parent.rawData[0].wl[0])
while newWl[-1] < self.parent.rawData[0].wl[-1]:
dLambda = newWl[-1]*self.deltav/c
newWl.append(newWl[-1]+dLambda)
newWl = numpy.array(newWl[0:-1])
wave = numpy.mean(newWl)
if ((1.0/(wave/10000.0)) < 2.4):
self.alpha = -0.023 + 0.292/(wave/10000.0)
else:
self.alpha = -0.507 + 0.441/(wave/10000.0)
limb_darkening = []
for raw in self.parent.rawData:
phi.append(raw.header.get('PHI_ANGLE'))
mu.append(raw.header.get('MU'))
limb_darkening.append(1.0-(1.0-mu[-1]**(self.alpha)))
cell.append(raw.header.get('CELL'))
fI = scipy.interpolate.UnivariateSpline(raw.wl,
numpy.array(raw.flux_I)/numpy.array(raw.continuum), s=0)
fV = scipy.interpolate.UnivariateSpline(raw.wl,
numpy.array(raw.flux_V)/numpy.array(raw.continuum), s=0)
self.interpolated.append(Spectrum(wl=newWl, I = fI(newWl)*limb_darkening[-1],
V = fV(newWl)*limb_darkening[-1], continuum = numpy.ones(len(newWl))*limb_darkening[-1],
header = raw.header.copy(), spectrum_type='INTERPOLATED'))
"""
Probably should do something with the labels here...
"""
self.limb_darkening = numpy.array(limb_darkening)
self.phi = numpy.array(phi)
self.mu = numpy.array(mu)
self.cell = numpy.array(cell)
self.ncells = len(self.cell)
def diskInt(self, vsini=0.0):
"""
BeachBall.diskInt(vsini=0.0)
Input:
vsini [float] = rotational velocity * sin (inclination) (in km/s)
Returns:
True: if the requested VSINI disk integrated spectrum
did not already exist and had to be created
False: if the requested VSINI disk integrated spectrum was
already existing, and did NOT have to be created
"""
        if (self.parent.rawData[0].wl is None):
for raw in self.parent.rawData:
raw.loadData()
self.loadData()
for integrated in self.integrated:
if integrated.header.get('VSINI') == vsini:
return False
I, V = self.rtint(vsini_in=vsini)
header = self.interpolated[0].header.copy()
header.set('VSINI', vsini)
header.remove('PHI_ANGLE')
header.remove('MU')
header.remove('CELL')
for interp in self.interpolated[1:]:
header.add_history(interp.header.get('SPECTRUM_TYPE')+' - '+interp.header.get('SPECTRUM_ID'))
self.integrated.append(Spectrum(wl=self.interpolated[0].wl, I=I, V=V, header=header,
spectrum_type='DISK INTEGRATED'))
return True
"""
Probably should do something with the labels here
"""
#"""
    def findVsini(self, vsini):
        """
        BeachBall.findVsini(vsini)
        Returns the disk-integrated spectrum whose VSINI header matches the
        requested value to within 0.01 km/s; raises SpectrumError otherwise.
        """
for integrated in self.integrated:
if numpy.abs(integrated.header.get('VSINI') - vsini) < 0.01:
return integrated
raise SpectrumError(1, "Integrated Spectrum with vsini=%.2f NOT FOUND!!!" %
(vsini))
#self.diskInt(vsini=vsini)
#return self.integrated[-1]
#"""
def resample(self, vsini=0.0, R=0, observedWl=None):
"""
BeachBall.resample(vsini=0.0, R=0, observedWl=None)
Resample resamples the current spectrum to a desired resolving power
        Returns:
            retval : list of strings naming the types of spectra created
                "INTEGRATED" - if a disk-integrated spectrum with the desired
                    VSINI did not exist and had to be created
                "CONVOLVED" - if a convolved spectrum with the desired VSINI
                    and R did not exist and had to be created
        TODO: this bookkeeping should eventually be done with the Labels.
"""
retval = []
for convol in self.convolved:
if ((numpy.abs(convol.header.get('VSINI') - vsini) < 0.01)
and (numpy.abs(convol.header.get('RESOLVING_POWER') - R) < 0.1)):
return retval
try:
integrated = self.findVsini(vsini)
except SpectrumError:
self.diskInt(vsini=vsini)
retval.append("INTEGRATED")
integrated = self.findVsini(vsini)
if R > 0:
self.convolved.append(integrated.resample(R=R, observedWl=observedWl))
retval.append("CONVOLVED")
return retval
else:
raise SpectrumError(3, "Resolving Power must be greater than 0!")
    def yank(self, vsini=0.0, R=0.0, observedWl=None, keySignature="CONVOLVED"):
        """
        BeachBall.yank(vsini=0.0, R=0.0, observedWl=None,
                keySignature="INTEGRATED" | "CONVOLVED")
        Retrieves a previously computed spectrum matching vsini (and R, for
        convolved spectra); raises SpectrumError if no match is found.
        """
if keySignature=="INTEGRATED":
return self.findVsini(vsini)
if keySignature=="CONVOLVED":
for convol in self.convolved:
if ((numpy.abs(convol.header.get('VSINI') - vsini) < 0.01) and
(numpy.abs(convol.header.get('RESOLVING_POWER') - R) < 0.1)):
                    if observedWl is not None:
return convol.resample(R=R, observedWl=observedWl)
else:
return convol
raise SpectrumError(1, "Spectrum with vsini=%.2f and R=%.1f NOT FOUND!!!" %
(vsini, R))
def rtint(self, vsini_in=0.0, vrt_in=0, **kwargs):
"""
This is a python translation of Jeff Valenti's disk integration routine
PURPOSE:
Produces a flux profile by integrating intensity profiles (sampled
at various mu angles) over the visible stellar surface.
Calling Sequence:
flux = rtint(mu, inten, deltav, vsini, vrt)
INPUTS:
MU: list of length nmu cosine of the angle between the outward normal
and the line of sight for each intensity spectrum INTEN
INTEN: list (of length nmu) numpy arrays (each of length npts)
intensity spectra at specified values of MU
DELTAV: (scalar) velocity spacing between adjacent spectrum points in
INTEN (same units as VSINI and VRT)
            VSINI: (scalar) maximum radial velocity, due to solid-body rotation
            VRT: (scalar) radial-tangential macroturbulence parameter, i.e. sqrt(2)
                times the standard deviation of a Gaussian distribution of
                turbulent velocities. The same distribution function describes
                the radial motions of one component and the tangential motions of
a second component. Each component covers half the stellar surface.
See "Observation and Analysis of Stellar Photospheres" by Gray.
INPUT KEYWORDS:
            OSAMP: (scalar) internal oversampling factor for the convolutions. By
                default, convolutions are done using the input points (OSAMP=1),
                but when OSAMP is set to higher integer values, the input spectra
                are first oversampled via cubic spline interpolation.
OUTPUTS:
function value: numpy array of length npts producing the disk-integrated
flux profile.
RESTRICTIONS:
Intensity profiles are weighted by the fraction of the projected stellar
surface they represent, apportioning the area between adjacent MU
points equally. Additional weights (such as those used in a Gauss-
Legendre quadrature) cannot meaningfully be used in this scheme.
            About twice as many points are required with this scheme to achieve
            the same precision as Gauss-Legendre quadrature.
            DELTAV, VSINI, and VRT must all be in the same units (e.g. km/s).
If specified, OSAMP should be a positive integer
AUTHOR'S REQUEST:
If you use this algorithm in work that you publish, please cite...
MODIFICATION HISTORY:
Feb 88 GM Created ANA version
13 Oct 92 JAV Adapted from G. Marcy's ANA routine of same name
03 Nov 93 JAV Switched to annular convolution technique
12 Nov 93 JAV Fixed bug. Intensity components not added when vsini=0
14 Jun 94 JAV Reformatted for "public" release. Heavily commented.
Pass deltav instead of 2.998d5/deltav. Added osamp
keyword. Added rebinning logic and end of routine.
Changed default osamp from 3 to 1.
20 Feb 95 JAV Added mu as an argument to handle arbitrary mu sampling
and remove ambiguity in intensity profile ordering.
Interpret VTURB as sqrt(2)*sigma instead of just sigma
Replaced call_external with call to spl_{init|interp}.
03 Apr 95 JAV Multiply flux by !pi to give observed flux.
24 Oct 95 JAV Force "nmk" padding to be at least 3 pixels
18 Dec 95 JAV Renamed from dkint() to rtint(). No longer make local
copy of intensities. Use radial-tangential instead of
isotropic Gaussian macroturbulence.
26 Jan 99 JAV For NMU=1 and VSINI=0, assume resolved solar surface;
                apply R-T macro, but suppress vsini broadening.
01 Apr 99 GMH Use annuli weights, rather than assuming equal area.
27 Feb 13 CPD Translated to Python
"""
#make local copies of various input vars, which will be altered below
vsini = float(vsini_in)
vrt = float(vrt_in)
mu = self.mu
if "OSAMP" in kwargs:
os = max(round(kwargs["OSAMP"]), 1)
else:
os = 1
#Convert input MU to proj. radii, R of annuli for star of unit radius
#(which is just sine rather than cosine of the angle between the outward
#normal and the LOS)
rmu = numpy.sqrt(1.0-self.mu**2)
#Sort the proj. radii and corresponding intensity spectra into ascending
#order (i.e. from disk center to limb), which is equivalent to sorting
        #MU in descending order. Reorder mu and the interpolated spectra along
        #with rmu so they stay aligned with the annuli.
        order = numpy.argsort(rmu)
        rmu = rmu[order]
        mu = mu[order]
        interpolated = [self.interpolated[i] for i in order]
        nmu = len(mu)
if (nmu == 1):
vsini = 0.0
#Calculate the proj. radii for boundaries of disk integration annuli.
#The n+1 boundaries are selected so that r(i+1) exactly bisects the area
#between rmu(i) and rmu(i+1). The innermost boundary, r(0) is set to 0
#(Disk center) and the outermost boundary r(nmu) is set to to 1 (limb).
        if (nmu > 1) or (vsini != 0):
r = numpy.sqrt(0.5*(rmu[0:-1]**2.0+rmu[1:]**2.0)).tolist()
r.insert(0, 0.0)
r.append(1.0)
r = numpy.array(r)
#Calculate integration weights for each disk integration annulus. The
#weight is given by the relative area of each annulus, normalized such
#that the sum of all weights is unity. Weights for limb darkening are
#included explicitly in intensity profiles, so they aren't needed here.
wt = r[1:]**2.0 - r[0:-1]**2.0
else:
wt = numpy.array([1.0])
#Generate index vectors for input and oversampled points. Note that the
        #oversampled indices are carefully chosen such that every "os" finely
#sampled points fit exactly into one input bin. This makes it simple to
#"integrate" the finely sampled points at the end of the routine.
npts = len(self.interpolated[0].flux_I)
xpix = numpy.arange(npts)
nfine = os*npts
        xfine = (0.5/os) * (2.0*numpy.arange(nfine) - os + 1)
        #Loop through annuli, constructing and convolving with rotation kernels.
Ifine = numpy.zeros(nfine)
Vfine = numpy.zeros(nfine)
cfine = numpy.zeros(nfine)
fluxI = numpy.zeros(nfine)
fluxV = numpy.zeros(nfine)
continuum = numpy.zeros(nfine)
        for m, spectrum, w, i in zip(mu, interpolated, wt, range(nmu)):
I = spectrum.flux_I
V = spectrum.flux_V
c = spectrum.continuum
#use cubic spline routine to make an oversampled version of the
#intensity profile for the current annulus.
            if os == 1:
Ifine = I.copy()
Vfine = V.copy()
cfine = c.copy()
else:
Ispl = scipy.interpolate.splrep(xpix, I)
Vspl = scipy.interpolate.splrep(xpix, V)
cspl = scipy.interpolate.splrep(xpix, c)
                Ifine = scipy.interpolate.splev(xfine, Ispl)
                Vfine = scipy.interpolate.splev(xfine, Vspl)
                cfine = scipy.interpolate.splev(xfine, cspl)
# Construct the convolution kernel which describes the distribution of
# rotational velocities present in the current annulus. The distribution
            # has been derived analytically for annuli of arbitrary thickness in a
            # rigidly rotating star. The kernel is constructed in two pieces: one
# piece for radial velocities less than the maximum velocity along the
# inner edge of annulus, and one piece for velocities greater than this
# limit.
if vsini > 0:
r1 = r[i]
r2 = r[i+1]
dv = self.deltav/os
maxv = vsini * r2
                nrk = 2*int(maxv/dv) + 3
v = dv * (numpy.arange(nrk) - ((nrk-1)/2.))
rkern = numpy.zeros(nrk)
j1 = scipy.where(abs(v) < vsini*r1)
if len(j1[0]) > 0:
rkern[j1] = (numpy.sqrt((vsini*r2)**2 - v[j1]**2)-
numpy.sqrt((vsini*r1)**2 - v[j1]**2))
j2 = scipy.where((abs(v) >= vsini*r1) & (abs(v) <= vsini*r2))
if len(j2[0]) > 0:
rkern[j2] = numpy.sqrt((vsini*r2)**2 - v[j2]**2)
#print("dv = %.2f" % dv)
#print("vsini = %.2f" % vsini)
#print abs(v)
#print vsini*r1
#print vsini*r2
#print("len(j1) = %d len(j2) = %d" % (len(j1[0]), len(j2[0])))
#print("Rkern = ")
#print rkern
rkern = rkern / rkern.sum() # normalize kernel
# Convolve the intensity profile with the rotational velocity kernel for
                # this annulus. The original IDL routine padded each profile end with
                # as many points as are in the kernel to reduce Fourier ringing; here
                # scipy's mode='same' keeps the output length equal to the input.
if nrk > 3:
Ifine = scipy.convolve(Ifine, rkern, mode='same')
Vfine = scipy.convolve(Vfine, rkern, mode='same')
cfine = scipy.convolve(cfine, rkern, mode='same')
            # Calc projected sigma for radial and tangential velocity distributions.
sigma = os*vrt/numpy.sqrt(2.0) /self.deltav
sigr = sigma * m
sigt = sigma * numpy.sqrt(1.0 - m**2.)
# Figure out how many points to use in macroturbulence kernel
nmk = max(min(round(sigma*10), (nfine-3)/2), 3)
# Construct radial macroturbulence kernel w/ sigma of mu*VRT/sqrt(2)
if sigr > 0:
xarg = (numpy.arange(2*nmk+1)-nmk) / sigr # exponential arg
                mrkern = numpy.exp(numpy.maximum(-0.5*(xarg**2), -20.0))
mrkern = mrkern/mrkern.sum()
else:
mrkern = numpy.zeros(2*nmk+1)
mrkern[nmk] = 1.0 #delta function
# Construct tangential kernel w/ sigma of sqrt(1-mu**2)*VRT/sqrt(2.)
if sigt > 0:
xarg = (numpy.arange(2*nmk+1)-nmk) /sigt
                mtkern = numpy.exp(numpy.maximum(-0.5*(xarg**2), -20.0))
mtkern = mtkern/mtkern.sum()
else:
mtkern = numpy.zeros(2*nmk+1)
mtkern[nmk] = 1.0
# Sum the radial and tangential components, weighted by surface area
area_r = 0.5
area_t = 0.5
mkern = area_r*mrkern + area_t*mtkern
            # Convolve the total flux profiles with the combined macroturbulence
            # kernel (the IDL original padded both ends against Fourier ringing;
            # mode='same' handles the edges here).
Ifine = scipy.convolve(Ifine, mkern, mode='same')
Vfine = scipy.convolve(Vfine, mkern, mode='same')
cfine = scipy.convolve(cfine, mkern, mode='same')
# Add contribution from current annulus to the running total
fluxI += w*Ifine
fluxV += w*Vfine
continuum += w*cfine
return fluxI/continuum, fluxV/continuum
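# The analytic rotation kernel built inside rtint() can be illustrated in
# isolation. The helper below is a minimal, self-contained sketch (its name
# and the sample values are illustrative, not part of this module): for an
# annulus spanning projected radii r1..r2 on a star rotating at vsini, the
# kernel at velocity v is sqrt((vsini*r2)**2 - v**2) - sqrt((vsini*r1)**2 - v**2)
# interior to the inner edge, and sqrt((vsini*r2)**2 - v**2) between the edges.
def _demo_rotation_kernel(vsini=10.0, r1=0.3, r2=0.5, dv=1.0):
    import numpy
    nrk = 2*int(vsini*r2/dv) + 3                # same sizing rule as rtint()
    v = dv * (numpy.arange(nrk) - (nrk-1)/2.0)  # velocity grid, centred on 0
    rkern = numpy.zeros(nrk)
    inner = numpy.abs(v) < vsini*r1
    rkern[inner] = (numpy.sqrt((vsini*r2)**2 - v[inner]**2) -
                    numpy.sqrt((vsini*r1)**2 - v[inner]**2))
    edge = (numpy.abs(v) >= vsini*r1) & (numpy.abs(v) <= vsini*r2)
    rkern[edge] = numpy.sqrt((vsini*r2)**2 - v[edge]**2)
    return rkern / rkern.sum()                  # normalized, as in rtint()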
def blackBodySpectrum(TemplateSpectrum=None, **kwargs):
""" Returns a blackbody function over the given wavelength """
"""
inputs:
wl : wavelength array
Assumed to be in units of cm, unless specified by wlUnits kwarg
nu : frequency array
T : Blackbody Temperature (K)
wlUnits: Units of wavelengths (Optional)
'Angstroms'
'nanometers'
'microns'
'cm'
'meters'
outUnits: cgs Units to output.
'Fnu'
'Flambda'
'Energy'
outputs:
        y : Blackbody function. Unit is assumed to be Flambda or Fnu (depending on
            whether wl or nu was given) unless overridden by the outUnits kwarg
"""
h = 6.626e-27
c = 2.998e10
k = 1.38e-16
T = kwargs["T"]
wl = TemplateSpectrum.wl
if "wlUnits" in kwargs:
if kwargs["wlUnits"] == "Angstroms":
wl = wl*1e-8
c1 = 2.0*h*c**2.0
c2 = h*c/(k*T)
Flambda = c1/(wl**5.0*(numpy.exp(c2/wl)-1.0))
if "outUnits" in kwargs:
if kwargs["outUnits"] == "Energy":
return Flambda*wl
else:
return Spectrum(wl=TemplateSpectrum.wl, I=Flambda)
def blackBody(**kwargs):
""" Returns a blackbody function over the given wavelength """
"""
inputs:
wl : wavelength array
Assumed to be in units of cm, unless specified by wlUnits kwarg
nu : frequency array
T : Blackbody Temperature (K)
wlUnits: Units of wavelengths (Optional)
'Angstroms'
'nanometers'
'microns'
'cm'
'meters'
outUnits: cgs Units to output.
'Fnu'
'Flambda'
'Energy'
outputs:
        y : Blackbody function. Unit is assumed to be Flambda or Fnu (depending on
            whether wl or nu was given) unless overridden by the outUnits kwarg
"""
h = 6.626e-27
c = 2.998e10
k = 1.38e-16
T = kwargs["T"]
if "wl" in kwargs:
wl = kwargs["wl"]
if "wlUnits" in kwargs:
if kwargs["wlUnits"] == "Angstroms":
wl = wl*1e-8
c1 = 2.0*h*c**2.0
c2 = h*c/(k*T)
Flambda = c1/(wl**5.0*(numpy.exp(c2/wl)-1.0))
if "outUnits" in kwargs:
if kwargs["outUnits"] == "Energy":
return Flambda*wl
else:
return Flambda
elif "nu" in kwargs:
nu = kwargs["nu"]
c1 = 2.0*h/(c**2.0)
c2 = h/(k*T)
Fnu = c1*nu**2.0/(numpy.exp(c2*nu) - 1.0)
if "outUnits" in kwargs:
if kwargs["outUnits"] == "Energy":
return Fnu*nu
else:
return Fnu
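# Minimal usage sketch for blackBody() (function name and values below are
# illustrative only): evaluate Flambda for a 5000 K blackbody over
# 4000-7000 Angstroms, passing wavelengths in cm as the function assumes
# by default. The peak should land near Wien's law, ~5.8e-5 cm.
def _demo_blackBody():
    import numpy
    wl_cm = numpy.linspace(4000.0, 7000.0, 50) * 1e-8  # Angstroms -> cm
    flam = blackBody(wl=wl_cm, T=5000.0)
    return wl_cm[numpy.argmax(flam)]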
class photometrySynthesizer( object ):
def __init__(self, **kwargs):
if "filterDir" in kwargs:
self.fdir = kwargs["filterDir"]
else:
self.fdir = '/home/deen/Data/StarFormation/Photometry/FILTER_PROFILES/'
filterNames = ['Uj', 'Bj', 'Vj', 'Rc', 'Ic', '2massj', '2massh', '2massk']
fileNames = ['U_Landolt.dat', 'B_Bessell.dat', 'V_Bessell.dat', 'cousins_Rband.dat',
'cousins_Iband.dat', 'J_2MASS.dat', 'H_2MASS.dat', 'K_2MASS.dat']
fnu_zero = [1829, 4144, 3544, 2950, 2280.0, 1594.0, 1024.0, 666.7 ]
flam_zero = [4.0274905e-09, 6.3170333e-09, 3.6186341e-09, 2.1651655e-9, 1.1326593e-09,
3.129e-10, 1.133e-10, 4.283e-11] #erg/s/cm^2/Angstrom
lambda_eff = [3600, 4362, 5446, 6413, 7978, 12285, 16385, 21521]
mVega = [0.02, 0.02, 0.03, 0.039, 0.035, -0.001, +0.019, -0.017]
self.photBands = []
for band in zip(filterNames, fileNames, fnu_zero, flam_zero, lambda_eff, mVega):
photBand = dict()
photBand['Name'] = band[0]
photBand['file'] = band[1]
photBand['fnu_zero'] = band[2]
photBand['flam_zero'] = band[3]
photBand['lambda_eff'] = band[4]
photBand['mVega'] = band[5]
fx = []
fy = []
dat = open(self.fdir+band[1], 'r').read().split('\n')
for line in dat:
if len(line) > 0:
l = line.split()
fx.append(float(l[0])*1e4)
fy.append(float(l[1]))
fy = numpy.array(fy)
fx = numpy.array(fx)
photBand['min_x'] = min(fx)
photBand['max_x'] = max(fx)
photBand['photSpline'] = scipy.interpolate.splrep(fx, fy)
self.photBands.append(photBand)
    def photFlux(self, x, y, filtName):
        """
        Returns the filter-weighted mean flux of the spectrum (x, y) through
        the named band: integral(y*filter)/integral(filter) over the overlap.
        """
for band in self.photBands:
if band['Name'] == filtName:
bm = scipy.where( (x > band['min_x'] ) & (x < band['max_x']) )[0]
fnew = scipy.interpolate.splev(x[bm], band['photSpline'])
valid_bm = scipy.where( (fnew > 0.0) & (y[bm] > 0.0) )[0]
numerator = scipy.integrate.simps(y[bm][valid_bm]*fnew[valid_bm], x[bm][valid_bm])
denom = scipy.integrate.simps(fnew[valid_bm], x[bm][valid_bm])
return numerator/denom
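# Self-contained sketch of the filter-weighted mean flux integral that
# photFlux() computes, using a synthetic Gaussian bandpass in place of a
# filter-profile file (helper name and all values are hypothetical):
def _demo_photFlux():
    import numpy
    import scipy.integrate
    x = numpy.linspace(10000.0, 14000.0, 500)       # wavelength (Angstroms)
    y = 1.0 + 0.0001*(x - 12000.0)                  # toy spectrum
    f = numpy.exp(-0.5*((x - 12285.0)/800.0)**2)    # toy J-band-like filter
    return scipy.integrate.simps(y*f, x) / scipy.integrate.simps(f, x)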
|
JNMcLane/MoogPy
|
MoogTools/SpectralTools.py
|
Python
|
mit
| 79,066
|
[
"Gaussian"
] |
1229b3a86120260b26b1a84fa39c95ad6de94aeab27214f9eac86d0b53f767de
|
###
# Copyright 2008-2011 Diamond Light Source Ltd.
# This file is part of Diffcalc.
#
# Diffcalc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Diffcalc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diffcalc. If not, see <http://www.gnu.org/licenses/>.
###
import unittest
try:
from numpy import matrix
except ImportError:
from numjy import matrix
from test.tools import assert_dict_almost_equal, mneq_
from diffcalc.ub.crystal import CrystalUnderTest
from test.diffcalc import scenarios
class TestCrystalUnderTest(object):
def setup_method(self):
self.tclatt = []
self.tcbmat = []
# From the dif_init.mat next to dif_dos.exe on Vlieg's cd
#self.tclatt.append([4.0004, 4.0004, 2.270000, 90, 90, 90])
#self.tcbmat.append([[1.570639, 0, 0] ,[0.0, 1.570639, 0] ,
# [0.0, 0.0, 2.767923]])
# From b16 on 27June2008 (From Chris Nicklin)
# self.tclatt.append([3.8401, 3.8401, 5.43072, 90, 90, 90])
# self.tcbmat.append([[1.636204, 0, 0],[0, 1.636204, 0],
# [0, 0, 1.156971]])
def testGetBMatrix(self):
# Check the calculated B Matrix
for sess in scenarios.sessions():
if sess.bmatrix is None:
continue
cut = CrystalUnderTest('tc', *sess.lattice)
desired = matrix(sess.bmatrix)
            print(desired.tolist())
answer = cut.B
            print(answer.tolist())
note = "Incorrect B matrix calculation for scenario " + sess.name
mneq_(answer, desired, 4, note=note)
def test__str__(self):
cut = CrystalUnderTest("HCl", 1, 2, 3, 4, 5, 6)
        print(cut.__str__())
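# A minimal sketch of the B matrix these tests exercise, following the
# standard Busing & Levy (1967) construction in the 2*pi convention used by
# the commented reference values above. The helper name and argument order
# are illustrative, not part of diffcalc's API.
def _sketch_b_matrix(a1, a2, a3, alpha1, alpha2, alpha3):
    from math import sin, cos, sqrt, radians, pi
    r1, r2, r3 = radians(alpha1), radians(alpha2), radians(alpha3)
    # unit cell volume
    vol = a1*a2*a3*sqrt(1 - cos(r1)**2 - cos(r2)**2 - cos(r3)**2
                        + 2*cos(r1)*cos(r2)*cos(r3))
    # reciprocal lattice lengths
    b1 = 2*pi*a2*a3*sin(r1)/vol
    b2 = 2*pi*a1*a3*sin(r2)/vol
    b3 = 2*pi*a1*a2*sin(r3)/vol
    # cosines of the reciprocal angles beta2*, beta3*
    cb2 = (cos(r1)*cos(r3) - cos(r2))/(sin(r1)*sin(r3))
    cb3 = (cos(r1)*cos(r2) - cos(r3))/(sin(r1)*sin(r2))
    return matrix([[b1, b2*cb3, b3*cb2],
                   [0., b2*sqrt(1 - cb3**2), -b3*sqrt(1 - cb2**2)*cos(r1)],
                   [0., 0., 2*pi/a3]])
# e.g. _sketch_b_matrix(4.0004, 4.0004, 2.27, 90, 90, 90) reproduces the
# commented dif_init.mat B matrix above (2*pi/4.0004 = 1.570639,
# 2*pi/2.27 = 2.767923).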
|
DiamondLightSource/diffcalc
|
test/diffcalc/ub/test_crystal.py
|
Python
|
gpl-3.0
| 2,172
|
[
"CRYSTAL"
] |
a9dfa683bd6864072b19ad28a7796bd24ca90cb654cc79e97727b21e65d50226
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
It merges the reports from BOWTIE-method with BLAT-method.
Author: Daniel Nicorici, Daniel.Nicorici@gmail.com
Copyright (c) 2009-2021 Daniel Nicorici
This file is part of FusionCatcher.
FusionCatcher is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
FusionCatcher is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with FusionCatcher (see file 'COPYING.txt'). If not, see
<http://www.gnu.org/licenses/>.
By default, FusionCatcher is running BLAT aligner
<http://users.soe.ucsc.edu/~kent/src/> but it offers also the option to disable
all its scripts which make use of BLAT aligner if you choose explicitly to do so.
BLAT's license does not allow to be used for commercial activities. If BLAT
license does not allow to be used in your case then you may still use
FusionCatcher by forcing not use the BLAT aligner by specifying the option
'--skip-blat'. Fore more information regarding BLAT please see its license.
Please, note that FusionCatcher does not require BLAT in order to find
candidate fusion genes!
This file is not running/executing/using BLAT.
"""
import os
import sys
import optparse
def myorder(a,b):
return (a,b) if a <= b else (b,a)
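# Tiny sanity check for myorder() (gene symbols below are hypothetical):
# dictionary keys built from a gene pair are order-independent, so both
# orderings hit the same entry.
def _demo_myorder():
    assert myorder('BCR', 'ABL1') == ('ABL1', 'BCR')
    assert myorder('ABL1', 'BCR') == ('ABL1', 'BCR')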
def process_psl(data, psl_filename, tag, threshold = 0, tpairs = 0, anchor2 = 40):
# reading the BLAT report
print "Reading...",psl_filename,anchor2
blat = [line.rstrip('\r\n').split('\t') for line in file(psl_filename,'r') if line.rstrip('\r\n')]
blat_header = dict([(v.lower(),i) for (i,v) in enumerate(blat.pop(0))])
# gene-5end
# gene-5end_symbol
# chromosome_gene-5end
# strand_gene-5end
# end_chromosome_part-1-read-mapped-gene-5end
# gene-3end
# gene-3end_symbol
# chromosome_gene-3end
# strand_gene-3end
# start_chromosome_part-2-of-read-mapped-gene-3end
# counts
# fusion_sequence
# filter
if ((threshold > 0) or (anchor2>0)):
blat = [row for row in blat if ((int(row[blat_header['counts']]) >= threshold) or (int(row[blat_header['longest_anchor']]) >= anchor2))]
# merging => adding to BOWTIE list
print "Processing..."
for line in blat:
symbol_1 = line[blat_header['gene-5end_symbol']]
symbol_2 = line[blat_header['gene-3end_symbol']]
gene_1 = line[blat_header['gene-5end']]
gene_2 = line[blat_header['gene-3end']]
exon_1 = ''
exon_2 = ''
pos_1 = "%s:%s:%s" % (line[blat_header['chromosome_gene-5end']],
line[blat_header['end_chromosome_part-1-read-mapped-gene-5end']],
'+' if line[blat_header['strand_gene-5end']] == '1' else '-')
pos_2 = "%s:%s:%s" % (line[blat_header['chromosome_gene-3end']],
line[blat_header['start_chromosome_part-2-of-read-mapped-gene-3end']],
'+' if line[blat_header['strand_gene-3end']] == '1' else '-')
pairs = candidate_fusions[myorder(gene_1,gene_2)]
reads = line[blat_header['counts']]
longest = line[blat_header['longest_anchor']]
aligner = tag
seq = line[blat_header['fusion_sequence']]
if int(pairs) >= tpairs:
data.append([
symbol_1,
symbol_2,
gene_1,
gene_2,
exon_1,
exon_2,
pos_1,
pos_2,
pairs,
reads,
longest,
aligner,
seq
])
#####################################
#####################################
#####################################
if __name__ == '__main__':
#command line parsing
usage="%prog [options]"
description="""It merges the reports from BOWTIE-method with BLAT-method.
"""
version="%prog 0.10 beta"
parser=optparse.OptionParser(usage=usage,description=description,version=version)
parser.add_option("--input_bowtie",
action="store",
type="string",
dest="input_bowtie_filename",
help="""The report with candidate fusion genes found using the Bowtie.""")
parser.add_option("--input_blat",
action="store",
type="string",
dest="input_blat_filename",
help="""The report with candidate fusion genes found using the BLAT aligner.""")
parser.add_option("--input_star",
action="store",
type="string",
dest="input_star_filename",
help="""The report with candidate fusion genes found using the STAR aligner.""")
parser.add_option("--input_bowtie2",
action="store",
type="string",
dest="input_bowtie2_filename",
help="""The report with candidate fusion genes found using the BOWTIE2 aligner.""")
parser.add_option("--input_bwa",
action="store",
type="string",
dest="input_bwa_filename",
help="""The report with candidate fusion genes found using the BWA aligner.""")
parser.add_option("--input_spotlight",
action="store",
type="string",
dest="input_spotlight_filename",
help="""The report with candidate fusion genes found using the SPOTLIGHT method.""")
parser.add_option("--input_candidate_fusion_genes",
action = "store",
type = "string",
dest = "input_candidate_fusion_genes_filename",
help = """The input list of candidate fusion genes, for example 'candidate_fusion-genes_no-offending-reads_label-no-proteins-paralogs-readthrough-similar-pseudogenes_further.txt'.""")
parser.add_option("--input_ambiguous",
action = "store",
type = "string",
dest = "input_ambiguous_filename",
help = """File containing the pairs of genes and their corresponding number of reads which map ambiguously on each other.""")
parser.add_option("--supporting_reads_blat",
action="store",
type="int",
dest="supporting_reads_blat",
default=2,
help="""The minimum number of supporting reads (found using BLAT aligner) necessary for considering valid a candidate fusion gene. Default is '%default'.""")
parser.add_option("--supporting_pairs_blat",
action="store",
type="int",
dest="supporting_pairs_blat",
default=2,
help="""The minimum number of supporting pairs (found using BLAT aligner) necessary for considering valid a candidate fusion gene. Default is '%default'.""")
parser.add_option("--supporting_reads_star",
action="store",
type="int",
dest="supporting_reads_star",
default=2,
help="""The minimum number of supporting reads (found using STAR aligner) necessary for considering valid a candidate fusion gene. Default is '%default'.""")
parser.add_option("--supporting_pairs_star",
action="store",
type="int",
dest="supporting_pairs_star",
default=2,
help="""The minimum number of supporting pairs (found using STAR aligner) necessary for considering valid a candidate fusion gene. Default is '%default'.""")
parser.add_option("--supporting_reads_bowtie2",
action="store",
type="int",
dest="supporting_reads_bowtie2",
default=2,
help="""The minimum number of supporting reads (found using BOWTIE2 aligner) necessary for considering valid a candidate fusion gene. Default is '%default'.""")
parser.add_option("--supporting_pairs_bowtie2",
action="store",
type="int",
dest="supporting_pairs_bowtie2",
default=2,
help="""The minimum number of supporting pairs (found using BOWTIE2 aligner) necessary for considering valid a candidate fusion gene. Default is '%default'.""")
parser.add_option("--supporting_reads_bwa",
action="store",
type="int",
dest="supporting_reads_bwa",
default=2,
help="""The minimum number of supporting reads (found using BOWTIE2 aligner) necessary for considering valid a candidate fusion gene. Default is '%default'.""")
parser.add_option("--supporting_pairs_bwa",
action="store",
type="int",
dest="supporting_pairs_bwa",
default=2,
help="""The minimum number of supporting pairs (found using BWA aligner) necessary for considering valid a candidate fusion gene. Default is '%default'.""")
parser.add_option("--supporting_reads_spotlight",
action="store",
type="int",
dest="supporting_reads_spotlight",
default=2,
help="""The minimum number of supporting reads (found using SPOTLIGHT method) necessary for considering valid a candidate fusion gene. Default is '%default'.""")
parser.add_option("--supporting_pairs_spotlight",
action="store",
type="int",
dest="supporting_pairs_spotlight",
default=2,
help="""The minimum number of supporting pairs (found using SPOTLIGHT method) necessary for considering valid a candidate fusion gene. Default is '%default'.""")
parser.add_option("--squish-report",
action = "store_true",
dest = "squish",
default = False,
help = """If set then the report is squished (i.e. fusion genes with same junction coordinates are listed once even that they are found by severeal methods). Default is '%default'.""")
parser.add_option("--anchor2",
action = "store",
type = "int",
dest = "anchor2",
default = 40,
help = """For anchors longer (or equal) with this value it is enough to have only one supporting read. Default is '%default'.""")
parser.add_option("--output",
action="store",
type="string",
dest="output_filename",
help="""Merged report of candidate fusion genes.""")
(options, args) = parser.parse_args()
# validate options
if not (options.input_bowtie_filename and
options.output_filename
):
parser.print_help()
sys.exit(1)
print "Reading...",options.input_candidate_fusion_genes_filename
# 0 - Fusion_gene_1
# 1 - Fusion_gene_2
# 2 - Count_paired-end_reads
# 3 - Fusion_gene_symbol_1
# 4 - Fusion_gene_symbol_2
# 5 - Information_fusion_genes
# 6 - Analysis_status -> further or skipped
draft_fusions = [line.rstrip('\r\n').split('\t') for line in file(options.input_candidate_fusion_genes_filename,'r').readlines()]
draft_fusions.pop(0) # remove the header
candidate_fusions = dict([( tuple(myorder(line[0], line[1])), line[2]) for line in draft_fusions if line[6].lower() == 'further_analysis'])
label_fusions = dict([( tuple(myorder(line[0], line[1])), line[5]) for line in draft_fusions if line[6].lower() == 'further_analysis'])
# reading the BOWTIE report
# this is considered the standard
print "Reading...",options.input_bowtie_filename
bowtie = [line.rstrip('\r\n').split('\t') for line in file(options.input_bowtie_filename,'r') if line.rstrip('\r\n')]
bowtie_header = dict([(v.lower(),i) for (i,v) in enumerate(bowtie[0])])
# Fusion_gene_symbol_1(5end_partner) # 0
# Fusion_gene_symbol_2(3end_partner) # 1
# Fusion_gene_1(5end_partner) # 2
# Fusion_gene_2(3end_partner) # 3
# Fusion_exon_1(5end_partner) # 4
# Fusion_exon_2(3end_partner) # 5
# Fusion_gene_1_position(5end_partner) # 6
# Fusion_gene_2_position(3end_partner) # 7
# Spanning_pairs # 8
# Spanning_unique_reads # 9
# Longest_spanning_read # 10
# Aligner(s) # 11
# Fusion_sequence # 12
if options.input_blat_filename:
process_psl(bowtie, options.input_blat_filename, tag="BOWTIE+BLAT", threshold = options.supporting_reads_blat, tpairs = options.supporting_pairs_blat, anchor2=options.anchor2)
if options.input_star_filename:
process_psl(bowtie, options.input_star_filename, tag="BOWTIE+STAR", threshold = options.supporting_reads_star, tpairs = options.supporting_pairs_star, anchor2=options.anchor2)
if options.input_bowtie2_filename:
process_psl(bowtie, options.input_bowtie2_filename, tag="BOWTIE+BOWTIE2", threshold = options.supporting_reads_bowtie2, tpairs = options.supporting_pairs_bowtie2, anchor2=options.anchor2)
if options.input_bwa_filename:
process_psl(bowtie, options.input_bwa_filename, tag="BOWTIE+BWA", threshold = options.supporting_reads_bwa, tpairs = options.supporting_pairs_bwa, anchor2=options.anchor2)
if options.input_spotlight_filename:
process_psl(bowtie, options.input_spotlight_filename, tag="BOWTIE+SPOTLIGHT", threshold = options.supporting_reads_spotlight, tpairs = options.supporting_pairs_spotlight, anchor2=options.anchor2)
# add an extra column to the report with the labels of the fusion genes
new_bowtie = []
for i,line in enumerate(bowtie):
if i == 0:
new_bowtie.append(line + ['Fusion_description'])
else:
gene_1 = line[2]
gene_2 = line[3]
new_bowtie.append(line + [label_fusions[myorder(gene_1,gene_2)]])
bowtie = new_bowtie
# add an extra column to the report with the counts of ambiguous counts of reads
ambiguous = dict()
if options.input_ambiguous_filename and (os.path.isfile(options.input_ambiguous_filename) or os.path.islink(options.input_ambiguous_filename)):
ambiguous = [line.rstrip('\r\n').split('\t') for line in file(options.input_ambiguous_filename,'r').readlines() if line.rstrip('\r\n')]
ambiguous = dict([(myorder(line[0],line[1]),line[2]) for line in ambiguous if label_fusions.has_key(myorder(line[0],line[1])) ])
new_bowtie = []
for i,line in enumerate(bowtie):
if i == 0:
new_bowtie.append(line + ['Counts_of_common_mapping_reads'])
else:
gene_1 = line[2]
gene_2 = line[3]
new_bowtie.append(line + [ambiguous.get(myorder(gene_1,gene_2),'0')])
bowtie = new_bowtie
# here reshuffle the columns so that they look beautiful
# move column Fusion_description from 13 to 3
# move column Counts_of_common_mapping_reads from 14 to 4 (after Fusion_description)
# Spanning_pairs from column 8 to 5
# Spanning_unique_reads from column 9 to 6
# Longest_anchor_found from column 10 to 7
# Fusion_finding_method from column 11 to 8
new_bowtie = zip(*bowtie)
new_bowtie = [ new_bowtie[0],
new_bowtie[1],
new_bowtie[13],
new_bowtie[14],
new_bowtie[8],
new_bowtie[9],
new_bowtie[10],
new_bowtie[11],
new_bowtie[6],
new_bowtie[7],
new_bowtie[2],
new_bowtie[3],
new_bowtie[4],
new_bowtie[5],
new_bowtie[12]
]
new_bowtie = zip(*new_bowtie)
header = new_bowtie.pop(0)
bowtie = sorted(new_bowtie, key = lambda x: (-int(x[4]),x[0],x[1],-int(x[5]),-int(x[6]),x[7]))
bowtie.insert(0,header)
# squish report
if options.squish and bowtie and len(bowtie)>1:
b = bowtie[:]
h = b.pop(0) # header
r = [h]
n = len(b)
f = [True] * n
clean_labels = {
frozenset(['dist200kbp', 'dist100kbp', 'dist10kbp', 'dist1000bp']): 'gap<1K',
frozenset(['dist200kbp', 'dist100kbp', 'dist10kbp']): '1K<gap<10K',
frozenset(['dist200kbp', 'dist100kbp']): '10K<gap<100K',
frozenset(['dist200kbp']): '100K<gap<200K',
}
for i in xrange(n):
if f[i]:
method = set([b[i][7]])
exon1 = ''
exon2 = ''
ids = []
for j in xrange(i+1,n):
if f[j] and b[i][8] == b[j][8] and b[i][9] == b[j][9] and b[i][10] == b[j][10] and b[i][11] == b[j][11]:
method.add(b[j][7])
if b[j][12]:
exon1 = b[j][12]
exon2 = b[j][13]
f[j] = False
line = list(b[i][:])
if method:
line[7] = ';'.join(sorted(method))
if exon1 and (not line[12]):
line[12] = exon1
line[13] = exon2
# convert IGH_LOCUS to IGH@ and IGK_LOCUS to IGK@
if line[0].lower().find('_locus') != -1:
line[0] = line[0].split('_')[0].upper()+'@'
if line[1].lower().find('_locus') != -1:
line[1] = line[1].split('_')[0].upper()+'@'
# clean the short_distance and distance labels
labels = line[2].split(',')
labels = [elk for elk in labels if elk != 'short_distance'] # remove short_distance
distances = frozenset([elk for elk in labels if elk.startswith('dist')])
if distances:
labels = [elk for elk in labels if not elk.startswith('dist')] # remove the distance labels
labels.append(clean_labels.get(distances,''))
line[2] = ','.join(labels)
r.append(line)
bowtie = r
# FINAL REPORT
print "Writing the merged report..."
file(options.output_filename,'w').writelines(['\t'.join(line)+'\n' for line in bowtie])
#
|
ndaniel/fusioncatcher
|
bin/merge_reports.py
|
Python
|
gpl-3.0
| 19,594
|
[
"BWA",
"Bowtie",
"Elk"
] |
1281e355a3f7f26606d6091a63a110cce3b1ab68ccfd7f04d3d455277a79d076
|
#!/usr/bin/env python
__author__ = 'arulalant'
__version__ = 'v2.0.1'
__long_name__ = 'NCUM Parallel Rider by targeting creation of TIGGE Grib2 files'
"""
Inputs: NCUM fieldsfile / pp format files
Outputs: WMO-NCEP Grib2 format files
This script produces output files as multiple 6 hourly forecasts data from
different input files such as pb, pd, pe, etc. So all 6 hourly forecasts data
of different input files will be appended to the same 6 hourly grib2 outfiles.
But it is not limited to 6 hours only; it supports 3 hourly and 24 hourly too.
(These conventions are according to NCUM only!)
Parallel:
As of now, we are using multiprocessing to make parallel runs on different files
like pb, pd, pe, and it creates child processes with respect to the number of
forecast hours. To make more parallel threads at the variable and fcstHours
level we may need to use OpenMPI-Py.
Testing team in NCMRWF & their roles:
#1. Mr. Kuldeep Sharma - Main tester for visual integrity vis-a-vis GrADS & subset.ctl tool
#2. Dr. Sumit Kumar - Main tester for visual integrity vis-a-vis GrADS & subset.ctl tool
#3. Dr. C.J. Johny - VSDB Input product tester
#4. Mr. M.Momin Imran Ali - Hycom Input product tester
#4. Mr. Abhishek Lodh - Soil Moisture tester
#2. Dr. Raghavendra Ashrit - Testing for RIMES with WRF-Noah and overall integrity testing
#3. Dr. Jayakumar A. - Comparison with the CAWCR convertor and specification needs
#4. Dr. Saji Mohandad, TIFF Lead - Control test (GrADS & subset.tcl) & Future Functional Description
#5. Mr. Gopal Raman Iyengar - Overseer
Acknowledgments:
#1. Mr. Raghavendra S. Mupparthy - Integrator, TIAV Lead for the
initial serial version for grib2 conversion of NCUM pp/ff file.
#2. Dr. Rakhi R, Dr. Jayakumar A, Dr. Saji Mohandas and Mr. Bangaru (Ex-IBM)
for N768 and STASH corrections.
#3. Mr. Raghavendra S. Mupparthy, Dr. Rakhi R and Dr. S.Indira Rani for Rose-cycle um setup and integration.
#4. IBM Team @ NCMRWF for installation support on Bhaskara - Ms. Shivali (IBM) & Mr. Bangaru (Ex-IBM)
References:
1. Iris. v1.8.1 03-Jun-2015. Met Office. UK. https://github.com/SciTools/iris/archive/v1.8.1.tar.gz
2. Saji M. (2014), "Utility to convert UM fieldsfile output to NCEP GRIB1 format:
A User Guide", NMRF/TR/01/2014, April 2014, pp. 51, available at
http://www.ncmrwf.gov.in/umfld2grib.pdf
Disclaimers (if any!)
This is just test code as of now and is meant for a specific purpose only!
Copyright: ESSO-NCMRWF, MoES, 2015-2016, 2016-2017.
Author : Arulalan.T
latest Update : 27-Sep-2016
"""
# -- Start importing necessary modules
import os, sys, time, subprocess, errno
import numpy, cdtime
import iris
import gribapi
from cf_units import Unit
import multiprocessing as mp
import multiprocessing.pool as mppool
# We must import this multiprocessing.pool explicitly, it is not imported
# by the top-level multiprocessing module.
import datetime
from iris.time import PartialDateTime
from cubeutils import cubeAverager, cubeAddSubtractor, cubeCummulator
from ncum_load_rules import update_cf_standard_name
import um2grb2 as umfcs
import umeps2grb2 as umeps
from um2grb2 import (createDirWhileParallelRacing, getCubeData, myLog,
__getAnlFcstFileNameIndecies__, __genAnlFcstOutFileName__,
getCubeAttr, _NoDaemonProcess, _MyPool)
# End of importing business
# We have to make sure that strict_grib_load is False, since we have to
# read the cubes from grib2 to re-order the variables. True throws an error
# while reading the tweaked_messages (say pf variables).
iris.FUTURE.strict_grib_load = False
iris.FUTURE.netcdf_promote = True
iris.FUTURE.netcdf_no_unlimited = True
iris.FUTURE.cell_datetime_objects = True
# -- Start coding
# create global _lock_ object
_lock_ = mp.Lock()
# global path variables
g2ctl = "/gpfs2/home/umtid/Softwares/grib2ctl/g2ctl.pl"
grib2ctl = "/gpfs2/home/umtid/Softwares/grib2ctl/grib2ctl.pl"
gribmap = "/gpfs1/home/Libs/GNU/GRADS/grads-2.0.2.oga.1/Contents/gribmap"
cnvgrib = "/gpfs1/home/Libs/INTEL/CNVGRIB/CNVGRIB-1.4.1/cnvgrib-1.4.1/cnvgrib"
wgrib2 = "/gpfs1/home/Libs/GNU/WGRIB2/v2.0.5/wgrib2/wgrib2"
# other global variables
__LPRINT__ = False
__utc__ = '00'
__outFileType__ = 'ana'
# start and step hour in short forecast files
__anl_step_hour__ = 6
# start hour in long forecast files
__start_long_fcst_hour__ = 6
# step hour in long forecast files
__fcst_step_hour__ = 6
# maximum long forecast hours produced by model
__end_long_fcst_hour__ = 240
# analysis reference time applicable only to average/accumulation vars.
__anl_aavars_reference_time__ = 'shortforecast'
# analysis time bounds option applicable only to average/accumulation vars.
__anl_aavars_time_bounds__ = True
# grib1 file suffix
__grib1FilesNameSuffix__ = '.grib1'
# flag for removing grib2 files after grib1 has been converted
__removeGrib2FilesAfterGrib1FilesCreated__ = False
# fill fully masked vars with this value.
__fillFullyMaskedVars__ = None
_ensemble_count_ = 44
_ensemble_member_ = None
# Defining default out grib2 file name structure for analysis
__anlFileNameStructure__ = ('um_ana', '_', '*HHH*', 'hr', '_',
'*YYYYMMDD*', '_', '*ZZ*', 'Z', '.grib2')
# Defining default out grib2 file name structure for forecast
__fcstFileNameStructure__ = ('um_prg', '_', '*HHH*', 'hr', '_',
'*YYYYMMDD*', '_', '*ZZ*', 'Z', '.grib2')
# the _convertVars_ is global list which should has final variables list of
# tuples (varName, varSTASH) will be converted, otherwise default variables
# of this module will be converted!
_convertVars_ = []
_removeVars_ = [] # used to store temporary vars
_current_date_ = None
_startT_ = None
_tmpDir_ = None
_inDataPath_ = None
_opPath_ = None
_doRegrid_ = False
_targetGrid_ = None
_targetGridFile_ = ''
_targetGridRes_ = None
_reverseLatitude_ = False
_requiredLat_ = None
_requiredLon_ = None
_requiredPressureLevels_ = None
_preExtension_ = '_unOrdered'
_createGrib2CtlIdxFiles_ = True
_createGrib1CtlIdxFiles_ = False
_convertGrib2FilestoGrib1Files_ = False
__setGrib2TableParameters__ = None
__wgrib2Arguments__ = None
_extraPolateMethod_ = 'auto'
__UMtype__ = 'global'
_ncfilesVars_ = []
__outg2files__ = []
# By default __soilFirstSecondFixedSurfaceUnit__ takes as 'cm', suggested for
# WRF-Noah model.
__soilFirstSecondFixedSurfaceUnit__ = 'cm'
# global ordered variables (the order we want to write into grib2)
_orderedVars_ = {'PressureLevel': [
## Pressure Level Variable names & STASH codes
('geopotential_height', 'm01s16i202'),
('x_wind', 'm01s15i201'),
('y_wind', 'm01s15i202'),
('upward_air_velocity', 'm01s15i242'),
('air_temperature', 'm01s16i203'),
('relative_humidity', 'm01s16i256'),
('specific_humidity', 'm01s30i205')],
## Non Pressure Level Variable names & STASH codes
'nonPressureLevel': [
('tropopause_altitude', 'm01s30i453'),
('tropopause_air_temperature', 'm01s30i452'),
('tropopause_air_pressure', 'm01s30i451'),
('surface_air_pressure', 'm01s00i409'),
('air_pressure_at_sea_level', 'm01s16i222'),
('surface_temperature', 'm01s00i024'),
('relative_humidity', 'm01s03i245'),
('specific_humidity', 'm01s03i237'),
('air_temperature', 'm01s03i236'),
('dew_point_temperature', 'm01s03i250'),
('atmosphere_convective_available_potential_energy_wrt_surface', 'm01s05i233'), # CAPE
('atmosphere_convective_inhibition_wrt_surface', 'm01s05i234'), #CIN
('high_type_cloud_area_fraction', 'm01s09i205'),
('medium_type_cloud_area_fraction', 'm01s09i204'),
('low_type_cloud_area_fraction', 'm01s09i203'),
('cloud_area_fraction_assuming_random_overlap', 'm01s09i216'),
('cloud_area_fraction_assuming_maximum_random_overlap', 'm01s09i217'),
('x_wind', 'm01s03i225'),
('y_wind', 'm01s03i226'),
('x_wind', 'm01s15i212'), # 50meter B-Grid U component wind
('y_wind', 'm01s15i213'), # 50meter B-Grid V component wind
('visibility_in_air', 'm01s03i247'),
('precipitation_amount', 'm01s05i226'),
('stratiform_snowfall_amount', 'm01s04i202'),
('convective_snowfall_amount', 'm01s05i202'),
('stratiform_rainfall_amount', 'm01s04i201'),
('convective_rainfall_amount', 'm01s05i201'),
('rainfall_flux', 'm01s05i214'),
('snowfall_flux', 'm01s05i215'),
('precipitation_flux', 'm01s05i216'),
('atmosphere_mass_content_of_water', 'm01s30i404'),
('atmosphere_mass_content_of_dust_dry_aerosol_particles', 'm01s30i403'),
('atmosphere_cloud_liquid_water_content', 'm01s30i405'),
('atmosphere_cloud_ice_content', 'm01s30i406'),
('fog_area_fraction', 'm01s03i248'),
('toa_incoming_shortwave_flux', 'm01s01i207'),
('toa_outgoing_shortwave_flux', 'm01s01i205'),
('toa_outgoing_shortwave_flux_assuming_clear_sky', 'm01s01i209'),
('toa_outgoing_longwave_flux', 'm01s02i205'),
('toa_outgoing_longwave_flux_assuming_clear_sky', 'm01s02i206'),
('surface_upward_latent_heat_flux', 'm01s03i234'),
('surface_upward_sensible_heat_flux', 'm01s03i217'),
('surface_downwelling_shortwave_flux_in_air', 'm01s01i235'),
('surface_downwelling_longwave_flux', 'm01s02i207'),
('surface_net_downward_longwave_flux', 'm01s02i201'),
('surface_net_downward_shortwave_flux', 'm01s01i202'),
('atmosphere_boundary_layer_thickness', 'm01s00i025'),
('atmosphere_optical_thickness_due_to_dust_ambient_aerosol', 'm01s02i422'),
('moisture_content_of_soil_layer', 'm01s08i223'), # 4 layers
# single layer, this must be after 4 layers as in order
('soil_moisture_content', 'm01s08i208'), # single layer
    ## though moisture_content_of_soil_layer and volumetric_moisture_of_soil_layer
    ## have the same STASH code, we must include separate entries here.
('volumetric_moisture_of_soil_layer', 'm01s08i223'), # 4 layers
# single layer, this must be after 4 layers as in order
('volumetric_moisture_of_soil_layer', 'm01s08i208'), # single layer
('soil_temperature', 'm01s03i238'),
('land_binary_mask', 'm01s00i030'),
('sea_ice_area_fraction', 'm01s00i031'),
('sea_ice_thickness', 'm01s00i032'),
# the snowfall_amount might be changed as
# liquid_water_content_of_surface_snow by convert it into
# water equivalent of snow amount, before re-ordering itself.
('liquid_water_content_of_surface_snow', 'm01s00i023'),
# the below one is for orography which presents only in analysis 00 file.
# so we must keep this as the last one in the ordered variables!
('surface_altitude', 'm01s00i033')],
}
#Define _precipVars_
# The following vars should contains only precipitation, rainfall, snow
# variables, those whose regrid extrapolate should be only in 'linear' mode
# and not in 'mask' mode, and should not have -ve values.
_precipVars_ = [('precipitation_amount', 'm01s05i226'),
('stratiform_snowfall_amount', 'm01s04i202'),
('convective_snowfall_amount', 'm01s05i202'),
('stratiform_rainfall_amount', 'm01s04i201'),
('convective_rainfall_amount', 'm01s05i201'),
('rainfall_flux', 'm01s05i214'),
('snowfall_flux', 'm01s05i215'),
('precipitation_flux', 'm01s05i216')]
# Define _accumulationVars_
# The following variables should be 6-hourly accumulated, but model
# produced as 1-hourly accumulation. So we need to sum of 6-hours data to
# get 6-hourly accumulation.
# rainfall_flux, snowfall_flux, precipitation_flux are not accumulated
# vars, since those are averaged rain rate (kg m-2 s-1).
# But the following vars unit is (kg m-2), accumulated vars.
_accumulationVars_ = [('precipitation_amount', 'm01s05i226'),
('surface_net_downward_shortwave_flux', 'm01s01i202'),
('surface_net_downward_longwave_flux', 'm01s02i201'),
('surface_upward_latent_heat_flux', 'm01s03i234'),
('surface_upward_sensible_heat_flux', 'm01s03i217'),
('toa_outgoing_longwave_flux', 'm01s02i205')]
# TIGGE's total time cummulated variables
_total_cummulativeVars_ = ['precipitation_amount',
'surface_net_downward_shortwave_flux',
'surface_net_downward_longwave_flux',
'surface_upward_latent_heat_flux',
'surface_upward_sensible_heat_flux',
'toa_outgoing_longwave_flux',
'time_cummulated_precipitation',
'time_integrated_surface_net_downward_shortwave_flux',
'time_integrated_surface_net_downward_longwave_flux',
'time_integrated_surface_upward_latent_heat_flux',
'time_integrated_surface_upward_sensible_heat_flux',
'time_integrated_toa_outgoing_longwave_flux',]
## Define _ncmrGrib2LocalTableVars_
## the following variables need localTableVersion no set as 1 and
## master table version no as 255 (undefined), since the WRF grib2 table doesn't
## support the following variables. So we created our own local table.
_ncmrGrib2LocalTableVars_ = ['fog_area_fraction',
'toa_outgoing_longwave_flux_assuming_clear_sky',
'toa_outgoing_shortwave_flux_assuming_clear_sky',
'atmosphere_optical_thickness_due_to_dust_ambient_aerosol',
'atmosphere_mass_content_of_dust_dry_aerosol_particles',
# 'cloud_area_fraction_assuming_random_overlap',
'cloud_area_fraction_assuming_maximum_random_overlap',]
## Define _maskOverOceanVars_
## the following variables need to be masked over ocean because the original
## model itself produces a mask over ocean, but when we do the regrid the
## mask couldn't be retained (reason unknown). So using the land_binary_mask
## variable, we are resetting the mask over ocean for the following vars.
_maskOverOceanVars_ = ['moisture_content_of_soil_layer',
'soil_moisture_content', 'volumetric_moisture_of_soil_layer',
# 'moisture_content_of_soil_layer' and 'soil_moisture_content' are
# renamed as 'volumetric_moisture_of_soil_layer',
# but same STASH m01s08i223 and m01s08i208 code.
'soil_temperature']
## Define dust aerosol optical thickness of model pseudo level with its
## corresponding micron / micro wavelength. We need to tweak with following
## information before writing into final grib2 file.
_aod_pseudo_level_var_ = {
'atmosphere_optical_thickness_due_to_dust_ambient_aerosol': [
(1, '0.38'), (2, '0.44'), (3, '0.55'), (4, '0.67'), (5, '0.87'), (6, '1.02')]}
## Define _depedendantVars_ where A is the key and B is the value. A depends
## on B, B does not. B does not necessarily need to be written to the out file.
## The user may specify only A in the var.cfg configure file.
_depedendantVars_ = {
# land_binary_mask is needed to set ocean mask
('volumetric_moisture_of_soil_layer', 'm01s08i208'): [('land_binary_mask', 'm01s00i030')],
('moisture_content_of_soil_layer', 'm01s08i208'): [('land_binary_mask', 'm01s00i030')],
('soil_temperature', 'm01s03i238'): [('land_binary_mask', 'm01s00i030')],
# need to calculate surface up sw/lw using surface down & net sw/lw fluxes
('surface_upwelling_shortwave_flux_in_air', 'None'): [
('surface_net_downward_shortwave_flux', 'm01s01i202'),
('surface_downwelling_shortwave_flux_in_air', 'm01s01i235')],
('surface_upwelling_longwave_flux_in_air', 'None'): [
('surface_net_downward_longwave_flux', 'm01s02i201'),
('surface_downwelling_longwave_flux', 'm01s02i207')],
('atmosphere_precipitable_water_content', 'None'): [
('atmosphere_mass_content_of_water', 'm01s30i404'),
('atmosphere_mass_content_of_dust_dry_aerosol_particles', 'm01s30i403'),
('atmosphere_cloud_liquid_water_content', 'm01s30i405'),
('atmosphere_cloud_ice_content', 'm01s30i406'),
]
}
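## A minimal numeric sketch (helper name and values are illustrative) of how
## the dependencies above combine: since net_down = down - up, the upwelling
## flux is recovered as up = down - net_down, e.g. for
## surface_upwelling_shortwave_flux_in_air.
def _demo_upwelling_flux(down=350.0, net_down=120.0):
    return down - net_down   # 230.0, in the same units as the inputs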
ncumSTASH_tiggeVars = {
######################## BEGIN OF TIGGE-VALID-VARS-IN-BOTH-NCUM-DETERMINISTIC-ENSEMBLES ################
# PressureLevel Variables
('geopotential_height', 'm01s16i202'): ('geopotential_height', 'gh', None), # 'gpm'
('specific_humidity', 'm01s30i205'): ('specific_humidity', 'q', 'kg kg-1'),
('air_temperature', 'm01s16i203'): ('temperature', 't', 'K'),
('x_wind', 'm01s15i243'): ('u_velocity', 'u', 'm s-1'),
('y_wind', 'm01s15i244'): ('v_velocity', 'v', 'm s-1'),
('x_wind', 'm01s15i201'): ('u_velocity', 'u', 'm s-1'), #deterministic
('y_wind', 'm01s15i202'): ('v_velocity', 'v', 'm s-1'), #deterministic
# SingleLevel Variables
('x_wind', 'm01s03i209'): ('10_meter_u_velocity', '10u', 'm s-1'),
('y_wind', 'm01s03i210'): ('10_meter_v_velocity', '10v', 'm s-1'),
('x_wind', 'm01s03i225'): ('10_meter_u_velocity', '10u', 'm s-1'), #deterministic
('y_wind', 'm01s03i226'): ('10_meter_v_velocity', '10v', 'm s-1'), #deterministic
('land_binary_mask', 'm01s00i030'): ('land_sea_mask', 'lsm', None), # Proportion
('air_pressure_at_sea_level', 'm01s16i222'): ('mean_sea_level_pressure', 'msl', 'Pa'),
('surface_altitude', 'm01s00i033'): ('orography', 'orog', None), # 'gpm'
('orography', 'm01s00i033'): ('orography', 'orog', None), # 'gpm' # required to work.
('air_temperature', 'm01s03i236'): ('surface_air_temperature', '2t', 'K'),
('air_temperature_maximum', 'm01s03i236'): ('surface_air_maximum_temperature', 'mx2t6', 'K'),
('air_temperature_minimum', 'm01s03i236'): ('surface_air_minimum_temperature', 'mn2t6', 'K'),
('dew_point_temperature', 'm01s03i250'): ('surface_air_dew_point_temperature', '2d', 'K'),
('surface_temperature', 'm01s00i024'): ('skin_temperature', 'skt', 'K'),
('moisture_content_of_soil_layer', 'm01s08i223'): ('soil_moisture', 'sm', 'K'),
('soil_temperature', 'm01s03i238'): ('soil_temperature', 'st', 'K'),
('surface_air_pressure', 'm01s00i409'): ('surface_pressure', 'sp', 'Pa'),
('precipitation_amount', 'm01s05i226'): ('total_precipitation', 'tp', 'kg m-2'), # intermediate file
('time_cummulated_precipitation', 'None'): ('total_precipitation', 'tp', 'kg m-2'),
('surface_upward_latent_heat_flux', 'm01s03i234'): ('time_integrated_surface_latent_heat_flux', 'slhf', None), # intermediate file
('time_integrated_surface_upward_latent_heat_flux', 'm01s03i234'): ('time_integrated_surface_latent_heat_flux', 'slhf', 'W m-2 s'),
('surface_upward_sensible_heat_flux', 'm01s03i217'): ('time_integrated_surface_sensible_heat_flux', 'sshf', None), # intermediate file
('time_integrated_surface_upward_sensible_heat_flux', 'm01s03i217'): ('time_integrated_surface_sensible_heat_flux', 'sshf', 'W m-2 s'),
('surface_net_downward_longwave_flux', 'm01s02i201'): ('time_integrated_surface_net_thermal_radiation', 'str', None), # intermediate file
('time_integrated_surface_net_downward_longwave_flux', 'm01s02i201'): ('time_integrated_surface_net_thermal_radiation', 'str', 'W m-2 s'),
('surface_net_downward_shortwave_flux', 'm01s01i202'): ('time_integrated_surface_net_solar_radiation', 'ssr', None), # intermediate file
('time_integrated_surface_net_downward_shortwave_flux', 'm01s01i202'): ('time_integrated_surface_net_solar_radiation', 'ssr', 'W m-2 s'),
('toa_outgoing_longwave_flux', 'm01s02i205'): ('time_integrated_outgoing_long_wave_radiation', 'ttr', None), # intermediate file
('time_integrated_toa_outgoing_longwave_flux', 'm01s02i205'): ('time_integrated_outgoing_long_wave_radiation', 'ttr', 'W m-2 s'),
('cloud_area_fraction_assuming_random_overlap', 'm01s09i216'): ('total_cloud_cover', 'tcc', '%'),
('snowfall_amount', 'm01s00i023'): ('snow_depth_water_equivalent', 'sd', 'kg m-2')
######################## END OF TIGGE-VALID-VARS-IN-BOTH-NCUM-DETERMINISTIC-ENSEMBLES ################
    ### Doubt: does the fluxes input need to be divided by the number of
    ### seconds in the 3-hour or 1-hour window? upward vs downward, + / - ???
######################## END OF TIGGE-VALID-VARS-ONLY-AVAILABLE-IN-DETERMINISTIC ###################
}
def getTiggeFileName(cube):
# follows as per standard of tigge
# https://software.ecmwf.int/wiki/display/TIGGE/TIGGE+Data+Exchange+Protocol
# z_tigge_c_cccc_yyyymmddhhmmss_mmmm_vvvv_tt_ll_ssss_nnn_llll_param
prefix = 'z_tigge_c'
cccc = 'dems' # DEMS Delhi Meteorological Station centre code (WMO Standard)
mmmm = 'glob'
vvvv = 'prod' # TIGGE production mode
cstash = None
# get the cube time and make it as yyyymmddhhmmss
tpoint = cube.coords('forecast_reference_time')[0].points[0]
tunit = cube.coords('forecast_reference_time')[0].units.name
ct = cdtime.reltime(tpoint, tunit).tocomp()
yyyymmddhhmmss = str(ct.year) + str(ct.month).zfill(2) + str(ct.day).zfill(2)
yyyymmddhhmmss+= str(ct.hour).zfill(2) + str(ct.minute).zfill(2) + str(int(ct.second)).zfill(2)
# get the type of forecast
if cube.coords('realization'):
if cube.coords('realization')[0].points[0] == 0:
tt = 'cf' # control forecast
else:
tt = 'pf' # perturbed forecast
else:
tt = 'fc' # deterministic forecast
# pressure level or single level
ll = 'pl'
if cube.coords('pressure'):
ll = 'pl'
llll = str(int(cube.coords('pressure')[0].points[0])).zfill(4)
else:
ll = 'sl'
llll = '0000'
# assign forecast hour
if cube.coords('forecast_period')[0].bounds is None:
# instantaneous time point
ssss = cube.coords('forecast_period')[0].points[0]
else:
# end time of bound time or
ssss = cube.coords('forecast_period')[0].bounds[0][-1]
ssss = str(int(ssss)).zfill(4)
# ensemble no
if cube.coords('realization'):
nnn = str(int(cube.coords('realization')[0].points[0])).zfill(3)
else:
nnn = '000'
# get the tigge standard short name
cname = cube.standard_name if cube.standard_name else cube.long_name
cstash = str(cube.attributes.get('STASH', 'None'))
tiggeName, tiggeParam, tiggeUnit = ncumSTASH_tiggeVars.get((cname, cstash), (None, None, None))
# set tigge unit
if tiggeUnit: cube.units = Unit(tiggeUnit)
# return the tigge standard file name of this cube.
outfilename = '_'.join([prefix, cccc, yyyymmddhhmmss, mmmm, vvvv,
tt, ll, ssss, nnn, llll, tiggeParam])
return outfilename, tiggeParam
# end of def getTiggeFileName(cube, datatype):
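# For illustration (all field values below are hypothetical), the pieces
# joined at the end of getTiggeFileName() compose a name such as:
#   z_tigge_c_dems_20160927000000_glob_prod_pf_pl_0024_003_0850_t
# i.e. perturbed-forecast member 3, 24 h lead time, 850 hPa level, for the
# tigge parameter 't' (temperature).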
def convertSoilVarto20cm(cube):
    # Here we are converting soil temp into 20cm.
# https://software.ecmwf.int/wiki/display/TIGGE/Soil+temperature
# st = ( 7 * T_1 + 13 * T_2 )/20....
# In NCUM it make 10 cm, 35 cm, 1 m, 2 m.
# So for NCUM, it would be
# st = ( 10 * T_1 + 10 * T_2 )/20 -> (( T_1 + T_2 ) * 10)/20
# -> st = (T_1 + T_2 )/2 -> (T_1 + T_2)*0.5
# dimensions are realization, soil_model_level_number, lat, lon
cube[0].data = (cube[0].data + cube[1].data) * 0.5
cube = cube[0] # take the first level alone (which is now top 20 cm)
if cube.coords('soil_model_level_number'): cube.remove_coord('soil_model_level_number')
if cube.coords('depth'): cube.remove_coord('depth')
return cube
# end of def convertSoilVarto20cm(cube):
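# Minimal numeric check of the 20 cm weighting above (values hypothetical):
# with NCUM's two 10 cm layers the weighted mean reduces to (T_1 + T_2)/2.
def _demo_soil20cm(t1=290.0, t2=288.0):
    st = (10.0*t1 + 10.0*t2) / 20.0   # general weighted form
    assert st == (t1 + t2) * 0.5      # the simplification used in the code
    return st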
# start definition #5
def regridAnlFcstFiles(arg):
"""
New Module by AAT:
    This module has been rewritten entirely by AAT for optimization as an embarrassingly-
parallel problem! It also checks the std names from Iris cube format with the
CF-convention and it regrids the data to 0.25x0.25 regular grid using linear
interpolation methods.
:param arg: tuple(fname, hr)
fname: common filename
hr: forecast hour
:return: regridded cube saved as GRIB2 file! TANGIBLE!
ACK:
This module has been entirely revamped & improved by AAT based on an older and
serial version by MNRS on 11/16/2015.
"""
global _lock_, _targetGrid_, _targetGridRes_, _current_date_, _startT_, \
_inDataPath_, _opPath_, _preExtension_, _accumulationVars_, _ncfilesVars_, \
_convertVars_, _requiredLat_, _requiredLon_, _doRegrid_, __utc__, \
__anlFileNameStructure__, __fcstFileNameStructure__, __LPRINT__, \
__fcst_step_hour__, __anl_step_hour__, _targetGridFile_, __UMtype__, \
_precipVars_, _requiredPressureLevels_, __anl_aavars_reference_time__, \
__anl_aavars_time_bounds__, _extraPolateMethod_, _maskOverOceanVars_, \
__fillFullyMaskedVars__, _reverseLatitude_
fpname, hr = arg
if __UMtype__ == 'global':
### if fileName has some extension, then do not add hr to it.
fileName = fpname + hr if not '.' in fpname else fpname
elif __UMtype__ == 'regional':
        fileName = fpname if '.' in fpname else fpname + hr.zfill(3)
# end of if __UMtype__ == 'global':
fname = os.path.join(_inDataPath_, fileName)
inDataPathHour = _inDataPath_.split('/')[-1]
# call definition to get variable indices
varNamesSTASH, fcstHours, doMultiHourlyMean, infile, simulated_hr = umfcs.getVarInOutFilesDetails(_inDataPath_, fileName, hr)
if not os.path.isfile(fname):
print "The file doesn't exists: %s.. \n" %fname
return
# end of if not os.path.isfile(fname):
if _convertVars_:
# load only needed variables from this file !
varNamesSTASH = [vns for vns in varNamesSTASH if vns in _convertVars_]
if not varNamesSTASH:
print "No varibale selected to load from the file '%s' " % fname
if __LPRINT__:
print "Because global variable _convertVars_ doesn't contain any one of the following variables"
print "\n".join([str(i+1)+' : ' + str(tu) for i, tu in enumerate(varNamesSTASH)])
return None
else:
print "The following variables are going to be converted from file ", fname
print "\n".join([str(i+1)+' : ' + str(tu) for i, tu in enumerate(varNamesSTASH)])
# end of if not varNamesSTASH:
print "Started Processing the file: %s.. \n" %infile
# call definition to get cube data
cubes = getCubeData(infile)
nVars = len(cubes)
simulated_hr = int(infile.split('/')[-2])
if __LPRINT__: print "simulated_hr = ", simulated_hr
print "simulated_hr = ", simulated_hr
if fpname.startswith(('umglaa', 'umnsaa')):
dtype = 'fcst'
outFileNameStructure = __fcstFileNameStructure__
start_step_fcst_hour = __fcst_step_hour__
elif fpname.startswith(('umglca', 'qwqg00')):
dtype = 'ana'
outFileNameStructure = __anlFileNameStructure__
start_step_fcst_hour = __anl_step_hour__
# end of if fpname.startswith('umglaa'):
    # get the out fileName Structure based on pre / user defined indices
outFnIndecies = __getAnlFcstFileNameIndecies__(outFileNameStructure)
# get the file name extension
fileExtension = outFileNameStructure[-1]
#####
    ### Setting timepoint, fcstpoint as 'cbound' (centre of bounds) will not
    ### affect g2ctl.pl, because it uses the -verf flag by default, which sets
    ### the access time point as the end time point of the fcst bounds.
timepoint = 'cbound' # TESTED, OK, on 05-01-2016
fcstpoint = 'cbound' # TESTED, OK, on 05-01-2016
timebound = True # TESTED, OK, on 28-03-2016
fcstbound = True # TESTED, OK, on 28-03-2016
    # Note : if we do not correct the ana, fcst fcstpoint as above, the g2ctl
    # ctl file will have 2 time points. To avoid that we have to tell g2ctl
    # to use the start time bound for analysis and the last time bound for
    # fcst, which brings it down to 1 time point.
    # Define default lat, lon, pressure constraints (None just brings full model global data)
latConstraint, lonConstraint, pressureConstraint = None, None, None
if _requiredLat_:
# make constraint of required latitude
latConstraint = iris.Constraint(latitude=lambda cell:
_requiredLat_[0] <= cell <= _requiredLat_[-1])
if _requiredLon_:
# make constraint of required longitude
lonConstraint = iris.Constraint(longitude=lambda cell:
_requiredLon_[0] <= cell <= _requiredLon_[-1])
if _requiredPressureLevels_:
# make constraint of required pressure
# To slice out particular pressure levels (like 850, 200, 1000 alone)
# then the following way is essential.
pressureConstraint = iris.Constraint(pressure=lambda cell:
int(cell.point) in _requiredPressureLevels_)
# open for-loop-1 -- for all the variables in the cube
for varName, varSTASH in varNamesSTASH:
# define variable name constraint
varConstraint = iris.Constraint(name=varName)
        # define variable stash code constraint
STASHConstraint = iris.AttributeConstraint(STASH=varSTASH)
if not cubes.extract(varConstraint & STASHConstraint):
raise ValueError("unable to extract variable %s %s" % (varName, varSTASH))
# get the standard_name of variable
stdNm = cubes.extract(varConstraint & STASHConstraint)[0].standard_name
longNm = cubes.extract(varConstraint & STASHConstraint)[0].long_name
print "stdNm", stdNm, infile
if stdNm is None and longNm is None:
print "Unknown variable standard_name for '%s' of %s. So skipping it" % (varName, infile)
continue
# end of if stdNm is None and longNm is None:
print " Working on variable: %s \n" %stdNm
if (varName, varSTASH) in [('soil_temperature', 'm01s03i238'),
('moisture_content_of_soil_layer', 'm01s08i223')]:
            # Within the pi file, these variables have instantaneous time
            # points; the rest of the variables in the pi file are 3-hr
            # averaged. So get an instantaneous forecast time for
            # soil_temperature and moisture_content_of_soil_layer.
if dtype == 'ana':
ana_soil_infile = os.path.join(_inDataPath_, fileName)
cubes = getCubeData(ana_soil_infile)
simulated_hr = int(ana_soil_infile.split('/')[-2])
# get instantaneous forecast hours to be extracted.
fcstHours = numpy.array([0,])
print varName, "loaded from file, ", ana_soil_infile
print "simulated_hr = ", simulated_hr
elif dtype == 'fcst':
# get instantaneous forecast hours to be extracted.
if isinstance(fcstHours[0], numpy.ndarray):
                    fcstHours += 1 # adjust fcst hour by adding 1
idx = 1 # get second time in tuple
                    # do this process only one time though we have 2 variables
                    # here (both soil_temperature & moisture_content_of_soil_layer)
                    # because here we are overwriting fcstHours which we
                    # previously defined in the umfcs.getVarInOutFilesDetails function.
fcstHours = numpy.array([fhr[idx] for fhr in fcstHours])
print varName,"fcstHours", fcstHours
# end of if (varName, varSTASH) in [...]:
if (varName, varSTASH) in _accumulationVars_:
            # From pe files, we need to extract precipitation_amount fields
            # as 6 hourly accumulations, but the other variables from pe files
            # are instantaneous fields (no need for 6 hourly mean/sum).
# both analysis & forecast need to be accumulation.
doMultiHourlyMean = True
if dtype == 'fcst':
                # for the forecast pe file and this variable we need to set
                # the extract times as follows.
                # the cube contains data accumulated every 1 hour,
                # but we need to make only every 6th hourly accumulation.
                # fileName[-3:] is equivalent to hr, but hr may have been
                # updated in the filename extension in some cases, so it is
                # better to extract the forecast hour from the fileName itself.
fcstHours = numpy.arange(24).reshape(4, 6) + int(fileName[-3:]) + 0.5
# required since NCUM 10.2 onwards
print varName, "fcstHours ", fcstHours, int(fileName[-3:])
elif dtype == 'ana':
                # for the analysis pe file and this variable we need to set
                # the extract times as follows.
                # the cube contains data accumulated every 1 hour,
                # but we need to make only every 6th hourly accumulation.
fcstHours = numpy.array([(0, 1, 2, 3, 4, 5)]) + 0.5 # required since NCUM 10.2 onwards
ana_precip_infile, simulated_hr = umfcs.__getTodayOrYesterdayInfile__(_inDataPath_, fileName)
if ana_precip_infile != infile:
cubes = getCubeData(ana_precip_infile)
print varName, "loaded from file, ", ana_precip_infile
print "simulated_hr = ", simulated_hr
# end of if ana_infile != infile:
# end of if (varName, varSTASH) in _accumulationVars_:
# define (simulated_hr) forecast_reference_time constraint
fcstRefTimeConstraint = iris.Constraint(forecast_reference_time=PartialDateTime(hour=int(simulated_hr)))
if __LPRINT__: print fcstRefTimeConstraint
for fhr in fcstHours:
# loop-2 -- runs through the selected time slices - synop hours
if __LPRINT__: print " Working on forecast time: ", fhr
# grab the variable which is f(t,z,y,x)
# tmpCube corresponds to each variable for the SYNOP hours
if __LPRINT__: print "extract start", infile, fhr, varName
            # get the variable iris cube by applying variable name constraint,
# variable name, stash code, forecast_reference_time constraints
# and forecast hour constraint
if __LPRINT__: print varConstraint, STASHConstraint, fhr,
if __LPRINT__: print fcstRefTimeConstraint, latConstraint, lonConstraint
if fhr is not None:
# make forecast_period constraint
fpConstraint = iris.Constraint(forecast_period=fhr)
if __anl_step_hour__ == 3 and inDataPathHour == '00' and fhr == 0 \
and fpname.startswith('umglca_pe'):
if (varName, varSTASH) in [('air_pressure_at_sea_level', 'm01s16i222'),
('surface_air_pressure', 'm01s00i409'),]:
                    # these vars are already taken from the qwqg00.pp0 file.
continue
# end of if __anl_step_hour__ == 3 and fhr == 0:
if __anl_step_hour__ == 3 and fhr == 1.5:
# Load from current date instead of yesterday date
ana_today_infile = os.path.join(_inDataPath_, fileName)
if ana_today_infile != infile:
a3_1p5_cube = getCubeData(ana_today_infile)
a3_simulated_hr = int(ana_today_infile.split('/')[-2])
a3_fcstRefTime = iris.Constraint(forecast_reference_time=PartialDateTime(hour=a3_simulated_hr))
# extract based on new constraints which are all applicable only to ana 1.5 hours.
tmpCube = a3_1p5_cube.extract(varConstraint &
STASHConstraint & a3_fcstRefTime &
fpConstraint &
latConstraint & lonConstraint)
print "special load of ana_hour 1.5"
print varName, "loaded from today infile, ", ana_today_infile
print "simulated_hr = ", simulated_hr
else:
# extract cube with possible and required constraints
tmpCube = cubes.extract(varConstraint & STASHConstraint &
fcstRefTimeConstraint &
fpConstraint &
latConstraint & lonConstraint)
# end of if __anl_step_hour__ == 3 and fhr == 1.5:
print "tmpCube=", tmpCube
            if not tmpCube: raise ValueError("unable to extract variable %s %s %s" % (varName, varSTASH, fhr))
# Got variable successfully!
tmpCube = tmpCube[0]
# extract pressure levels
if pressureConstraint and tmpCube.coords('pressure'):
if (varName, varSTASH) == ('geopotential_height', 'm01s16i202'):
# extract 50 hPa only to gh variable for TIGGE
pressureC = iris.Constraint(pressure=lambda cell:
int(cell.point) in _requiredPressureLevels_+[50])
tmpCube = tmpCube.extract(pressureC)
else:
tmpCube = tmpCube.extract(pressureConstraint)
            # end of if pressureConstraint and tmpCube.coords('pressure'):
if __LPRINT__: print "extract end", infile, fhr, varName
if __LPRINT__: print "tmpCube =>", tmpCube
if tmpCube.has_lazy_data():
print "Loaded", tmpCube.standard_name, "into memory",
                ## By accessing tmpCube.data (even for printing), the full
                ## data is loaded into memory instead of staying lazy.
                ## Especially for dust aod, we must make it fully loaded,
                ## otherwise the full data will be treated as zeros only
                ## instead of 6 pseudo_level data.
print "- min", tmpCube.data.min(), "max", tmpCube.data.max(),
print "has_lazy_data =", tmpCube.has_lazy_data()
# end of if tmpCube.has_lazy_data():
if doMultiHourlyMean and (tmpCube.coords('forecast_period')[0].shape[0] > 1):
# grab the variable which is f(t,z,y,x)
# tmpCube corresponds to each variable for the SYNOP hours from
# start to end of short time period mean (say 3-hourly)
cubeName = tmpCube.standard_name
cubeName = cubeName if cubeName else ''
                # get the action: either accumulation (sum) or mean.
action = 'sum' if (cubeName, varSTASH) in _accumulationVars_ else 'mean'
# convert 3-hourly mean data into 6-hourly mean or accumulation
# actionIntervals is 6 hourly mean or accumulation
                # here dt intervals are meant to be forecast intervals; the
                # model forecasts every one hour, so we must pass '1 hour'
                # as the dt intervals argument.
if __LPRINT__: print "action = ", action
print "tmpCube", tmpCube
tmpCube = cubeAverager(tmpCube, action, dt='1 hour',
actionIntervals=str(start_step_fcst_hour)+' hour',
tpoint=timepoint, fpoint=fcstpoint,
tbounds=timebound, fbounds=fcstbound)
# end of if doMultiHourlyMean and tmpCube.coords('forecast_period')[0].shape[0] > 1:
print "before regrid", varName, tmpCube.data.min(), tmpCube.data.max()
            exmode = None # required when user didn't do any regrid
# interpolate it as per targetGridResolution deg resolution by
# setting up sample points based on coord
if _doRegrid_:
if __LPRINT__: print "From shape", tmpCube.shape
if (varName, varSTASH) in _precipVars_:
                    # DO NOT APPLY iris.analysis.Linear(extrapolation_mode='mask'),
                    # which writes nan everywhere for the snowfall_flux,
                    # rainfall_flux, precipitation_flux. So do not apply it.
exmode = 'linear'
else:
# In general all the other variables should not be
# extrapolated over masked grid points.
exmode = 'mask'
# end of if (...):
                # However, if the user specified a custom method, use that!
exmode = _extraPolateMethod_ if _extraPolateMethod_ != 'auto' else exmode
# but make sure that soil variables (or whichever variables do not have values over ocean)
# do not extrapolate over ocean/masked regions. Otherwise, it will write only nan.
exmode = 'mask' if varName in _maskOverOceanVars_ else exmode
if os.path.isfile(_targetGridFile_):
print "\n Regridding data to %s degree spatial resolution based on file %s\n" % (_targetGrid_.shape, _targetGridFile_)
# Do regrid based on user specfied target grid file.
scheme = iris.analysis.Linear(extrapolation_mode=exmode)
regdCube = tmpCube.regrid(_targetGrid_, scheme)
print "regrid data shape", regdCube.shape
else:
                    # Do regrid based on the user specified target grid resolution number.
print "\n Regridding data to %sx%s degree spatial resolution \n" % (_targetGridRes_, _targetGridRes_)
try:
                        # This linear interpolation will extrapolate over the
                        # ocean even though the original data doesn't have
                        # values there, and vice versa. So let's be aware of this.
regdCube = tmpCube.interpolate(_targetGrid_, iris.analysis.Linear(extrapolation_mode=exmode))
except Exception as e:
print "ALERT !!! Error while regridding!! %s" % str(e)
print " So skipping this without saving data"
continue
# end of try:
else:
# do not apply regrid. this is temporary fix.
regdCube = tmpCube
# end of if _doRegrid_:
if _reverseLatitude_:
# Need to reverse latitude from SN to NS
rcsh = len(regdCube.data.shape)
if rcsh == 3:
regdCube.data = regdCube.data[:,::-1,:]
elif rcsh == 2:
regdCube.data = regdCube.data[::-1,:]
lat = regdCube.coords('latitude')[0]
lat.points = lat.points[::-1]
# end of if _reverseLatitude_:
unit = regdCube.units
if varName.endswith('_flux'):
# applicable only to TIGGE
                # converting flux unit from time average into time integrated
                # by multiplying by 60*60*6 = 21600 seconds in 6 hours
                regdCube.data *= 21600.0
                unit = Unit('W m-2 s') # changed unit from (W m-2) into (W m-2 s)
                print "Multiplied data by 60*60*6 seconds to make flux variable time-integrated"
print regdCube.data.min(), regdCube.data.max()
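                # Worked example (hypothetical value): a 6-hour mean flux of
                # 200 W m-2 becomes 200 * 21600 = 4.32e6 W m-2 s (i.e. J m-2).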
# end of if varName.endswith('_flux'):
if (varName, varSTASH) in _precipVars_:
                # Since we are not using the 'mask' option for extrapolation
                # while doing the linear regrid, -ve values appear in the
                # extrapolated grids after regrid. So clamp the minimum to 0.
regdCube.data[regdCube.data < 0.0] = 0.0
# end of if (varName, varSTASH) in _precipVars_:
if (varName, varSTASH) in [('land_binary_mask', 'm01s00i030')]:
regdCube.data[regdCube.data > 0] = 1
# trying to keep values either 0 or 1. Not fraction!
regdCube.data = numpy.ma.array(regdCube.data, dtype=numpy.int)
# end of if (varName, varSTASH) in [('land_binary_mask', 'm01s00i030')]:
if (varName, varSTASH) in [('surface_altitude', 'm01s00i033')]:
regdCube.standard_name = None
regdCube.long_name = 'orography'
# end of if (varName, varSTASH) in [('surface_altitude', 'm01s00i033')]:
if exmode == 'mask':
                # For the above set of variables we should not convert into
                # masked array. Otherwise their full data goes as nan.
# convert data into masked array
regdCube.data = numpy.ma.masked_array(regdCube.data,
dtype=numpy.float64, fill_value=9.999e+20)
if (varName, varSTASH) in [('moisture_content_of_soil_layer', 'm01s08i223'),
('sea_ice_area_fraction', 'm01s00i031'),
('sea_ice_thickness', 'm01s00i032'),]:
                    # We should assign 0 instead of 1e-15 only for this var!
regdCube.data[regdCube.data <= 1e-15] = 0.0
elif (varName, varSTASH) == ('soil_temperature', 'm01s03i238'):
                    # We should assign min instead of 1e-15 only for this var,
                    # because 0 will not make sense when the temperature unit is Kelvin
nmin = numpy.ma.masked_less_equal(regdCube.data, 1e-15).min()
regdCube.data[regdCube.data <= 1e-15] = nmin
                # http://www.cpc.ncep.noaa.gov/products/wesley/g2grb.html
                # says that the 9.999e+20 value indicates missingValue in grib2.
                # By default g2ctl.pl generates "undefr 9.999e+20", so we must
                # keep the fill_value / missingValue as 9.999e+20 only.
numpy.ma.set_fill_value(regdCube.data, 9.999e+20)
# end of if exmode == 'mask':
if (varName, varSTASH) == ('soil_temperature', 'm01s03i238'):
# convert to 20cm layer as per TIGGE
regdCube = convertSoilVarto20cm(regdCube)
# end of if (varName, varSTASH) == ('soil_temperature', 'm01s03i238'):
if __fillFullyMaskedVars__ is not None and isinstance(regdCube.data, numpy.ma.masked_array):
# yes, it is ma array
if regdCube.data.mask.all():
# Now data is fully masked. So lets fill with user passed value.
# And still create ma array
                    # fill once, then rewrap as a masked array; calling
                    # .filled() again on the already-filled plain ndarray
                    # would raise AttributeError.
                    filled = regdCube.data.filled(__fillFullyMaskedVars__)
                    print "filled masked vars", filled
                    regdCube.data = numpy.ma.masked_array(filled,
                                                   fill_value=9.999e+20)
elif regdCube.data.min() == regdCube.data.max():
                    # Both min and max are the same value, but the mask is not
                    # fully True, so the previous condition was not executed.
                    # Anyhow, let's fill fully with the fillFullyMaskedVars value.
print "Both min and max are same. So lets fillFullyMaskedVars as", __fillFullyMaskedVars__
regdCube.data = numpy.ma.masked_array(regdCube.data.filled(__fillFullyMaskedVars__),
fill_value=9.999e+20)
# end of if __fillFullyMaskedVars__ and ...:
# get all other dimensions
# generate list of tuples contain index and coordinate
dim_coords = [(coord, i) for i,coord in enumerate(list(regdCube.dim_coords))]
aux_coords = regdCube.aux_coords
# create ensemble packed cubes
regdData = iris.cube.Cube(regdCube.data, regdCube.standard_name,
regdCube.long_name, regdCube.var_name,
unit, regdCube.attributes,
regdCube.cell_methods, dim_coords)
# add all aux coordinates
for axc in aux_coords: regdData.add_aux_coord(axc)
print regdData
# make memory free
print "regrid done"
print "after regrid", varName, regdData.data.min(), regdData.data.max()
if __LPRINT__: print "To shape", regdData.shape
regdData.attributes = tmpCube.attributes
if __LPRINT__: print "set the attributes back to regdData"
if __LPRINT__: print "regdData => ", regdData
# get the regridded lat/lons
stdNm, stash, fcstTm, refTm, lat1, lon1 = umfcs.getCubeAttr(regdData)
if __LPRINT__: print "Got attributes from regdData"
# save the cube in append mode as a grib2 file
if fcstTm.bounds is not None:
# (need this for pf files)
if dtype == 'ana':
# this is needed for analysis 00th simulated_hr
# get the first hour from bounds
if __anl_step_hour__ == 3:
                        # for the 3-hourly ana file, we need to subtract 3 to
                        # get the corresponding out hr,
                        # i.e. 3 means 0. Not applicable for instantaneous fields.
if fhr == 1.5:
hr = str(int(fcstTm.bounds[-1][-1]))
elif fhr == 4.5:
hr = str(int(fcstTm.bounds[-1][0]) - 3)
elif __anl_step_hour__ == 6:
hr = str(int(fcstTm.bounds[-1][0]))
elif dtype == 'fcst':
# this is needed for forecast 00th simulated_hr
# get the last hour from bounds
hr = str(int(fcstTm.bounds[-1][-1]))
if __LPRINT__: print "Bounds comes in ", hr, fcstTm.bounds, fileName
else:
# get the fcst time point
# this is needed for analysis/forecast 00th simulated_hr
hr = str(int(fcstTm.points))
if __LPRINT__: print "points comes in ", hr, fileName
# end of if fcstTm.bounds:
if dtype == 'ana':
hr = str(int(hr) + int(_inDataPath_.split('/')[-1]))
# generate the out file name based on actual informations
outFn = __genAnlFcstOutFileName__(outFileNameStructure,
outFnIndecies, _current_date_, hr,
__utc__, _preExtension_)
# get the file full name except last extension, for the purpose
# of writing intermediate nc files
ofname = outFn.split(fileExtension)[0]
try:
save_tigge_tweaked_messages([regdData])
except Exception as e:
print "ALERT !!! Error while saving!! %s" % str(e)
print " So skipping this without saving data"
continue
# end of try:
print "saved"
# make memory free
del regdCube, tmpCube, regdData
# end of for fhr in fcstHours:
# end of for varName, varSTASH in varNamesSTASH:
# make memory free
del cubes
print " Time taken to convert the file: %8.5f seconds \n" %(time.time()-_startT_)
print " Finished converting file: %s into grib2 format for fcst file: %s \n" %(fileName,hr)
# end of def regridAnlFcstFiles(fname):
def save_tigge_tweaked_messages(cubeList):
global _ncmrGrib2LocalTableVars_, _aod_pseudo_level_var_, _opPath_, \
__setGrib2TableParameters__, __soilFirstSecondFixedSurfaceUnit__, \
_accumulationVars_, _total_cummulativeVars_
for cube in cubeList:
for cube, grib_message in iris.fileformats.grib.as_pairs(cube): #save_pairs_from_cube(cube): #
cstash = str(cube.attributes.get('STASH', 'None'))
print "Tweaking begin ", cube.standard_name, cube.long_name, cstash
# post process the GRIB2 message, prior to saving
gribapi.grib_set_long(grib_message, "centre", 29) # RMC of India
gribapi.grib_set_long(grib_message, "subCentre", 0) # No subcentre
print "reset the centre as 29"
# Set the tigge's standard production status of the data
# http://www.nco.ncep.noaa.gov/pmb/docs/grib2/grib2_table1-3.shtml
# https://software.ecmwf.int/wiki/display/TIGGE/Rules+for+data+encoding+and+exchange
# 4 for TIGGE-NCMRWF Operational data
# 5 for TIGGE-NCMRWF Test data
gribapi.grib_set_long(grib_message, "productionStatusOfProcessedData", 4) # Operational mode
if cube.coords("realization"):
# ensembles tweak
# http://www.nco.ncep.noaa.gov/pmb/docs/grib2/grib2_table4-3.shtml
# 4 points ensemble forecast
gribapi.grib_set_long(grib_message, "typeOfGeneratingProcess", 4)
if cube.coord("forecast_period").bounds is None:
#http://www.nco.ncep.noaa.gov/pmb/docs/grib2/grib2_table4-0.shtml
# template 01 would be better
gribapi.grib_set(grib_message, "productDefinitionTemplateNumber", 1)
else:
# template 11 would be better
gribapi.grib_set(grib_message, "productDefinitionTemplateNumber", 11)
# if we set bounds[0][0] = 0, wgrib2 gives error for 0 fcst time.
# so we need to set proper time intervals
# (typeOfTimeIncrement) as 2 as per below table.
# http://www.nco.ncep.noaa.gov/pmb/docs/grib2/grib2_table4-11.shtml
# fileformats/grib/_save_rules.py-> set_forecast_time() ->
# _non_missing_forecast_period() returns 'fp' as bounds[0][0].
                    # but meanwhile let's fix it by setting typeOfTimeIncrement=2.
# http://www.cosmo-model.org/content/model/documentation/grib/pdtemplate_4.11.htm
gribapi.grib_set(grib_message, "typeOfTimeIncrement", 2)
# end of if cube.coord("forecast_period").bounds is None:
# setting ensemble no
ensno = int(cube.coord('realization').points[0])
gribapi.grib_set(grib_message, "perturbationNumber", ensno)
memno = str(ensno).zfill(3) # directory member number
# no encoding at present in Iris, set to missing
gribapi.grib_set(grib_message, "numberOfForecastsInEnsemble", 255)
if ensno:
# http://www.nco.ncep.noaa.gov/pmb/docs/grib2/grib2_table4-6.shtml
                    # 3 would be better, since we keep on increasing ensemble
                    # members from 1 to 44
gribapi.grib_set(grib_message, "typeOfEnsembleForecast", 3)
else:
# control forecast
# 1 would be better for control run
gribapi.grib_set(grib_message, "typeOfEnsembleForecast", 1)
# ensembles tweak end
else:
# deterministic forecast
memno = 'fcs' # directory member number
if cube.coord("forecast_period").bounds is not None:
# if we set bounds[0][0] = 0, wgrib2 gives error for 0 fcst time.
# so we need to set proper time intervals
# (typeOfTimeIncrement) as 2 as per below table.
# http://www.nco.ncep.noaa.gov/pmb/docs/grib2/grib2_table4-11.shtml
# fileformats/grib/_save_rules.py-> set_forecast_time() ->
# _non_missing_forecast_period() returns 'fp' as bounds[0][0].
                    # but meanwhile let's fix it by setting typeOfTimeIncrement=2.
# http://www.cosmo-model.org/content/model/documentation/grib/pdtemplate_4.11.htm
gribapi.grib_set(grib_message, "typeOfTimeIncrement", 2)
# end of if cube.coord("realization"):
if cube.standard_name or cube.long_name:
if cube.standard_name:
loc_longname = None
if (cube.standard_name, cstash) == ('air_temperature', 'm01s03i236'):
                        # we have to explicitly re-set the type of first fixed
                        # surface as (103) and the scale factor, scaled value of 2m temperature
gribapi.grib_set(grib_message, "typeOfFirstFixedSurface", 103)
gribapi.grib_set(grib_message, "scaleFactorOfFirstFixedSurface", 0)
gribapi.grib_set(grib_message, "scaledValueOfFirstFixedSurface", 2)
if cube.standard_name.startswith('air_pressure_at_sea_level'):
                        # we have to explicitly re-set the type of first fixed
                        # surface as Mean sea level (101)
gribapi.grib_set(grib_message, "typeOfFirstFixedSurface", 101)
if cube.standard_name.startswith('toa'):
                        # we have to explicitly re-set the type of first surface
# as Nominal top of the atmosphere i.e. 8 (WMO standard)
gribapi.grib_set(grib_message, "typeOfFirstFixedSurface", 8)
# end of if cube.standard_name.startswith('toa'):
if cube.standard_name.startswith('tropopause'):
                        # we have to explicitly re-set the type of first surface
# as tropopause i.e. 7 (WMO standard)
gribapi.grib_set(grib_message, "typeOfFirstFixedSurface", 7)
# end of if cube.standard_name.startswith('tropopause'):
if cube.standard_name.startswith('soil_temperature'):
# as per TIGGE standard
gribapi.grib_set(grib_message, "typeOfFirstFixedSurface", 106)
gribapi.grib_set(grib_message, "typeOfSecondFixedSurface", 106)
gribapi.grib_set(grib_message, "scaleFactorOfFirstFixedSurface", 0)
gribapi.grib_set(grib_message, "scaleFactorOfSecondFixedSurface", 1)
gribapi.grib_set(grib_message, "scaledValueOfFirstFixedSurface", 0)
gribapi.grib_set(grib_message, "scaledValueOfSecondFixedSurface", 2)
# end of if cube.standard_name.startswith('soil_temperature'):
# end of if cube.standard_name:
if cube.long_name:
if ((cube.long_name, cstash) in [('air_temperature_maximum', 'm01s03i236'), \
('air_temperature_minimum', 'm01s03i236')]):
# we have to explicitly re-set the type of first fixed
                        # surface as (103) and the scale factor, scaled value of 2m temperature
gribapi.grib_set(grib_message, "typeOfFirstFixedSurface", 103)
gribapi.grib_set(grib_message, "scaleFactorOfFirstFixedSurface", 0)
gribapi.grib_set(grib_message, "scaledValueOfFirstFixedSurface", 2)
gribapi.grib_set(grib_message, "timeIncrementBetweenSuccessiveFields", 0)
if 'toa' in cube.long_name:
                        # we have to explicitly re-set the type of first surface
# as Nominal top of the atmosphere i.e. 8 (WMO standard)
gribapi.grib_set(grib_message, "typeOfFirstFixedSurface", 8)
gribapi.grib_set(grib_message, "typeOfSecondFixedSurface", 255)
# end of if cube.long_name.startswith('toa'):
aod_name = _aod_pseudo_level_var_.keys()[0]
if cube.long_name.startswith(aod_name):
                        # we have to explicitly re-set the type of first surface
                        # as surface (1) and the type of second fixed surface as
                        # tropopause (7), as per WMO standard, for the aod var.
gribapi.grib_set(grib_message, "typeOfFirstFixedSurface", 1)
gribapi.grib_set(grib_message, "typeOfSecondFixedSurface", 7)
print "Set typeOfFirstFixedSurface as 1 and typeOfSecondFixedSurface as 7 to aod"
# end of if cube.long_name.startswith(aod_name):
# check for long name in _ncmrGrib2LocalTableVars_
loc_longname = [1 for lname in _ncmrGrib2LocalTableVars_ if cube.long_name.startswith(lname)]
# end of if cube.long_name:
if 'cloud' in str(cube.standard_name) or 'cloud' in str(cube.long_name):
                    # we have to explicitly re-set the type of first surface
                    # as surface (1) and the type of second fixed surface
                    # as Nominal top of the atmosphere i.e. 8 (WMO standard)
gribapi.grib_set(grib_message, "typeOfFirstFixedSurface", 1)
gribapi.grib_set(grib_message, "typeOfSecondFixedSurface", 8)
                # end of if 'cloud' in standard_name or long_name:
if cube.standard_name in _ncmrGrib2LocalTableVars_ or loc_longname:
                    # We have to enable the local table version and disable the
                    # master table for the special variables only.
# http://www.cosmo-model.org/content/model/documentation/grib/grib2keys_1.htm
# Above link says that tablesVersion must be set to 255,
# then only local table will be enabled.
gribapi.grib_set_long(grib_message, "tablesVersion", 255)
# http://apt-browse.org/browse/debian/wheezy/main/i386/libgrib-api-1.9.16/1.9.16-2%2Bb1/file/usr/share/grib_api/definitions/grib2/section.1.def (line no 42)
# Above link says versionNumberOfGribLocalTables is alias
# of LocalTablesVersion.
# Set local table version number as 1 as per
# ncmr_grib2_local_table standard.
gribapi.grib_set_long(grib_message, "versionNumberOfGribLocalTables", 1)
# end of if cube.standard_name in _ncmrGrib2LocalTableVars_:
if (cube.standard_name in _total_cummulativeVars_ or \
cube.long_name in _total_cummulativeVars_):
# set type of first fixed as surface by default for all variables
gribapi.grib_set(grib_message, "typeOfFirstFixedSurface", 1)
if cube.long_name:
if 'toa' in cube.long_name:
                            # we have to explicitly re-set the type of first surface
# as Nominal top of the atmosphere i.e. 8 (WMO standard)
gribapi.grib_set(grib_message, "typeOfFirstFixedSurface", 8)
if cube.standard_name:
if 'toa' in cube.standard_name:
                            # we have to explicitly re-set the type of first surface
# as Nominal top of the atmosphere i.e. 8 (WMO standard)
gribapi.grib_set(grib_message, "typeOfFirstFixedSurface", 8)
# set other parameters
gribapi.grib_set(grib_message, "scaleFactorOfFirstFixedSurface", 255)
gribapi.grib_set(grib_message, "scaledValueOfFirstFixedSurface", -1)
gribapi.grib_set(grib_message, "timeIncrementBetweenSuccessiveFields", 0)
gribapi.grib_set(grib_message, "typeOfStatisticalProcessing", 1)
# end of if cube.standard_name or ...:
if __setGrib2TableParameters__:
                # These user defined parameters must come last in this function!
for key, val in __setGrib2TableParameters__:
gribapi.grib_set_long(grib_message, key, val)
print "set user defined grib2table parameter ('%s', %s)" % (key, val)
# end of if __setGrib2TableParameters__:
print "Tweaking end ", cube.standard_name
            # get the tigge standard filename
outgname, sname = getTiggeFileName(cube)
outgdir = os.path.join(_opPath_, *[memno, sname])
createDirWhileParallelRacing(outgdir)
outgpath = os.path.join(outgdir, outgname) # Join the outpath & outfilename
print "lets save into", outgpath
if (cube.standard_name, cstash) in _accumulationVars_:
iris.fileformats.netcdf.save(cube, outgpath+'.nc') # save nc file
else:
# finally save the cube/message into many individual grib2 files
iris.fileformats.grib.save_messages([grib_message], outgpath)
# end of for cube, grib_message in iris.fileformats.grib.as_pairs(cube):
# end of for cube in cubeList:
# end of def save_tigge_tweaked_messages(cube):
def makeTotalCummulativeVars(arg):
global _opPath_, _current_date_, __start_long_fcst_hour__, __end_long_fcst_hour__
svar, sname, umfcstype, ens = arg
ens = ens.zfill(3)
if svar == 'tp':
lname = 'time_cummulated_precipitation'
rstash = True
else:
lname = 'time_integrated_' + sname
rstash = False
fname = 'z_tigge_c_dems_' +_current_date_+ '000000_glob_prod_' # TIGGE prod
fname += umfcstype+ '_sl_%s_' + ens + '_0000_' + svar + '.nc'
infiles = [os.path.join(*[_opPath_, ens, svar, fname % str(t).zfill(4)])
for t in range(6, __end_long_fcst_hour__+1, 6)]
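    # For illustration (hypothetical date/values): with svar='tp', ens='000',
    # umfcstype='fc' and _current_date_='20160105', the t=6 entry is
    # <_opPath_>/000/tp/z_tigge_c_dems_20160105000000_glob_prod_fc_sl_0006_000_0000_tp.nc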
for infile in infiles:
if not os.path.isfile(infile):
print "Error: The infile '%s' doesnt exist to calculate makeTotalCummulativeVars" % infile
else:
print "Exists: ", infile
try:
cubes = iris.load(infile)[0]
except Exception as e:
print "Error : Unable to load file - ", infile
# end of for infile in infiles:
try:
cubes = iris.load(infiles)[0]
except Exception as e:
raise ValueError("Unable to load files from %s - while makeTotalCummulativeVars" % str(infiles))
    # get the cumulated cubes generator
outcubes = cubeCummulator(cubes, standard_name='None', long_name=lname,
addZerosFirstCube=True, removeSTASH=rstash)
    # save cumulated cubes into individual grib2 files
for cube in outcubes: save_tigge_tweaked_messages([cube])
cmd = 'rm -rf ' + ' '.join(infiles)
subprocess.call(cmd, shell=True)
# end of def makeTotalCummulativeVars():
# Start definition #6
def doFcstConvert(fname):
"""
New Module by AAT:
    This module has been rewritten entirely by AAT for optimization as an embarrassingly-
    parallel problem! This module acts as the main program to feed the filenames as a
child process to a multiprocessing thread as a daemon process.
:param fname: Name of the FF filename in question as a "string"
:return: Nothing! TANGIBLE!
"""
global __start_long_fcst_hour__, __end_long_fcst_hour__, __UMtype__
if __UMtype__ == 'global':
# calculate start hour of long fcst in multiples of 24. Why?
# 00 hr contains from 06 to 24 hours data.
# 24 hr contains from 24 to 48 hours data, and so on.
start_fcst_hour = ((__start_long_fcst_hour__ / 24) - 1) * 24
        # Here we subtract one 24 because the 00 file contains up to 24 hours,
        # the 24 hour file contains up to 48 hours, and so on.
        # here max fcst hours go up to 240 only, not 241. Why?
        # Because the 216 hour long fcst file contains up to the 240th hour fcst,
        # and the 240 hour file contains up to the 264th hour fcst.
        # So there is no need to add +1 to __end_long_fcst_hour__.
fcst_times = [str(hr).zfill(3) for hr in range(start_fcst_hour, __end_long_fcst_hour__, 24)]
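        # Worked example (hypothetical hours): with __start_long_fcst_hour__=24
        # and __end_long_fcst_hour__=240, start_fcst_hour = ((24/24)-1)*24 = 0,
        # so fcst_times = ['000', '024', ..., '216'] and the '216' file carries
        # the forecast hours up to 240.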
elif __UMtype__ == 'regional':
fcst_times = [str(hr).zfill(2) for hr in range(0, __end_long_fcst_hour__, 6)]
# end of if __UMtype__ == 'global':
fcst_filenames = [(fname, hr) for hr in fcst_times]
nchild = len(fcst_times)
if not nchild: raise ValueError("Got 0 fcst_times, couldn't make parallel !")
# create the no of child parallel processes
inner_pool = mp.Pool(processes=nchild)
print "Creating %i (daemon) workers and jobs in child." % nchild
# pass the forecast hours as argument to take one fcst file per process / core to regrid it.
results = inner_pool.map(regridAnlFcstFiles, fcst_filenames)
# closing and joining child pools
inner_pool.close()
inner_pool.join()
# parallel end
# end def doFcstConvert(fname):
def doAnlConvert(fname):
"""
New Module by AAT:
    This module has been rewritten entirely by AAT for optimization as an embarrassingly-
    parallel problem! This module acts as the main program to feed the filenames as a
child process to a multiprocessing thread as a daemon process.
:param fname: Name of the FF filename in question as a "string"
:return: Nothing! TANGIBLE!
"""
regridAnlFcstFiles((fname, '000'))
# end def doAnlConvert(fname):
# Start the convertFilesInParallel function
def convertFilesInParallel(fnames, ftype):
"""
convertFilesInParallel function calling all the sub-functions
:param fnames: a simple filename as argument in a string format
:return: THE SheBang!
"""
global _startT_, _tmpDir_, _opPath_
## get the no of files and
nprocesses = len(fnames)
if not nprocesses: raise ValueError("Got 0 fnames, couldn't make parallel !")
maxprocess = mp.cpu_count()
if nprocesses > maxprocess: nprocesses = maxprocess
    # let's create the no. of parallel processes w.r.t. the no. of files.
# parallel begin - 1
pool = _MyPool(nprocesses)
print "Creating %d (non-daemon) workers and jobs in convertFilesInParallel process." % nprocesses
if ftype in ['anl', 'analysis']:
results = pool.map(doAnlConvert, fnames)
elif ftype in ['fcst', 'forecast']:
results = pool.map(doFcstConvert, fnames)
else:
raise ValueError("Unknown file type !")
# end of if ftype in ['anl', 'analysis']:
# closing and joining master pools
pool.close()
pool.join()
# parallel end - 1
print "Total time taken to convert %d files was: %8.5f seconds \n" %(len(fnames),(time.time()-_startT_))
return
# end of def convertFilesInParallel(fnames):
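# NOTE (hedged): _MyPool used above is defined elsewhere in this module; the
# sketch below is only an illustration (with hypothetical names) of the classic
# non-daemonic Pool pattern it presumably follows, which is what allows the
# outer workers here to spawn the inner daemon pools used in doFcstConvert
# (ordinary mp.Pool workers are daemonic and may not have children).
import multiprocessing.pool
class _NoDaemonProcessSketch(mp.Process):
    # hypothetical helper: always report daemon=False so workers may fork children
    def _get_daemon(self):
        return False
    def _set_daemon(self, value):
        pass
    daemon = property(_get_daemon, _set_daemon)
class _MyPoolSketch(multiprocessing.pool.Pool):
    # hypothetical stand-in for _MyPool; not used by the operational flow
    Process = _NoDaemonProcessSketch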
def _checkInFilesStatus(path, ftype, pfnames):
global __start_long_fcst_hour__, __end_long_fcst_hour__, __UMtype__
if ftype in ['ana', 'anl']:
fhrs = ['000']
elif ftype in ['fcst', 'prg']:
if __UMtype__ == 'global':
# calculate start hour of long fcst in multiples of 24. Why?
# 00 hr contains from 06 to 24 hours data.
# 24 hr contains from 24 to 48 hours data, and so on.
start_fcst_hour = (__start_long_fcst_hour__ / 24) * 24
            # here max fcst hours go up to 240 only, not 241. Why?
            # Because the 216 hour long fcst file contains up to the 240th hour fcst,
            # and the 240 hour file contains up to the 264th hour fcst.
            # So there is no need to add +1 to __end_long_fcst_hour__.
fhrs = [str(hr).zfill(3) for hr in range(start_fcst_hour, __end_long_fcst_hour__, 24)]
elif __UMtype__ == 'regional':
fhrs = [str(hr).zfill(2) for hr in range(6, __end_long_fcst_hour__, 6)]
elif __UMtype__ == 'ensemble':
fhrs = [str(hr).zfill(3) for hr in range(0, __end_long_fcst_hour__, 6)]
# end of if __UMtype__ == 'global':
# end of if ftype in ['ana', 'anl']:
fileNotExistList = []
for pfname in pfnames:
for fhr in fhrs:
            # construct the correct fileName from the partial fileName and hours;
            # add the hour only if the partial filename doesn't have any extension.
if __UMtype__ == 'global':
fname = pfname if '.' in pfname else pfname + fhr
elif __UMtype__ == 'regional':
# generate filenames like 'umnsaa_pb000', 'umnsaa_pb006', etc
fname = pfname if '.' in pfname else pfname + fhr.zfill(3)
elif __UMtype__ == 'ensemble':
fname = pfname if '.' in pfname else pfname + fhr
# end of if __UMtype__ == 'global':
fpath = os.path.join(path, fname)
if not os.path.isfile(fpath): fileNotExistList.append(fpath)
# end of for pfname in pfnames:
status = False if fileNotExistList else True
if status is False:
print "The following infiles are not exists!\n"
print "*" * 80
print "\n".join(fileNotExistList)
print "*" * 80
return status
# end of def _checkInFilesStatus(path, ftype, pfnames):
def _checkOutFilesStatus(path, ftype, date, utc, overwrite):
global _preExtension_, __end_long_fcst_hour__, __anlFileNameStructure__,\
__fcstFileNameStructure__, __fcst_step_hour__, \
__anl_step_hour__, __utc__, __start_long_fcst_hour__
if ftype in ['ana', 'anl']:
outFileNameStructure = __anlFileNameStructure__
fhrs = [utc] # ana_hour (short forecast hour) is same as simulated_hr (i.e. utc)
simulated_hr = int(__utc__)
        # since ncum produces analysis files for the 00, 06, 12, 18 utc cycles
        # and its forecast time starts from 0 with the reference time based on
        # utc, we should calculate the correct hours as below.
fhrs = range(0+simulated_hr, 6+simulated_hr, __anl_step_hour__)
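        # Worked example (hypothetical utc): for utc='00' and
        # __anl_step_hour__=3, fhrs = [0, 3]; for utc='06' it is [6, 9].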
elif ftype in ['fcst', 'prg']:
outFileNameStructure = __fcstFileNameStructure__
fhrs = range(__start_long_fcst_hour__, __end_long_fcst_hour__+1,
__fcst_step_hour__)
if __fcst_step_hour__ == 6 and __start_long_fcst_hour__: fhrs = fhrs[1:]
print "fhrs++", fhrs, __fcst_step_hour__, __start_long_fcst_hour__
# get the out fileName Structure based on pre / user defined indecies
outFnIndecies = __getAnlFcstFileNameIndecies__(outFileNameStructure)
status = None
fnames = []
print "fhrs = ", fhrs
for fhr in fhrs:
# generate the out file name based on actual informations.
# here preExtension is empty string to create final needed out file name
fname = __genAnlFcstOutFileName__(outFileNameStructure, outFnIndecies,
date, fhr, utc)
fpath = os.path.join(path, fname)
fnames.append(fname)
for ext in ['', '.ctl', '.idx']:
fpath = os.path.join(path, fname+ext)
if os.path.isfile(fpath):
print "Out File already exists", fpath,
if overwrite:
try:
os.remove(fpath)
except Exception, e:
print "Got error while removing file", e
finally:
status = 'FilesRemoved'
print ", but overwrite option is True. So removed it!"
else:
status = 'FilesExist'
else:
print "\nOut File does not exists", fpath
if status in [None, 'FilesRemoved']:
status = 'FilesDoNotExist'
continue
                elif status == 'FilesExist':
status = 'PartialFilesExist'
break
# end of for ext in ['', '.ctl', '.idx']:
for ext in [_preExtension_, '_Ordered']:
fpath = os.path.join(path, fname)
if os.path.isfile(fpath) and ext in fpath:
try:
os.remove(fpath)
print "removed file : ", fpath
except Exception, e:
print "Got error while removing file", e
finally:
status = 'IntermediateFilesExist'
print "removed intermediate file"
# end of for ext in [_preExtension_, '_Ordered']:
# end of for fhr in fhrs:
ifiles = [fname for fname in os.listdir(path) if fname.endswith('.nc')]
if ifiles:
print "Intermediate files are exists in the outdirectory.", path
for ifile in ifiles:
if not [ifile for fname in fnames if fname.split('.')[0] in ifile]: continue
if outFileNameStructure[0] in ifile and utc in ifile and _preExtension_ in ifile:
status = 'IntermediateFilesExist'
    # end of if ifiles:
if status in ['PartialFilesExist', 'IntermediateFilesExist']:
# partial files exist, so make overwrite option as True and do
# recursive call one time to remove all output files.
print "Partial/Intermediate out files exist, so going to overwrite all files"
return _checkOutFilesStatus(path, ftype, date, utc, overwrite=True)
else:
return status
# end of def _checkOutFilesStatus(path, ftype, date, hr, overwrite):
def convertFcstFiles(inPath, outPath, tmpPath, **kwarg):
global _targetGrid_, _targetGridRes_, _current_date_, _startT_, _tmpDir_, \
_inDataPath_, _opPath_, _doRegrid_, _convertVars_, _requiredLat_, \
_requiredLon_, _createGrib2CtlIdxFiles_, _createGrib1CtlIdxFiles_, \
_convertGrib2FilestoGrib1Files_, __fcstFileNameStructure__, \
__LPRINT__, __utc__, __fcst_step_hour__, _reverseLatitude_, \
__end_long_fcst_hour__, __outFileType__, __grib1FilesNameSuffix__, \
__removeGrib2FilesAfterGrib1FilesCreated__, _depedendantVars_, \
_removeVars_, _requiredPressureLevels_, __setGrib2TableParameters__, \
__wgrib2Arguments__, __soilFirstSecondFixedSurfaceUnit__, __UMtype__, \
__start_long_fcst_hour__, _extraPolateMethod_, _targetGridFile_, \
__fillFullyMaskedVars__
# load key word arguments
UMtype = kwarg.get('UMtype', 'global')
UMInLongFcstFiles = kwarg.get('UMInLongFcstFiles', None)
targetGridResolution = kwarg.get('targetGridResolution', 0.25)
targetGridFile = kwarg.get('targetGridFile', '')
date = kwarg.get('date', time.strftime('%Y%m%d'))
utc = kwarg.get('utc', '00')
overwrite = kwarg.get('overwrite', False)
lprint = kwarg.get('lprint', False)
convertVars = kwarg.get('convertVars', None)
latitude = kwarg.get('latitude', None)
longitude = kwarg.get('longitude', None)
pressureLevels = kwarg.get('pressureLevels', None)
fillFullyMaskedVars = kwarg.get('fillFullyMaskedVars', None)
extraPolateMethod = kwarg.get('extraPolateMethod', 'auto')
soilFirstSecondFixedSurfaceUnit = kwarg.get('soilFirstSecondFixedSurfaceUnit', 'cm')
fcst_step_hour = kwarg.get('fcst_step_hour', 6)
start_long_fcst_hour = kwarg.get('start_long_fcst_hour', 6)
end_long_fcst_hour = kwarg.get('end_long_fcst_hour', 240)
fcstFileNameStructure = kwarg.get('fcstFileNameStructure', None)
createGrib2CtlIdxFiles = kwarg.get('createGrib2CtlIdxFiles', True)
createGrib1CtlIdxFiles = kwarg.get('createGrib1CtlIdxFiles', False)
convertGrib2FilestoGrib1Files = kwarg.get('convertGrib2FilestoGrib1Files', False)
grib1FilesNameSuffix = kwarg.get('grib1FilesNameSuffix', '1')
removeGrib2FilesAfterGrib1FilesCreated = kwarg.get('removeGrib2FilesAfterGrib1FilesCreated', False)
setGrib2TableParameters = kwarg.get('setGrib2TableParameters', None)
wgrib2Arguments = kwarg.get('wgrib2Arguments', None)
callBackScript = kwarg.get('callBackScript', None)
# assign out file type in global variable
__outFileType__ = 'fcst'
# assign the convert vars list of tuples to global variable
if convertVars: _convertVars_ = convertVars
# assign the analysis file name structure
if fcstFileNameStructure: __fcstFileNameStructure__ = fcstFileNameStructure
# set print variables details options
__LPRINT__ = lprint
# update global variables
__UMtype__ = UMtype
__utc__ = utc
__fcst_step_hour__ = fcst_step_hour
__start_long_fcst_hour__ = start_long_fcst_hour
__end_long_fcst_hour__ = end_long_fcst_hour
__removeGrib2FilesAfterGrib1FilesCreated__ = removeGrib2FilesAfterGrib1FilesCreated
__grib1FilesNameSuffix__ = grib1FilesNameSuffix
_targetGridRes_ = str(targetGridResolution)
_targetGridFile_ = targetGridFile
_extraPolateMethod_ = extraPolateMethod
_requiredPressureLevels_ = pressureLevels
__fillFullyMaskedVars__ = fillFullyMaskedVars
__soilFirstSecondFixedSurfaceUnit__ = soilFirstSecondFixedSurfaceUnit
_createGrib2CtlIdxFiles_ = createGrib2CtlIdxFiles
_createGrib1CtlIdxFiles_ = createGrib1CtlIdxFiles
_convertGrib2FilestoGrib1Files_ = convertGrib2FilestoGrib1Files
__setGrib2TableParameters__ = setGrib2TableParameters
__wgrib2Arguments__ = wgrib2Arguments
# forecast filenames partial name
if __UMtype__ == 'global':
# pass user passed long forecast global model infiles otherwise pass proper infiles.
fcst_fnames = UMInLongFcstFiles if UMInLongFcstFiles else ['umglaa_pb','umglaa_pd', 'umglaa_pe', 'umglaa_pf', 'umglaa_pi']
elif __UMtype__ == 'regional':
# pass user passed long forecast regional model infiles otherwise pass proper infiles.
fcst_fnames = UMInLongFcstFiles if UMInLongFcstFiles else ['umnsaa_pb','umnsaa_pd', 'umnsaa_pe', 'umnsaa_pf', 'umnsaa_pi']
# end of if __UMtype__ == 'global':
# get the current date in YYYYMMDD format
_tmpDir_ = tmpPath
_current_date_ = date
print "\n _current_date_ is %s" % _current_date_
logpath = os.path.join(_tmpDir_, _current_date_)
createDirWhileParallelRacing(logpath)
logfile = 'um2grb2_fcst_stdout_'+ _current_date_ +'_' + utc +'Z.log'
sys.stdout = myLog(os.path.join(logpath, logfile))
# start the timer now
_startT_ = time.time()
# set-up base folders
_inDataPath_ = os.path.join(inPath, _current_date_, utc)
if not os.path.exists(_inDataPath_):
raise ValueError("In datapath does not exists %s" % _inDataPath_)
# end of if not os.path.exists(_inDataPath_):
if convertVars:
        # check whether dependent vars need to be loaded
for var, dvars in _depedendantVars_.iteritems():
if var in convertVars:
for dvar in dvars:
if dvar not in convertVars:
                        _convertVars_.append(dvar) # include dependent var
                        _removeVars_.append(dvar)  # remove dependent var at the end
# end of for var, dvar in _depedendantVars_.iteritems():
    # load only the required file names to avoid unnecessary computations
    # by cross checking with the user defined variables list.
for fpname in fcst_fnames[:]:
        # loop through a copy of fcst_fnames, because the fcst_fnames list
        # may change within this loop.
hr = utc.zfill(3)
### if fileName has some extension, then do not add hr to it.
fileName = fpname + hr if not '.' in fpname else fpname
varNamesSTASH, _, _, _, _ = umfcs.getVarInOutFilesDetails(_inDataPath_, fileName, hr)
        # check whether the user requires this file or not!
        if not set(varNamesSTASH).intersection(convertVars):
            # remove the fpname from fcst_fnames, because the user didn't
            # require any variables from this fpname file.
fcst_fnames.remove(fpname)
print "removed %s from list of files" % fpname
# end of if convertVars:
print "Final fpname list :", fcst_fnames
    # check whether the infiles exist or not!
status = _checkInFilesStatus(_inDataPath_, 'prg', fcst_fnames)
print "in status+++++++++++++++++++++++++++", status
if not status:
raise ValueError("In datapath does not contain the above valid infiles")
    # end of if not status:
_opPath_ = os.path.join(outPath, _current_date_)
createDirWhileParallelRacing(_opPath_)
# define default global lat start, lon end points
slat, elat = (-90., 90.)
# define default global lon start, lon end points
slon, elon = (0., 360.)
# define user defined custom lat & lon start and end points
if latitude:
(slat, elat) = latitude
if slat > elat:
# just make sure while extracting south to north
slat, elat = elat, slat
# and reverse while saving into grib2 file.
_reverseLatitude_ = True
# end of if slat > elat:
_requiredLat_ = (slat, elat)
# end of if latitude:
if os.path.isfile(_targetGridFile_):
# load target grid from user specfied file and make it as target grid.
_targetGrid_ = iris.load(_targetGridFile_)[0]
_doRegrid_ = True
elif targetGridResolution is None:
_doRegrid_ = False
if longitude: (slon, elon) = longitude
        # reduce by one small step if the user passed / default lon is 360.
        # If we write longitude from 0 up to 360, wgrib2 reads it as 0 to 0.
        # To avoid that, just reduce longitude by one small step, only in
        # case of 360.
if longitude: _requiredLon_ = (slon, elon)
else:
if not isinstance(targetGridResolution, (int, float)):
raise ValueError("targetGridResolution must be either int or float")
if longitude: (slon, elon) = longitude
        # reduce by one step if the user passed / default lon is 360. If we
        # write longitude from 0 up to 360, wgrib2 reads it as 0 to 0. To
        # avoid that, just reduce longitude by one grid step, only in case
        # of 360.
if longitude: _requiredLon_ = (slon, elon)
# target grid as 0.25 deg (default) resolution by setting up sample points
# based on coord
# generate lat, lon values
latpoints = numpy.arange(slat, elat+targetGridResolution, targetGridResolution)
lonpoints = numpy.arange(slon, elon+targetGridResolution, targetGridResolution)
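        # For the default grid (slat=-90, elat=90, res=0.25 -- illustrative
        # numbers) numpy.arange yields nominally 721 latitudes from -90.0 to
        # 90.0 and 1440 longitudes (elon already reduced to 359.75); the two
        # trims below only guard against floating-point overshoot past elat/elon.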
# correct lat, lon end points
if latpoints[-1] > elat: latpoints = latpoints[:-1]
if lonpoints[-1] > elon: lonpoints = lonpoints[:-1]
# set target grid lat, lon values pair
_targetGrid_ = [('latitude', latpoints), ('longitude', lonpoints)]
_doRegrid_ = True
    # end of if os.path.isfile(_targetGridFile_):
# check either files are exists or not. delete the existing files in case
# of overwrite option is True, else return without re-converting files.
status = _checkOutFilesStatus(_opPath_, 'prg', _current_date_, utc, overwrite)
    if status == 'FilesExist':
        print "All files already exist. So skipping the convert Fcst files process"
        return # return back without executing the conversion process.
    elif status in [None, 'FilesDoNotExist', 'FilesRemoved']:
        print "Going to start converting Fcst files freshly"
    # end of if status == 'FilesExist':
# do convert for forecast files
convertFilesInParallel(fcst_fnames, ftype='fcst')
time.sleep(30)
# make total time cummulated variables
for (TCV, TCVS, TCSVAR) in [('surface_net_downward_shortwave_flux', 'm01s01i202', 'ssr'),
('surface_net_downward_longwave_flux', 'm01s02i201', 'str'),
('surface_upward_latent_heat_flux', 'm01s03i234', 'slhf'),
('surface_upward_sensible_heat_flux', 'm01s03i217', 'sshf'),
('toa_outgoing_longwave_flux', 'm01s02i205', 'ttr'),
('precipitation_amount', 'm01s05i226', 'tp')]:
if (TCV, TCVS) in convertVars: makeTotalCummulativeVars((TCSVAR, TCV, 'fc', '000'))
# end of for (TCV, TCVS, TCSVAR) ...:
if callBackScript:
time.sleep(30) # required few seconds sleep before further process starts
callBackScript = os.path.abspath(callBackScript)
if not os.path.exists(callBackScript):
print "callBackScript '%s' doenst exist" % callBackScript
return
kwargs = ' --date=%s --outpath=%s --oftype=forecast --utc=%s' % (_current_date_, _opPath_, utc)
scriptExecuteCmd = callBackScript + ' ' + kwargs
# execute user defined call back script with keyword arguments
subprocess.call(scriptExecuteCmd, shell=True)
# end of if callBackScript:
# end of def convertFcstFiles(...):
def convertAnlFiles(inPath, outPath, tmpPath, **kwarg):
global _targetGrid_, _targetGridRes_, _current_date_, _startT_, _tmpDir_, \
_inDataPath_, _opPath_, _doRegrid_, _convertVars_, _requiredLat_, \
_requiredLon_, _createGrib2CtlIdxFiles_, _createGrib1CtlIdxFiles_, \
_convertGrib2FilestoGrib1Files_, __anlFileNameStructure__, \
__LPRINT__, __utc__, __outFileType__, __grib1FilesNameSuffix__, \
__removeGrib2FilesAfterGrib1FilesCreated__, _depedendantVars_, \
_removeVars_, __anl_step_hour__, _requiredPressureLevels_, \
__setGrib2TableParameters__, __anl_aavars_reference_time__, \
__anl_aavars_time_bounds__, _reverseLatitude_, __wgrib2Arguments__, \
__soilFirstSecondFixedSurfaceUnit__, _extraPolateMethod_, _targetGridFile_, \
__UMtype__, __fillFullyMaskedVars__
# load key word arguments
UMtype = kwarg.get('UMtype', 'global')
UMInAnlFiles = kwarg.get('UMInAnlFiles', None)
UMInShortFcstFiles = kwarg.get('UMInShortFcstFiles', None)
targetGridResolution = kwarg.get('targetGridResolution', 0.25)
targetGridFile = kwarg.get('targetGridFile', '')
date = kwarg.get('date', time.strftime('%Y%m%d'))
utc = kwarg.get('utc', '00')
overwrite = kwarg.get('overwrite', False)
lprint = kwarg.get('lprint', False)
convertVars = kwarg.get('convertVars', None)
latitude = kwarg.get('latitude', None)
longitude = kwarg.get('longitude', None)
pressureLevels = kwarg.get('pressureLevels', None)
fillFullyMaskedVars = kwarg.get('fillFullyMaskedVars', None)
extraPolateMethod = kwarg.get('extraPolateMethod', 'auto')
soilFirstSecondFixedSurfaceUnit = kwarg.get('soilFirstSecondFixedSurfaceUnit', 'cm')
anl_step_hour = kwarg.get('anl_step_hour', 6)
anl_aavars_reference_time = kwarg.get('anl_aavars_reference_time', 'shortforecast')
anl_aavars_time_bounds = kwarg.get('anl_aavars_time_bounds', True)
anlFileNameStructure = kwarg.get('anlFileNameStructure', None)
createGrib2CtlIdxFiles = kwarg.get('createGrib2CtlIdxFiles', True)
createGrib1CtlIdxFiles = kwarg.get('createGrib1CtlIdxFiles', False)
convertGrib2FilestoGrib1Files = kwarg.get('convertGrib2FilestoGrib1Files', False)
grib1FilesNameSuffix = kwarg.get('grib1FilesNameSuffix', '1')
removeGrib2FilesAfterGrib1FilesCreated = kwarg.get('removeGrib2FilesAfterGrib1FilesCreated', False)
setGrib2TableParameters = kwarg.get('setGrib2TableParameters', None)
wgrib2Arguments = kwarg.get('wgrib2Arguments', None)
callBackScript = kwarg.get('callBackScript', None)
# assign out file type in global variable
__outFileType__ = 'ana'
# assign the convert vars list of tuples to global variable
if convertVars: _convertVars_ = convertVars
# assign the analysis file name structure
if anlFileNameStructure: __anlFileNameStructure__ = anlFileNameStructure
# set print variables details options
__LPRINT__ = lprint
# update global variables
__UMtype__ = UMtype
__utc__ = utc
__anl_step_hour__ = anl_step_hour
__anl_aavars_reference_time__ = anl_aavars_reference_time
__anl_aavars_time_bounds__ = anl_aavars_time_bounds
__removeGrib2FilesAfterGrib1FilesCreated__ = removeGrib2FilesAfterGrib1FilesCreated
__grib1FilesNameSuffix__ = grib1FilesNameSuffix
_targetGridRes_ = str(targetGridResolution)
_targetGridFile_ = targetGridFile
_extraPolateMethod_ = extraPolateMethod
_requiredPressureLevels_ = pressureLevels
__fillFullyMaskedVars__ = fillFullyMaskedVars
__soilFirstSecondFixedSurfaceUnit__ = soilFirstSecondFixedSurfaceUnit
_createGrib2CtlIdxFiles_ = createGrib2CtlIdxFiles
_createGrib1CtlIdxFiles_ = createGrib1CtlIdxFiles
_convertGrib2FilestoGrib1Files_ = convertGrib2FilestoGrib1Files
__setGrib2TableParameters__ = setGrib2TableParameters
__wgrib2Arguments__ = wgrib2Arguments
# analysis filenames partial name
if __UMtype__ == 'global':
# pass user passed short forecast in files otherwise pass proper infiles.
anl_fnames = UMInShortFcstFiles if UMInShortFcstFiles else ['umglca_pb', 'umglca_pd', 'umglca_pe', 'umglca_pf', 'umglca_pi']
        if utc == '00':
            # pass user passed analysis infiles valid for 00UTC, otherwise
            # prepend the proper infile (list.insert returns None; don't assign it).
            if UMInAnlFiles: anl_fnames = UMInAnlFiles + anl_fnames
            else: anl_fnames.insert(0, 'qwqg00.pp0')
# end of if __UMtype__ == 'global':
# get the current date in YYYYMMDD format
_tmpDir_ = tmpPath
_current_date_ = date
print "\n _current_date_ is %s" % _current_date_
logpath = os.path.join(_tmpDir_, _current_date_)
createDirWhileParallelRacing(logpath)
logfile = 'um2grb2_anal_stdout_'+ _current_date_ +'_' + utc +'Z.log'
sys.stdout = myLog(os.path.join(logpath, logfile))
# start the timer now
_startT_ = time.time()
# set-up base folders
_inDataPath_ = os.path.join(inPath, _current_date_, utc)
if not os.path.exists(_inDataPath_):
raise ValueError("In datapath does not exists %s" % _inDataPath_)
# end of if not os.path.exists(_inDataPath_):
if convertVars:
        # check whether dependent vars need to be loaded
for var, dvars in _depedendantVars_.iteritems():
if var in convertVars:
for dvar in dvars:
if dvar not in convertVars:
                        _convertVars_.append(dvar) # include dependent var
                        _removeVars_.append(dvar)  # remove dependent var at the end
# end of for var, dvar in _depedendantVars_.iteritems():
    # load only the required file names to avoid unnecessary computations
    # by cross checking with the user defined variables list.
for fpname in anl_fnames[:]:
        # loop through a copy of anl_fnames, because the anl_fnames list
        # may change within this loop.
hr = utc.zfill(3)
### if fileName has some extension, then do not add hr to it.
fileName = fpname + hr if not '.' in fpname else fpname
varNamesSTASH, _, _, _, _ = umfcs.getVarInOutFilesDetails(_inDataPath_, fileName, hr)
        # check whether the user requires this file or not!
        if not set(varNamesSTASH).intersection(convertVars):
            # remove the fpname from anl_fnames, because the user didn't
            # require any variables from this fpname file.
anl_fnames.remove(fpname)
print "removed %s from list of files" % fpname
# end of if convertVars:
print "Final fpname list :", anl_fnames
    # check whether the infiles exist or not!
status = _checkInFilesStatus(_inDataPath_, 'ana', anl_fnames)
if not status:
raise ValueError("In datapath does not contain the above valid infiles")
    # end of if not status:
_opPath_ = os.path.join(outPath, _current_date_)
createDirWhileParallelRacing(_opPath_)
# define default global lat start, lon end points
slat, elat = (-90., 90.)
# define default global lon start, lon end points
slon, elon = (0., 360.)
# define user defined custom lat & lon start and end points
if latitude:
(slat, elat) = latitude
if slat > elat:
# just make sure while extracting south to north
slat, elat = elat, slat
# and reverse while saving into grib2 file.
_reverseLatitude_ = True
# end of if slat > elat:
_requiredLat_ = (slat, elat)
# end of if latitude:
if os.path.isfile(_targetGridFile_):
        # load target grid from the user specified file and make it the target grid.
_targetGrid_ = iris.load(_targetGridFile_)[0]
_doRegrid_ = True
elif targetGridResolution is None:
_doRegrid_ = False
if longitude: (slon, elon) = longitude
        # reduce one step if the user passed / default lon is 360. If we write
        # longitude from 0 up to 360, wgrib2 reads it as 0 to 0. To avoid that,
        # just reduce one small step in longitude, only in case of 360
        # (see the sketch after this function).
if int(elon) == 360: elon -= 0.0001
if longitude: _requiredLon_ = (slon, elon)
else:
if not isinstance(targetGridResolution, (int, float)):
raise ValueError("targetGridResolution must be either int or float")
if longitude: (slon, elon) = longitude
        # reduce one step if the user passed / default lon is 360. If we write
        # longitude from 0 up to 360, wgrib2 reads it as 0 to 0. To avoid that,
        # just reduce one step in longitude, only in case of 360.
if int(elon) == 360: elon -= targetGridResolution
if longitude: _requiredLon_ = (slon, elon)
# target grid as 0.25 deg (default) resolution by setting up sample points
# based on coord
# generate lat, lon values
latpoints = numpy.arange(slat, elat+targetGridResolution, targetGridResolution)
lonpoints = numpy.arange(slon, elon+targetGridResolution, targetGridResolution)
# correct lat, lon end points
if latpoints[-1] > elat: latpoints = latpoints[:-1]
if lonpoints[-1] > elon: lonpoints = lonpoints[:-1]
# set target grid lat, lon values pair
_targetGrid_ = [('latitude', latpoints), ('longitude', lonpoints)]
_doRegrid_ = True
# end of if os.path.isfile(_targetGridFile_):
print "_reverseLatitude_ =", _reverseLatitude_
    # check whether the out files exist or not. Delete the existing files in
    # case the overwrite option is True, else return without re-converting files.
status = _checkOutFilesStatus(_opPath_, 'ana', _current_date_, utc, overwrite)
    if status == 'FilesExist':
        print "All files already exist. So skipping the convert Anl files process"
        return # return back without executing conversion process.
    elif status in [None, 'FilesDoNotExist', 'FilesRemoved']:
        print "Going to start converting Anl files freshly"
    # end of if status == 'FilesExist':
# do convert for analysis files
convertFilesInParallel(anl_fnames, ftype='anl')
if callBackScript:
        time.sleep(30) # a few seconds' sleep is required before the further process starts
callBackScript = os.path.abspath(callBackScript)
if not os.path.exists(callBackScript):
print "callBackScript '%s' doenst exist" % callBackScript
return
kwargs = ' --date=%s --outpath=%s --oftype=analysis --utc=%s' % (_current_date_, _opPath_, utc)
scriptExecuteCmd = callBackScript + ' ' + kwargs
# execute user defined call back script with keyword arguments
subprocess.call(scriptExecuteCmd, shell=True)
# end of if callBackScript:
# end of def convertAnlFiles(...):
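# --- Illustrative sketch (not part of the original converter) ---
# The blocks above build a regular lat/lon target grid and nudge a 360-degree
# end longitude, because wgrib2 reads a 0..360 longitude range as 0..0. The
# helper below is a minimal, hypothetical stand-alone version of that logic,
# assuming plain numpy and a grid resolution in degrees; the names are ours,
# not from this module.
def _sketch_build_target_grid(slat, elat, slon, elon, res):
    import numpy
    # step back one cell so wgrib2 does not wrap 0..360 into 0..0
    if int(elon) == 360:
        elon -= res
    latpoints = numpy.arange(slat, elat + res, res)
    lonpoints = numpy.arange(slon, elon + res, res)
    # numpy.arange may overshoot the end point; trim it back if so
    if latpoints[-1] > elat: latpoints = latpoints[:-1]
    if lonpoints[-1] > elon: lonpoints = lonpoints[:-1]
    return [('latitude', latpoints), ('longitude', lonpoints)]
# Example: _sketch_build_target_grid(-90., 90., 0., 360., 0.25) gives
# 721 latitudes (-90 .. 90) and 1440 longitudes (0 .. 359.75).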
#################################### EPS STUFF ###############################
def packEnsembles(arg, **kwarg):
global _targetGrid_, _targetGridRes_, _startT_, _inDataPath_, _opPath_, \
_preExtension_, _ncfilesVars_, _requiredLat_, _requiredLon_, \
_doRegrid_, __utc__, _requiredPressureLevels_, __LPRINT__, \
__outg2files__, _lock_, _accumulationVars_, __fcst_step_hour__, \
_targetGridFile_, _extraPolateMethod_, _current_date_, \
_reverseLatitude_, _precipVars_, _maskOverOceanVars_, __end_long_fcst_hour__
infiles, varNamesSTASHFcstHour = arg
varName, varSTASH, fhr = varNamesSTASHFcstHour
# update function for tweaking grib messages
if (varName, varSTASH) in _accumulationVars_:
        # update the forecast hour, since precipitation_amount is an accumulated
        # var, not an instantaneous one.
fhr = fhr-3 if fhr else fhr+3
# end of if (varName, varSTASH) in [('precipitation_amount', 'm01s05i226')]:
simulated_hr = __utc__
# define variable name constraint
varConstraint = iris.Constraint(name=varName)
    # define variable STASH code constraint
STASHConstraint = iris.AttributeConstraint(STASH=varSTASH)
#
forecast_period_constraint = iris.Constraint(forecast_period=fhr)
# define (simulated_hr) forecast_reference_time constraint
fcstRefTimeConstraint = iris.Constraint(forecast_reference_time=PartialDateTime(hour=simulated_hr))
    # Define default lat, lon, pressure constraints (None just brings global model data)
latConstraint, lonConstraint, pressureConstraint = None, None, None
if _requiredLat_:
# make constraint of required latitude
latConstraint = iris.Constraint(latitude=lambda cell:
_requiredLat_[0] <= cell <= _requiredLat_[-1])
if _requiredLon_:
# make constraint of required longitude
lonConstraint = iris.Constraint(longitude=lambda cell:
_requiredLon_[0] <= cell <= _requiredLon_[-1])
if _requiredPressureLevels_:
# make constraint of required pressure
# To slice out particular pressure levels (like 850, 200, 1000 alone)
# then the following way is essential.
pressureConstraint = iris.Constraint(pressure=lambda cell:
int(cell.point) in _requiredPressureLevels_)
# make load constraints together
loadConstraints = varConstraint & STASHConstraint & forecast_period_constraint & latConstraint & lonConstraint
# initialize
ensembleData, ensCube, dshape = None, None, None
print "packEnsembles Started using", infiles
for idx, infile in enumerate(infiles):
print "extracting ensemble data", infile
# load ensemble cube with all constraints
ensCube = getCubeData(infile, constraints=loadConstraints)
if not ensCube: raise ValueError("unable to extract variable %s %s %d from %s" % (varName, varSTASH, fhr, infile))
# Got variable successfully!
ensCube = ensCube[0]
# extract pressure levels
if pressureConstraint and ensCube.coords('pressure'):
if (varName, varSTASH) == ('geopotential_height', 'm01s16i202'):
# extract 50 hPa only to gh variable for TIGGE
pressureC = iris.Constraint(pressure=lambda cell:
int(cell.point) in _requiredPressureLevels_+[50])
ensCube = ensCube.extract(pressureC)
else:
ensCube = ensCube.extract(pressureConstraint)
        # end of if pressureConstraint and ensCube.coords('pressure'):
if ensCube.has_lazy_data():
print "Loaded", ensCube.standard_name, "into memory",
            ## Accessing ensCube.data (even just for printing) loads the full
            ## data into memory instead of keeping it lazy. Especially for
            ## dust AOD we must force the full load, otherwise the data will
            ## be treated as zeros only instead of 6 pseudo_level data.
print "- min", ensCube.data.min(), "max", ensCube.data.max(),
print "has_lazy_data =", ensCube.has_lazy_data()
# end of if ensCube.has_lazy_data():
        exmode = None # required, when the user didn't do any regrid
# interpolate it as per targetGridResolution deg resolution by
# setting up sample points based on coord
if _doRegrid_:
if __LPRINT__: print "From shape", ensCube.shape
if (varName, varSTASH) in _precipVars_:
                # DO NOT APPLY iris.analysis.Linear(extrapolation_mode='mask'),
                # which writes nan everywhere for the snowfall_flux,
                # rainfall_flux and precipitation_flux. So do not apply that.
exmode = 'linear'
else:
# In general all the other variables should not be
# extrapolated over masked grid points.
exmode = 'mask'
# end of if (...):
# However, if user specified custom method do that!
exmode = _extraPolateMethod_ if _extraPolateMethod_ != 'auto' else exmode
# but make sure that soil variables (or whichever variables do not have values over ocean)
# do not extrapolate over ocean/masked regions. Otherwise, it will write only nan.
exmode = 'mask' if varName in _maskOverOceanVars_ else exmode
if os.path.isfile(_targetGridFile_):
print "\n Regridding data to %s degree spatial resolution based on file %s\n" % (_targetGrid_.shape, _targetGridFile_)
# Do regrid based on user specfied target grid file.
scheme = iris.analysis.Linear(extrapolation_mode=exmode)
regdCube = ensCube.regrid(_targetGrid_, scheme)
print "regrid data shape", regdCube.shape
else:
                # Do regrid based on the user specified target grid resolution number.
print "\n Regridding data to %sx%s degree spatial resolution \n" % (_targetGridRes_, _targetGridRes_)
try:
                    # This linear interpolation will extrapolate over the ocean even
                    # though the original data has no values there, and vice versa.
                    # So let's be aware of this.
regdCube = ensCube.interpolate(_targetGrid_, iris.analysis.Linear(extrapolation_mode=exmode))
except Exception as e:
print "ALERT !!! Error while regridding!! %s" % str(e)
print " So skipping this without saving data"
continue
# end of try:
else:
            # do not apply regrid. This is a temporary fix.
regdCube = ensCube
# end of if _doRegrid_:
if _reverseLatitude_:
# Need to reverse latitude from SN to NS
rcsh = len(regdCube.data.shape)
if rcsh == 3:
regdCube.data = regdCube.data[:,::-1,:]
elif rcsh == 2:
regdCube.data = regdCube.data[::-1,:]
lat = regdCube.coords('latitude')[0]
lat.points = lat.points[::-1]
# end of if _reverseLatitude_:
unit = regdCube.units
if varName.endswith('_flux'):
# applicable only to TIGGE
# converting flux unit from time average into time integrated
# by multiplying 60*60*6 = 21600 seconds in 6-hour
regdCube.data *= 21600.0
unit = Unit('W m-2 s') # changed unit from (W m-2) into (W m-2 s)
print "Multiplied data with 60*60*6 seconds to make flux variable into time-intergrated"
print regdCube.data.min(), regdCube.data.max()
# end of if varName.endswith('_flux'):
if (varName, varSTASH) == ('toa_outgoing_longwave_flux', 'm01s02i205'):
# https://software.ecmwf.int/wiki/display/TIGGE/Rules+for+data+encoding+and+exchange
            # as per the TIGGE standard, the flux sign convention is positive downwards.
            # So here toa-olr should be negative, but the NCUM model just gives the
            # magnitude. So let's fix this ourselves.
if regdCube.data.max() > 0 and regdCube.data.min() > 0:
# convert to negative if only this data sign is positive.
regdCube.data *= -1 # multiply with -1 to indicate this is upward flux.
# end of if (varName, varSTASH) == ('toa_outgoing_longwave_flux', 'm01s02i205'):
if (varName, varSTASH) in _precipVars_:
            # Since we are not using the 'mask' extrapolation option while
            # doing the linear regrid, negative values appear in extrapolated
            # grid points. So let's clamp the minimum value to 0.
regdCube.data[regdCube.data < 0.0] = 0.0
# end of if (varName, varSTASH) in _precipVars_:
if (varName, varSTASH) in [('land_binary_mask', 'm01s00i030')]:
regdCube.data[regdCube.data > 0] = 1
# trying to keep values either 0 or 1. Not fraction!
regdCube.data = numpy.ma.array(regdCube.data, dtype=numpy.int)
# end of if (varName, varSTASH) in [('land_binary_mask', 'm01s00i030')]:
if (varName, varSTASH) in [('surface_altitude', 'm01s00i033')]:
regdCube.standard_name = None
regdCube.long_name = 'orography'
# end of if (varName, varSTASH) in [('surface_altitude', 'm01s00i033')]:
if exmode == 'mask':
            # For the above set of variables we should not convert into a
            # masked array. Otherwise their full data goes as nan.
# convert data into masked array
regdCube.data = numpy.ma.masked_array(regdCube.data,
dtype=numpy.float64, fill_value=9.999e+20)
if (varName, varSTASH) in [('moisture_content_of_soil_layer', 'm01s08i223'),
('sea_ice_area_fraction', 'm01s00i031'),
('sea_ice_thickness', 'm01s00i032'),]:
                # We should assign 0 instead of 1e-15, only for these vars!
regdCube.data[regdCube.data <= 1e-15] = 0.0
elif (varName, varSTASH) == ('soil_temperature', 'm01s03i238'):
                # We should assign the min instead of 1e-15, only for this var,
                # because 0 makes no sense when the temperature unit is Kelvin.
nmin = numpy.ma.masked_less_equal(regdCube.data, 1e-15).min()
regdCube.data[regdCube.data <= 1e-15] = nmin
# http://www.cpc.ncep.noaa.gov/products/wesley/g2grb.html
            # Says that the value 9.999e+20 indicates missingValue in grib2.
            # By default g2ctl.pl generates "undefr 9.999e+20", so we must
            # keep the fill_value / missingValue as 9.999e+20 only.
numpy.ma.set_fill_value(regdCube.data, 9.999e+20)
# end of if exmode == 'mask':
if (varName, varSTASH) == ('soil_temperature', 'm01s03i238'):
# convert to 20cm layer as per TIGGE
regdCube = convertSoilVarto20cm(regdCube)
# end of if (varName, varSTASH) == ('soil_temperature', 'm01s03i238'):
if __fillFullyMaskedVars__ is not None and isinstance(regdCube.data, numpy.ma.masked_array):
# yes, it is ma array
if regdCube.data.mask.all():
# Now data is fully masked. So lets fill with user passed value.
# And still create ma array
regdCube.data = regdCube.data.filled(__fillFullyMaskedVars__)
print "filled masked vars", regdCube.data
regdCube.data = numpy.ma.masked_array(regdCube.data.filled(__fillFullyMaskedVars__),
fill_value=9.999e+20)
elif regdCube.data.min() == regdCube.data.max():
                # Both min and max are the same value, but the mask is not fully
                # True, so the previous condition was not executed. Anyhow, let's
                # fill the whole field with the fillFullyMaskedVars value.
print "Both min and max are same. So lets fillFullyMaskedVars as", __fillFullyMaskedVars__
regdCube.data = numpy.ma.masked_array(regdCube.data.filled(__fillFullyMaskedVars__),
fill_value=9.999e+20)
# end of if __fillFullyMaskedVars__ and ...:
print "regrid done"
# introduce ensemble dimension at first axis
dshape = list(regdCube.data.shape)
dshape.insert(0, 1)
ensembleData = regdCube.data.reshape(dshape)
print "taken into memory of all ensembles", ensembleData.shape
# convert data into masked array
ensembleData = numpy.ma.masked_array(ensembleData, dtype=numpy.float64)
if (varName, varSTASH) in [('precipitation_amount', 'm01s05i226'),]:
# precipitation should not go less than 0.
ensembleData.data[ensembleData.data < 0] = 0.0
# end of if ...:
# http://www.cpc.ncep.noaa.gov/products/wesley/g2grb.html
        # Says that the value 9.999e+20 indicates missingValue in grib2.
        # By default g2ctl.pl generates "undefr 9.999e+20", so we must
        # keep the fill_value / missingValue as 9.999e+20 only.
numpy.ma.set_fill_value(ensembleData, 9.999e+20)
totEns = len(ensembleData)
member = int(infile.split('/')[-1].split('_')[0]) # get member number
# create ensemble coordinate
enscoord = iris.coords.DimCoord(numpy.array(member, dtype=numpy.int32),
standard_name='realization', units=Unit('no_unit'),
long_name='ensemble_member')
# get list of dimension coordinates
dim_coords = list(regdCube.dim_coords)
# insert ensemble dimension at first axis
dim_coords.insert(0, enscoord)
# generate list of tuples contain index and coordinate
dim_coords = [(coord, i) for i,coord in enumerate(dim_coords)]
# set all other dimensions
ensembleData = iris.cube.Cube(ensembleData, regdCube.standard_name,
regdCube.long_name, regdCube.var_name,
unit, regdCube.attributes,
regdCube.cell_methods, dim_coords)
# add all time coordinates
for axc in regdCube.aux_coords: ensembleData.add_aux_coord(axc)
print "setting aux_coords to", ensembleData.shape, varName, fhr
# create cell method for ensembles
cm = iris.coords.CellMethod('realization', ('realization',),
intervals=('1',), comments=(' ENS',))
# add cell_methods to the ensembleData
if regdCube.cell_methods:
if (varName, varSTASH) in _accumulationVars_:
            # The following variables' cell_methods should show accumulated/sum,
            # but the UM pp code doesn't support accumulation. So let's fix it here!
cm1 = iris.coords.CellMethod('sum', ('time',),
intervals=('1 hour',), comments=('6 hour accumulation',))
ensembleData.cell_methods = (cm, cm1)
else:
ensembleData.cell_methods = (cm, regdCube.cell_methods[0])
else:
ensembleData.cell_methods = (cm,)
print ensembleData
# make memory free
del regdCube
print "To ensembleData shape", ensembleData.shape
print ensembleData
try:
save_tigge_tweaked_messages([ensembleData])
except Exception as e:
print "ALERT !!! Error while saving!! %s" % str(e)
print " So skipping this without saving data"
continue
# end of try:
print "saved!"
print ensembleData.standard_name, ensembleData.data.min(), ensembleData.data.max()
print ensembleData
# make memory free
del ensembleData
# end of for idx, infile in enumerate(infiles):
# end of def packEnsembles(arg):
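# --- Illustrative sketch (not part of the original converter) ---
# packEnsembles() above converts time-averaged fluxes (W m-2) into 6-hour
# time-integrated fluxes (W m-2 s) by multiplying with 60*60*6 = 21600
# seconds. A minimal numpy-only version of that arithmetic, with
# hypothetical names:
def _sketch_time_integrate_flux(mean_flux, hours=6):
    import numpy
    seconds = hours * 60 * 60  # 6 hours -> 21600 seconds
    return numpy.asarray(mean_flux, dtype=numpy.float64) * seconds
# Example: a constant 100 W m-2 over 6 hours integrates to 2.16e6 W m-2 s.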
def packEnsemblesInParallel(arg):
global _startT_, _inDataPath_, __fcst_step_hour__, __LPRINT__, \
_opPath_, _ensemble_count_, __outg2files__, __start_long_fcst_hour__, \
_current_date_, _ensemble_member_
fpname, hr = arg
step_fcst_hour = __fcst_step_hour__
ensembleFiles_allConstraints_list = []
fexthr = hr if int(hr) else '000'
if _ensemble_member_ is not None:
# generate file name for particular ensemble_member
ensembleFiles = [os.path.join(_inDataPath_, str(_ensemble_member_).zfill(3)+'_'+fpname+fexthr)]
else:
        # generate file names for all ensemble members from 0
ensembleFiles = [os.path.join(_inDataPath_, str(ens).zfill(3)+'_'+fpname+fexthr)
for ens in range(0, _ensemble_count_+1, 1)]
    fileName = '000_' + fpname + '000' # sample file to get the variable names.
fname = os.path.join(_inDataPath_, fileName)
# get variable indices
varNamesSTASH, fcstHours, doMultiHourlyMean, infile = umeps.getVarInOutFilesDetails(_inDataPath_,
fileName, hr)
for fname in ensembleFiles:
if not os.path.isfile(fname):
print "The file doesn't exists: %s.. \n" %fname
return
# end of if not os.path.isfile(fname):
# end of for fname in ensembleFiles:
if _convertVars_:
        # load only the needed variables from this file, sorted as per the user specified variable order!
varNamesSTASH = [vns for vns in _convertVars_ if vns in varNamesSTASH]
if not varNamesSTASH:
print "No varibale selected to load from the file '%s' " % fname
if __LPRINT__:
print "Because global variable _convertVars_ doesn't contain any one of the following variables"
print "\n".join([str(i+1)+' : ' + str(tu) for i, tu in enumerate(varNamesSTASH)])
return None
else:
print "The following variables are going to be converted from file ", fname
print "\n".join([str(i+1)+' : ' + str(tu) for i, tu in enumerate(varNamesSTASH)])
# end of if not varNamesSTASH:
for varName, varSTASH in varNamesSTASH:
for fhr in fcstHours:
allConstraints = [varName, varSTASH, fhr]
ensembleFiles_allConstraints_list.append((ensembleFiles, allConstraints))
# end of for varName, varSTASH in varNamesSTASH:
print "Started Processing the file: \n"
    ## get the no of child processes to create fcst ensemble files
nchild = len(ensembleFiles_allConstraints_list)
maxprocess = mp.cpu_count()
if nchild > maxprocess: nchild = maxprocess
# create the no of child parallel processes
inner_pool = mp.Pool(processes=nchild)
print "Creating %i (daemon) workers and jobs in child." % nchild
print "parallel ensemble begins for", varName, varSTASH
# pass the (ensemblefileslist, allConstraints, pressureConstraint) as
# argument to take one fcst ensemble file per process / core to regrid it.
results = inner_pool.map(packEnsembles, ensembleFiles_allConstraints_list)
# closing and joining child pools
inner_pool.close()
inner_pool.join()
# parallel end
# end of if __fcst_step_hour__ == 6:
print "Time taken to convert the file: %8.5f seconds \n" %(time.time()-_startT_)
print "Finished converting file: %s into grib2 format for fcst file: %s \n" %(fpname, hr)
# end of def packEnsemblesInParallel(arg):
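# --- Illustrative sketch (not part of the original converter) ---
# Both parallel drivers above size their pools by the number of jobs, capped
# by mp.cpu_count() (and sometimes by a hard limit of 16). A minimal,
# hypothetical version of that pattern using the standard multiprocessing
# module:
def _sketch_capped_pool_map(func, jobs, hard_cap=16):
    import multiprocessing as mp
    nproc = min(len(jobs), mp.cpu_count(), hard_cap)
    pool = mp.Pool(processes=nproc)
    try:
        return pool.map(func, jobs)
    finally:
        # always close and join, mirroring the pool handling above
        pool.close()
        pool.join()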
# Start the convertFilesInParallel function
def convertEPSFilesInParallel(fnames, ftype):
"""
    convertEPSFilesInParallel function calling the sub-functions.
    :param fnames: list of partial file names
    :param ftype: file type ('fcst' / 'forecast')
"""
global _startT_, _tmpDir_, _opPath_, __end_long_fcst_hour__,\
__fcst_step_hour__, _createGrib2CtlIdxFiles_, \
__start_long_fcst_hour__, _current_date_
fcst_times = [str(hr).zfill(3) for hr in range(__start_long_fcst_hour__, __end_long_fcst_hour__, 6)]
for hr in fcst_times:
fcst_filenames = [(fname, hr) for fname in fnames]
## get the no of files and
fcst_filenames_len = len(fcst_filenames)
nprocesses = fcst_filenames_len
maxprocess = mp.cpu_count()
# lets create no of parallel process w.r.t no of files.
if maxprocess > 16: maxprocess = 16
if nprocesses > maxprocess: nprocesses = maxprocess
# parallel begin - 1
pool = _MyPool(nprocesses)
print "Creating %d (non-daemon) workers and jobs in convertFilesInParallel process." % nprocesses
if ftype in ['fcst', 'forecast']:
results = pool.map(packEnsemblesInParallel, fcst_filenames)
else:
raise ValueError("Unknown file type !")
# end of if ftype in ['fcst', 'forecast']:
# closing and joining master pools
pool.close()
pool.join()
# parallel end - 1
print "Total time taken to convert %d files was: %8.5f seconds \n" %(len(fcst_filenames),(time.time()-_startT_))
# fcst_times = [str(hr).zfill(3) for hr in range(__start_long_fcst_hour__, __end_long_fcst_hour__, 6)]
# fcst_filenames = [(fname, hr) for fname in fnames for hr in fcst_times]
# ## get the no of files and
# fcst_filenames_len = len(fcst_filenames)
# nprocesses = fcst_filenames_len
# maxprocess = mp.cpu_count()
# if nprocesses > maxprocess: nprocesses = maxprocess
# # lets create no of parallel process w.r.t no of files.
# if maxprocess > 16: maxprocess = 16
# if fcst_filenames_len > 16: maxprocess -= 1
# den, rem = fcst_filenames_len/maxprocess, fcst_filenames_len%maxprocess
# if rem: den += 1
# prange = range(0, maxprocess*den+1, maxprocess)
# hours_set = [(prange[i], prange[i+1]) for i in range(len(prange)-1)]
# for st_idx, en_idx in hours_set:
# print "start_idx, end_idx", st_idx, en_idx
# # parallel begin - 1
# pool = _MyPool(nprocesses)
# print "Creating %d (non-daemon) workers and jobs in convertFilesInParallel process." % nprocesses
# if ftype in ['fcst', 'forecast']:
# results = pool.map(packEnsemblesInParallel, fcst_filenames[st_idx:en_idx])
# else:
# raise ValueError("Unknown file type !")
# # end of if ftype in ['fcst', 'forecast']:
# # closing and joining master pools
# pool.close()
# pool.join()
# # parallel end - 1
# print "Total time taken to convert %d files was: %8.5f seconds \n" %(len(fcst_filenames),(time.time()-_startT_))
return
# end of def convertEPSFilesInParallel(fnames):
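# --- Illustrative sketch (not part of the original converter) ---
# convertEPSFilesInParallel() above pairs each partial file name with a
# zero-padded forecast-hour tag before mapping them over the pool. The same
# pairing, stand-alone and with hypothetical defaults:
def _sketch_fcst_jobs(fnames, start_hr=6, end_hr=240, step_hr=6):
    hours = [str(hr).zfill(3) for hr in range(start_hr, end_hr, step_hr)]
    return [(fname, hr) for hr in hours for fname in fnames]
# Example: _sketch_fcst_jobs(['pd', 'pg'])[:3] returns
# [('pd', '006'), ('pg', '006'), ('pd', '012')].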
def convertEPSFcstFiles(inPath, outPath, tmpPath, **kwarg):
global _targetGrid_, _targetGridRes_, _current_date_, _startT_, _tmpDir_, \
_inDataPath_, _opPath_, _doRegrid_, _convertVars_, _requiredLat_, \
_requiredLon_, _createGrib2CtlIdxFiles_, _createGrib1CtlIdxFiles_, \
_convertGrib2FilestoGrib1Files_, __fcstFileNameStructure__, \
__LPRINT__, __utc__, __fcst_step_hour__, __start_long_fcst_hour__, \
__end_long_fcst_hour__, __outFileType__, __grib1FilesNameSuffix__, \
__removeGrib2FilesAfterGrib1FilesCreated__, _depedendantVars_, \
_removeVars_, _requiredPressureLevels_, __setGrib2TableParameters__, \
__outg2files__, __start_long_fcst_hour__, __wgrib2Arguments__, \
__UMtype__, _preExtension_, _extraPolateMethod_, _targetGridFile_, \
__fillFullyMaskedVars__, _reverseLatitude_, epsMeanVars, _ensemble_member_
# load key word arguments
UMtype = kwarg.get('UMtype', 'ensemble')
targetGridResolution = kwarg.get('targetGridResolution', None)
targetGridFile = kwarg.get('targetGridFile', '')
date = kwarg.get('date', time.strftime('%Y%m%d'))
utc = kwarg.get('utc', '00')
overwrite = kwarg.get('overwrite', False)
lprint = kwarg.get('lprint', False)
convertVars = kwarg.get('convertVars', None)
convertVarIdx = kwarg.get('convertVarIdx', None)
ensemble_member = kwarg.get('ensemble_member', None)
latitude = kwarg.get('latitude', None)
longitude = kwarg.get('longitude', None)
pressureLevels = kwarg.get('pressureLevels', None)
fillFullyMaskedVars = kwarg.get('fillFullyMaskedVars', None)
extraPolateMethod = kwarg.get('extraPolateMethod', 'auto')
fcst_step_hour = kwarg.get('fcst_step_hour', 6)
start_long_fcst_hour = kwarg.get('start_long_fcst_hour', 6)
end_long_fcst_hour = kwarg.get('end_long_fcst_hour', 240)
fcstFileNameStructure = kwarg.get('fcstFileNameStructure', None)
createGrib2CtlIdxFiles = kwarg.get('createGrib2CtlIdxFiles', True)
createGrib1CtlIdxFiles = kwarg.get('createGrib1CtlIdxFiles', False)
convertGrib2FilestoGrib1Files = kwarg.get('convertGrib2FilestoGrib1Files', False)
grib1FilesNameSuffix = kwarg.get('grib1FilesNameSuffix', '1')
removeGrib2FilesAfterGrib1FilesCreated = kwarg.get('removeGrib2FilesAfterGrib1FilesCreated', False)
callBackScript = kwarg.get('callBackScript', None)
setGrib2TableParameters = kwarg.get('setGrib2TableParameters', None)
wgrib2Arguments = kwarg.get('wgrib2Arguments', None)
# assign out file type in global variable
__outFileType__ = 'fcst'
# set only needed convert variable by selecting appropriate index passed by user
if convertVarIdx and convertVars: convertVars = [convertVars[convertVarIdx-1]]
# assign the convert vars list of tuples to global variable
if convertVars: _convertVars_ = convertVars
if ensemble_member is not None: _ensemble_member_ = ensemble_member
    # assign the forecast file name structure
if fcstFileNameStructure: __fcstFileNameStructure__ = fcstFileNameStructure
# set print variables details options
__LPRINT__ = lprint
# update global variables
__UMtype__ = UMtype
__utc__ = utc
__fcst_step_hour__ = fcst_step_hour
__start_long_fcst_hour__ = start_long_fcst_hour
__end_long_fcst_hour__ = end_long_fcst_hour
__removeGrib2FilesAfterGrib1FilesCreated__ = removeGrib2FilesAfterGrib1FilesCreated
__grib1FilesNameSuffix__ = grib1FilesNameSuffix
_targetGridRes_ = str(targetGridResolution)
_targetGridFile_ = targetGridFile
_requiredLat_ = latitude
_requiredLon_ = longitude
_requiredPressureLevels_ = pressureLevels
_extraPolateMethod_ = extraPolateMethod
__fillFullyMaskedVars__ = fillFullyMaskedVars
_createGrib2CtlIdxFiles_ = createGrib2CtlIdxFiles
_createGrib1CtlIdxFiles_ = createGrib1CtlIdxFiles
_convertGrib2FilestoGrib1Files_ = convertGrib2FilestoGrib1Files
__setGrib2TableParameters__ = setGrib2TableParameters
__wgrib2Arguments__ = wgrib2Arguments
# forecast filenames partial name
fcst_fnames = ['pd', 'pg']
# get the current date in YYYYMMDD format
_tmpDir_ = tmpPath
_current_date_ = date
print "\n _current_date_ is %s" % _current_date_
logpath = os.path.join(_tmpDir_, _current_date_)
createDirWhileParallelRacing(logpath)
logfile = 'um2grb2_fcst_stdout_'+ _current_date_ +'_' + utc +'Z.log'
sys.stdout = myLog(os.path.join(logpath, logfile))
# start the timer now
_startT_ = time.time()
# set-up base folders
_inDataPath_ = os.path.join(inPath, _current_date_)
if not os.path.exists(_inDataPath_):
raise ValueError("In datapath does not exists %s" % _inDataPath_)
# end of if not os.path.exists(_inDataPath_):
if convertVars:
        # check whether dependent vars need to be loaded
for var, dvars in _depedendantVars_.iteritems():
if var in convertVars:
for dvar in dvars:
if dvar not in convertVars:
                        _convertVars_.append(dvar) # include the dependent var
                        _removeVars_.append(dvar)  # remove the dependent var at the end
# end of for var, dvar in _depedendantVars_.iteritems():
for fcst_fname in fcst_fnames[:]:
            # load only required file names to avoid unnecessary computations
            # by cross checking with the user defined variables list.
hr = 0
## if fileName has some extension, then do not add hr to it.
fileName = '000_' + fcst_fname + '1'
varNamesSTASH, _, _, _ = umeps.getVarInOutFilesDetails(_inDataPath_, fileName, hr)
print "varNamesSTASH", varNamesSTASH
print "convertVars", convertVars
            # check whether the user requires this file or not!
if not set(varNamesSTASH).intersection(convertVars):
                # remove fcst_fname from the list, because the user didn't
                # require variables from this fcst_fnames file.
fcst_fnames.remove(fcst_fname)
print "removed %s from list of files" % fcst_fname
# end of for fcst_fname in fcst_fnames:
print "Final fname list :", fcst_fnames
# end of if convertVars:
for fcst_fname in fcst_fnames:
        # check whether the infiles exist or not!
status = umeps._checkInFilesStatus(_inDataPath_, 'prg', fcst_fname,
start_long_fcst_hour=__start_long_fcst_hour__,
end_long_fcst_hour=__end_long_fcst_hour__,
fcst_step_hour=__fcst_step_hour__,
ensemble_count=_ensemble_count_)
print "in status+++++++++++++++++++++++++++", status
if not status:
raise ValueError("In datapath does not contain the above valid infiles")
        # end of if not status:
# end of for fcst_fname in fcst_fnames:
_opPath_ = os.path.join(outPath, _current_date_)
createDirWhileParallelRacing(_opPath_)
# define default global lat start, lon end points
slat, elat = (-90., 90.)
# define default global lon start, lon end points
slon, elon = (0., 360.)
# define user defined custom lat & lon start and end points
if latitude:
(slat, elat) = latitude
if slat > elat:
# just make sure while extracting south to north
slat, elat = elat, slat
# and reverse while saving into grib2 file.
_reverseLatitude_ = True
# end of if slat > elat:
_requiredLat_ = (slat, elat)
# end of if latitude:
if os.path.isfile(_targetGridFile_):
        # load target grid from the user specified file and make it the target grid.
_targetGrid_ = iris.load(_targetGridFile_)[0]
_doRegrid_ = True
elif targetGridResolution is None:
_doRegrid_ = False
if longitude: (slon, elon) = longitude
        # reduce one step if the user passed / default lon is 360. If we write
        # longitude from 0 up to 360, wgrib2 reads it as 0 to 0. To avoid that,
        # just reduce one small step in longitude, only in case of 360.
if int(elon) == 360: elon -= 0.0001
if longitude: _requiredLon_ = (slon, elon)
else:
if not isinstance(targetGridResolution, (int, float)):
raise ValueError("targetGridResolution must be either int or float")
if longitude: (slon, elon) = longitude
        # reduce one step if the user passed / default lon is 360. If we write
        # longitude from 0 up to 360, wgrib2 reads it as 0 to 0. To avoid that,
        # just reduce one step in longitude, only in case of 360.
if int(elon) == 360: elon -= targetGridResolution
if longitude: _requiredLon_ = (slon, elon)
# target grid as 0.25 deg (default) resolution by setting up sample points
# based on coord
# generate lat, lon values
latpoints = numpy.arange(slat, elat+targetGridResolution, targetGridResolution)
lonpoints = numpy.arange(slon, elon+targetGridResolution, targetGridResolution)
# correct lat, lon end points
if latpoints[-1] > elat: latpoints = latpoints[:-1]
if lonpoints[-1] > elon: lonpoints = lonpoints[:-1]
# set target grid lat, lon values pair
_targetGrid_ = [('latitude', latpoints), ('longitude', lonpoints)]
_doRegrid_ = True
    # end of if os.path.isfile(_targetGridFile_):
    # check whether the out files exist or not. Delete the existing files in
    # case the overwrite option is True, else return without re-converting files.
status = umeps._checkOutFilesStatus(_opPath_, 'prg', _current_date_, utc, overwrite)
    if status == 'FilesExist':
        print "All files already exist. So skipping the convert Fcst files process"
        return # return back without executing conversion process.
    elif status in [None, 'FilesDoNotExist', 'FilesRemoved']:
        print "Going to start converting Fcst files freshly"
    # end of if status == 'FilesExist':
# do convert for forecast files
convertEPSFilesInParallel(fcst_fnames, ftype='fcst')
time.sleep(30)
    # make total time accumulated variables
for (TCV, TCVS, TCSVAR) in [('surface_net_downward_shortwave_flux', 'm01s01i202', 'ssr'),
('surface_net_downward_longwave_flux', 'm01s02i201', 'str'),
('surface_upward_latent_heat_flux', 'm01s03i234', 'slhf'),
('surface_upward_sensible_heat_flux', 'm01s03i217', 'sshf'),
('toa_outgoing_longwave_flux', 'm01s02i205', 'ttr'),
('precipitation_amount', 'm01s05i226', 'tp')]:
if (TCV, TCVS) not in convertVars: continue
if _ensemble_member_ is not None:
            # calculate cumulative variables just for a single/particular member
etype = 'cf' if not int(_ensemble_member_) else 'pf'
cummulated_ens = [(TCSVAR, TCV, etype, str(_ensemble_member_))]
else:
            # calculate cumulative variables for the control run and ensemble members in parallel
cummulated_ens = [(TCSVAR, TCV, 'pf', str(ensno)) for ensno in range(1, _ensemble_count_+1, 1)]
cummulated_ens.insert(0, (TCSVAR, TCV, 'cf', '000'))
# end of if _ensemble_member_ is not None:
## get the no of files and
nprocesses = len(cummulated_ens)
maxprocess = mp.cpu_count()
if nprocesses > maxprocess: nprocesses = maxprocess
# lets create no of parallel process w.r.t no of files.
# parallel begin - 1
pool = _MyPool(nprocesses)
print "Creating %d (non-daemon) workers and jobs in convertFilesInParallel process." % nprocesses
results = pool.map(makeTotalCummulativeVars, cummulated_ens)
# closing and joining master pools
pool.close()
pool.join()
# end of for (TCV, TCVS, TCSVAR) ...:
# pwd = os.getcwd()
# os.chdir(_opPath_) # change to our path
# if __fcst_step_hour__ == 6:
# outg2files = [inf for inf in os.listdir(_opPath_) if 'hr' in inf if _preExtension_ in inf]
# listOfInOutFiles = []
# for fname in outg2files:
# inFn = fname
# outFn = fname.replace(_preExtension_, '')
# listOfInOutFiles.append((inFn, outFn))
# # end of for fname in outg2files:
#
# ## get the no of childs process to create fcst ensemble files
# nchild = len(listOfInOutFiles)
# maxprocess = mp.cpu_count()
# if nchild > maxprocess: nchild = maxprocess
# # create the no of child parallel processes
# inner_pool = mp.Pool(processes=nchild)
# print "Creating %i (daemon) workers and jobs in child." % nchild
# # pass the (ensemblefileslist, allConstraints, pressureConstraint) as
# # argument to take one fcst ensemble file per process / core to regrid it.
# results = inner_pool.map(doWgrib2cmd, listOfInOutFiles)
# # closing and joining child pools
# inner_pool.close()
# inner_pool.join()
# # parallel end
#
# for (inFn, outFn) in listOfInOutFiles:
# print inFn, outFn
# # Lets create ctl and idx file.
# createGrib2CtlIdxFilesFn(outFn, ftype='fcst')
# # remove infile
# os.remove(inFn)
# # end of for inFn, outFn in listOfInOutFiles:
#
# elif __fcst_step_hour__ == 24:
# dy = 'day'+str(int(__start_long_fcst_hour__) / 24).zfill(2)
# outg2files = [inf for inf in os.listdir(_opPath_) if dy in inf if _preExtension_ in inf]
# fname = '_'.join(outg2files[0].split('_')[1:]) # remove STASH alone
# outFn = fname.replace(_preExtension_, '') # remove _preExtension_
#
# for varName, varSTASH in _convertVars_:
# # make unique file name becase we are running in parallel
# if varName == 'air_temperature_maximum':
# inFn = [inf for inf in outg2files if inf.startswith(varSTASH+'-max')]
# elif varName == 'air_temperature_minimum':
# inFn = [inf for inf in outg2files if inf.startswith(varSTASH+'-min')]
# else:
# # Generic all other vars filter with simple varSTASH
# inFn = [inf for inf in outg2files if inf.startswith(varSTASH) if not '-' in inf]
# # end of if varName == 'air_temperature_maximum':
# if not inFn: continue
# inFn = inFn[0]
# if __wgrib2Arguments__ is not None:
# # execute post wgrib2 command in parellel (-ncpu 4 Best speed compare to 32)
# cmd = "%s %s %s %s" % (wgrib2, inFn, __wgrib2Arguments__, outFn)
# print "wgrib2 merge cmd", cmd
# subprocess.call(cmd, shell=True)
# else:
# cubes = iris.load_cubes(inFn)
# iris.fileformats.grib.save_messages(tweaked_messages(cubes),
# outFn, append=True) # save grib2 file
# # end of if __wgrib2Arguments__:
# time.sleep(15)
# if (varName, varSTASH) not in epsMeanVars: os.remove(inFn)
# ## epsMeanVars will be created through callback script. For that
# ## purpose we should not delete those files, because
# ## it requires to create EPS MEAN VSDB INPUT. We have to load
# ## this file only in Python-IRIS. Because IRIS able to read it
# ## properly only for the simple compression algorithm not for the
# ## complex2 (wgrib2) algorithm. IRIS read the values wrongly,
# ## if grib2 is written in complex2 algorithm. So... theses will
# ## be used to read it to create EPS mean and then will be deleted.
# ## Dated : 05-Aug-2016.
# # end of for varName, varSTASH in varNamesSTASH:
# time.sleep(15)
# # Lets create ctl and idx file.
# createGrib2CtlIdxFilesFn(outFn, ftype='fcst')
# # end of if __fcst_step_hour__ == 6:
# os.chdir(pwd) # Back to previous directory
#
# if callBackScript:
# callBackScript = os.path.abspath(callBackScript)
# if not os.path.exists(callBackScript):
# print "callBackScript '%s' doesn't exist" % callBackScript
# return
# kwargs = ' --date=%s --start_long_fcst_hour=%d --end_long_fcst_hour=%d --fcst_step_hour=%d' % (_current_date_, start_long_fcst_hour, end_long_fcst_hour, __fcst_step_hour__)
# scriptExecuteCmd = callBackScript + ' ' + kwargs
# # execute user defined call back script with keyword arguments
# subprocess.call(scriptExecuteCmd, shell=True)
# # end of if callBackScript:
## end of def convertFcstFiles(...):
#
# -- End code
| arulalant/UMRider | g2utils/um2grb2tigge.py | Python | gpl-2.0 | 143,069 | ["NetCDF"] | 395275c50d6f0083ce2d9a8fe5af00a1c1f57d324147cad386048fc66b7024ca |
from __future__ import print_function
import os
import tempfile
import numpy as np
import fitsio
from astrometry.util.fits import fits_table, merge_tables
from astrometry.util.file import trymakedirs
from tractor.ellipses import EllipseESoft, EllipseE
from tractor.galaxy import ExpGalaxy
from tractor import PointSource, ParamList, ConstantFitsWcs
from legacypipe.utils import EllipseWithPriors, galaxy_min_re
import logging
logger = logging.getLogger('legacypipe.survey')
def info(*args):
from legacypipe.utils import log_info
log_info(logger, args)
def debug(*args):
from legacypipe.utils import log_debug
log_debug(logger, args)
# search order: $TMPDIR, $TEMP, $TMP, then /tmp, /var/tmp, /usr/tmp
tempdir = tempfile.gettempdir()
# The apertures we use in aperture photometry, in ARCSEC radius
apertures_arcsec = np.array([0.5, 0.75, 1., 1.5, 2., 3.5, 5., 7.])
# WISE apertures, in ARCSEC radius
wise_apertures_arcsec = np.array([3., 5., 7., 9., 11.])
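# --- Illustrative sketch (not part of legacypipe) ---
# Aperture photometry ultimately needs these radii in pixels. At the nominal
# 0.262 arcsec/pixel scale used by wcs_for_brick() below, that is a single
# division; the helper name is ours:
def _sketch_apertures_in_pixels(radii_arcsec, pixscale=0.262):
    return [r / pixscale for r in radii_arcsec]
# Example: a 0.5" radius is ~1.9 pixels, and 7" is ~26.7 pixels.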
# Ugly hack: for sphinx documentation, the astrometry and tractor (and
# other) packages are replaced by mock objects. But you can't
# subclass a mock object correctly, so we have to un-mock
# EllipseWithPriors here.
if 'Mock' in str(type(EllipseWithPriors)):
class duck(object):
pass
EllipseWithPriors = duck
def mjd_to_year(mjd):
# mjd_to_year(57205.875) -> 2015.5
from tractor.tractortime import TAITime
return (mjd - TAITime.mjd2k) / TAITime.daysperyear + 2000.
def tai_to_mjd(tai):
return tai / (24. * 3600.)
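# --- Illustrative sketch (not part of legacypipe) ---
# mjd_to_year() and tai_to_mjd() above are simple affine conversions. A
# self-contained version, assuming the standard MJD of J2000.0 (51544.5)
# and 365.25 days per Julian year (TAITime supplies these constants in the
# real code):
def _sketch_mjd_to_year(mjd, mjd2k=51544.5, daysperyear=365.25):
    return (mjd - mjd2k) / daysperyear + 2000.
# Example: _sketch_mjd_to_year(57205.875) -> 2015.5, matching the
# mjd_to_year() doctest comment above.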
def radec_at_mjd(ra, dec, ref_year, pmra, pmdec, parallax, mjd):
'''
Units:
- matches Gaia DR1/DR2
- pmra,pmdec are in mas/yr. pmra is in angular speed (ie, has a cos(dec) factor)
- parallax is in mas.
    NOTE: does not broadcast completely correctly -- all params as vectors,
    or all motion params as vectors plus a scalar mjd, work fine. Other
    combos: not certain.
Returns RA,Dec
'''
from tractor.tractortime import TAITime
from astrometry.util.starutil_numpy import radectoxyz, arcsecperrad, axistilt, xyztoradec
dt = mjd_to_year(mjd) - ref_year
cosdec = np.cos(np.deg2rad(dec))
dec = dec + dt * pmdec / (3600. * 1000.)
ra = ra + (dt * pmra / (3600. * 1000.)) / cosdec
parallax = np.atleast_1d(parallax)
I = np.flatnonzero(parallax)
if len(I):
scalar = np.isscalar(ra) and np.isscalar(dec)
ra = np.atleast_1d(ra)
dec = np.atleast_1d(dec)
suntheta = 2.*np.pi * np.fmod((mjd - TAITime.equinox) / TAITime.daysperyear, 1.0)
# Finite differences on the unit sphere -- xyztoradec handles
# points that are not exactly on the surface of the sphere.
axis = np.deg2rad(axistilt)
scale = parallax[I] / 1000. / arcsecperrad
xyz = radectoxyz(ra[I], dec[I])
xyz[:,0] += scale * np.cos(suntheta)
xyz[:,1] += scale * np.sin(suntheta) * np.cos(axis)
xyz[:,2] += scale * np.sin(suntheta) * np.sin(axis)
r,d = xyztoradec(xyz)
ra [I] = r
dec[I] = d
# radectoxyz / xyztoradec do weird broadcasting
if scalar:
ra = ra[0]
dec = dec[0]
return ra,dec
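# --- Illustrative sketch (not part of legacypipe) ---
# The proper-motion part of radec_at_mjd() above is a linear drift: pmra and
# pmdec are in mas/yr (pmra already carries the cos(dec) factor), so the
# update divides by 3600*1000 to convert mas to degrees and undoes cos(dec)
# for RA. A numpy-only version of just that step, with hypothetical inputs:
def _sketch_apply_proper_motion(ra, dec, pmra, pmdec, dt_years):
    import numpy as np
    cosdec = np.cos(np.deg2rad(dec))
    dec_new = dec + dt_years * pmdec / (3600. * 1000.)
    ra_new = ra + (dt_years * pmra / (3600. * 1000.)) / cosdec
    return ra_new, dec_new
# Example: pmdec = 1000 mas/yr over 3.6 years moves Dec by 0.001 deg
# (3.6 arcsec).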
# Gaia measures positions better than we will, we assume, so the
# GaiaPosition class pretends that it does not have any parameters
# that can be optimized; therefore they stay fixed.
class GaiaPosition(ParamList):
def __init__(self, ra, dec, ref_epoch, pmra, pmdec, parallax):
'''
Units:
- matches Gaia DR1
- pmra,pmdec are in mas/yr. pmra is in angular speed (ie, has a cos(dec) factor)
- parallax is in mas.
- ref_epoch: year (eg 2015.5)
'''
self.ra = ra
self.dec = dec
self.ref_epoch = float(ref_epoch)
self.pmra = pmra
self.pmdec = pmdec
self.parallax = parallax
super(GaiaPosition, self).__init__()
self.cached_positions = {}
def copy(self):
return GaiaPosition(self.ra, self.dec, self.ref_epoch, self.pmra, self.pmdec,
self.parallax)
def getPositionAtTime(self, mjd):
from tractor import RaDecPos
try:
return self.cached_positions[mjd]
except KeyError:
# not cached
pass
if self.pmra == 0. and self.pmdec == 0. and self.parallax == 0.:
pos = RaDecPos(self.ra, self.dec)
self.cached_positions[mjd] = pos
return pos
ra,dec = radec_at_mjd(self.ra, self.dec, self.ref_epoch,
self.pmra, self.pmdec, self.parallax, mjd)
pos = RaDecPos(ra, dec)
self.cached_positions[mjd] = pos
return pos
@staticmethod
def getName():
return 'GaiaPosition'
def __str__(self):
return ('%s: RA, Dec = (%.5f, %.5f), pm (%.1f, %.1f), parallax %.3f' %
(self.getName(), self.ra, self.dec, self.pmra, self.pmdec, self.parallax))
class GaiaSource(PointSource):
@staticmethod
def getName():
return 'GaiaSource'
def getSourceType(self):
return 'GaiaSource'
@classmethod
def from_catalog(cls, g, bands):
from tractor import NanoMaggies
# Gaia has NaN entries when no proper motion or parallax is measured.
# Convert to zeros.
def nantozero(x):
if not np.isfinite(x):
return 0.
return x
pos = GaiaPosition(g.ra, g.dec, g.ref_epoch,
nantozero(g.pmra),
nantozero(g.pmdec),
nantozero(g.parallax))
        # initialize from decam_mag_<band> if available, otherwise Gaia G.
fluxes = {}
for band in bands:
try:
mag = g.get('decam_mag_%s' % band)
except KeyError:
mag = g.phot_g_mean_mag
fluxes[band] = NanoMaggies.magToNanomaggies(mag)
bright = NanoMaggies(order=bands, **fluxes)
src = cls(pos, bright)
src.forced_point_source = g.pointsource
src.reference_star = getattr(g, 'isgaia', False) or getattr(g, 'isbright', False)
return src
#
# We need a subclass of the standard WCS class to handle moving sources.
#
class LegacySurveyWcs(ConstantFitsWcs):
def __init__(self, wcs, tai):
super(LegacySurveyWcs, self).__init__(wcs)
self.tai = tai
def copy(self):
return LegacySurveyWcs(self.wcs, self.tai)
def positionToPixel(self, pos, src=None):
if isinstance(pos, GaiaPosition):
pos = pos.getPositionAtTime(tai_to_mjd(self.tai.getValue()))
return super(LegacySurveyWcs, self).positionToPixel(pos, src=src)
class LegacyEllipseWithPriors(EllipseWithPriors):
# Prior on (softened) ellipticity: Gaussian with this standard deviation
ellipticityStd = 0.25
from tractor.sersic import SersicIndex
class LegacySersicIndex(SersicIndex):
def __init__(self, val=0):
super(LegacySersicIndex, self).__init__(val=val)
self.lower = 0.5
self.upper = 6.0
self.maxstep = 0.25
class LogRadius(EllipseESoft):
''' Class used during fitting of the RexGalaxy type -- an ellipse
type where only the radius is variable, and is represented in log
space.'''
def __init__(self, *args, **kwargs):
super(LogRadius, self).__init__(*args, **kwargs)
self.lowers = [None]
# MAGIC -- 10" default max r_e!
# SEE ALSO utils.py : class(EllipseWithPriors)!
self.uppers = [np.log(10.)]
self.lowers = [np.log(galaxy_min_re)]
def isLegal(self):
return ((self.logre <= self.uppers[0]) and
(self.logre >= self.lowers[0]))
def setMaxLogRadius(self, rmax):
self.uppers[0] = rmax
def getMaxLogRadius(self):
return self.uppers[0]
@staticmethod
def getName():
return 'LogRadius'
@staticmethod
def getNamedParams():
# log r: log of effective radius in arcsec
return dict(logre=0)
def __repr__(self):
return 'log r_e=%g' % (self.logre)
@property
def theta(self):
return 0.
@property
def e(self):
return 0.
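# --- Illustrative sketch (not part of legacypipe) ---
# LogRadius above stores log(r_e) and bounds it to [log(galaxy_min_re),
# log(10 arcsec)]. The legality check is just a comparison in log space;
# a self-contained version assuming a minimum radius of 0.01 arcsec:
def _sketch_logradius_is_legal(re_arcsec, min_re=0.01, max_re=10.):
    import math
    logre = math.log(re_arcsec)
    return math.log(min_re) <= logre <= math.log(max_re)
# Example: 0.45 arcsec (the SIMP radius below) is legal; 25 arcsec is not.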
class RexGalaxy(ExpGalaxy):
'''This defines the 'REX' galaxy profile -- an exponential profile
that is round (zero ellipticity) with variable radius.
It is used to measure marginally-resolved galaxies.
The real action (what makes it a Rex) happens when it is constructed,
via, eg,
rex = RexGalaxy(position, brightness, LogRadius(0.))
(which happens in oneblob.py)
'''
def __init__(self, *args):
super(RexGalaxy, self).__init__(*args)
def getName(self):
return 'RexGalaxy'
class SimpleGalaxy(ExpGalaxy):
    '''This defines the 'SIMP' galaxy profile -- an exponential profile
    with a fixed round shape of 0.45 arcsec effective radius.
    It is used to detect marginally-resolved galaxies.
'''
shape = EllipseE(0.45, 0., 0.)
def __init__(self, *args):
super(SimpleGalaxy, self).__init__(*args)
self.shape = SimpleGalaxy.shape
def __str__(self):
return (self.name + ' at ' + str(self.pos)
+ ' with ' + str(self.brightness))
def __repr__(self):
return (self.name + '(pos=' + repr(self.pos) +
', brightness=' + repr(self.brightness) + ')')
@staticmethod
def getNamedParams():
return dict(pos=0, brightness=1)
def getName(self):
return 'SimpleGalaxy'
### HACK -- for Galaxy.getParamDerivatives()
def isParamFrozen(self, pname):
if pname == 'shape':
return True
return super(SimpleGalaxy, self).isParamFrozen(pname)
class BrickDuck(object):
'''A little duck-typing class when running on a custom RA,Dec center
rather than a brick center.
'''
def __init__(self, ra, dec, brickname):
self.ra = ra
self.dec = dec
self.brickname = brickname
self.brickid = -1
def get_git_version(dirnm=None):
'''
Runs 'git describe' in the current directory (or given dir) and
returns the result as a string.
Parameters
----------
dirnm : string
If non-None, "cd" to the given directory before running 'git describe'
Returns
-------
Git version string
'''
from astrometry.util.run_command import run_command
cmd = ''
if dirnm is None:
# Get the git version of the legacypipe product
import legacypipe
dirnm = os.path.dirname(legacypipe.__file__)
cmd = "cd '%s' && git describe" % dirnm
rtn,version,err = run_command(cmd)
if rtn:
raise RuntimeError('Failed to get version string (%s): ' % cmd +
version + err)
version = version.strip()
return version
def get_version_header(program_name, survey_dir, release, git_version=None,
proctype='tile'):
'''
Creates a fitsio header describing a DECaLS data product.
'''
import datetime
import socket
if program_name is None:
import sys
program_name = sys.argv[0]
if git_version is None:
git_version = get_git_version()
hdr = fitsio.FITSHDR()
for s in [
'Data product of the DESI Imaging Legacy Surveys',
'Full documentation at http://legacysurvey.org',
]:
hdr.add_record(dict(name='COMMENT', value=s, comment=s))
hdr.add_record(dict(name='LEGPIPEV', value=git_version,
comment='legacypipe git version'))
hdr.add_record(dict(name='LSDIR', value=survey_dir,
comment='$LEGACY_SURVEY_DIR directory'))
hdr.add_record(dict(name='LSDR', value='DR9',
comment='Data release number'))
hdr.add_record(dict(name='RUNDATE', value=datetime.datetime.now().isoformat(),
comment='%s run time' % program_name))
hdr.add_record(dict(name='SURVEY', value='DECaLS+BASS+MzLS',
comment='The LegacySurveys'))
# Requested by NOAO
hdr.add_record(dict(name='SURVEYID', value='DECaLS BASS MzLS',
comment='Survey names'))
hdr.add_record(dict(name='DRVERSIO', value=release,
comment='LegacySurveys Data Release number'))
hdr.add_record(dict(name='OBSTYPE', value='object',
comment='Observation type'))
hdr.add_record(dict(name='PROCTYPE', value=proctype,
comment='Processing type'))
hdr.add_record(dict(name='NODENAME', value=socket.gethostname(),
comment='Machine where script was run'))
hdr.add_record(dict(name='HOSTNAME', value=os.environ.get('NERSC_HOST', 'none'),
comment='NERSC machine where script was run'))
hdr.add_record(dict(name='JOB_ID', value=os.environ.get('SLURM_JOB_ID', 'none'),
comment='SLURM job id'))
hdr.add_record(dict(name='ARRAY_ID', value=os.environ.get('ARRAY_TASK_ID', 'none'),
comment='SLURM job array id'))
return hdr
def get_dependency_versions(unwise_dir, unwise_tr_dir, unwise_modelsky_dir, galex_dir):
import astrometry
import astropy
import matplotlib
try:
import mkl_fft
except ImportError:
mkl_fft = None
import photutils
import tractor
import scipy
import unwise_psf
depvers = []
headers = []
default_ver = 'UNAVAILABLE'
for name,pkg in [('astrometry', astrometry),
('astropy', astropy),
('fitsio', fitsio),
('matplotlib', matplotlib),
('mkl_fft', mkl_fft),
('numpy', np),
('photutils', photutils),
('scipy', scipy),
('tractor', tractor),
('unwise_psf', unwise_psf),
]:
if pkg is None:
depvers.append((name, 'none'))
continue
try:
depvers.append((name, pkg.__version__))
        except Exception:
try:
depvers.append((name,
get_git_version(os.path.dirname(pkg.__file__))))
            except Exception:
pass
# Get additional paths from environment variables
dep = 'LARGEGALAXIES_CAT'
value = os.environ.get(dep, default_ver)
if value == default_ver:
print('Warning: failed to get version string for "%s"' % dep)
else:
depvers.append((dep, value))
if os.path.exists(value):
from legacypipe.reference import get_large_galaxy_version
ver,preburn = get_large_galaxy_version(value)
depvers.append(('LARGEGALAXIES_VER', ver))
depvers.append(('LARGEGALAXIES_PREBURN', preburn))
for dep in ['TYCHO2_KD', 'GAIA_CAT', 'SKY_TEMPLATE']:
value = os.environ.get('%s_DIR' % dep, default_ver)
if value == default_ver:
print('Warning: failed to get version string for "%s"' % dep)
else:
depvers.append((dep, value))
if galex_dir is not None:
depvers.append(('galex', galex_dir))
if unwise_dir is not None:
dirs = unwise_dir.split(':')
depvers.append(('unwise', unwise_dir))
for i,d in enumerate(dirs):
headers.append(('UNWISD%i' % (i+1), d, ''))
if unwise_tr_dir is not None:
depvers.append(('unwise_tr', unwise_tr_dir))
# this is assumed to be only a single directory
headers.append(('UNWISTD', unwise_tr_dir, ''))
if unwise_modelsky_dir is not None:
depvers.append(('unwise_modelsky', unwise_modelsky_dir))
# this is assumed to be only a single directory
headers.append(('UNWISSKY', unwise_modelsky_dir, ''))
for i,(name,value) in enumerate(depvers):
headers.append(('DEPNAM%02i' % i, name, ''))
headers.append(('DEPVER%02i' % i, value, ''))
return headers
def tim_get_resamp(tim, targetwcs):
from astrometry.util.resample import resample_with_wcs,OverlapError
if hasattr(tim, 'resamp'):
return tim.resamp
try:
Yo,Xo,Yi,Xi,_ = resample_with_wcs(targetwcs, tim.subwcs, intType=np.int16)
except OverlapError:
debug('No overlap between tim', tim.name, 'and target WCS')
return None
if len(Yo) == 0:
return None
return Yo,Xo,Yi,Xi
# Increasing this hacky factor causes the RGB images to be stretched
# harder, eg for deep imaging such as HSC.
rgb_stretch_factor = 1.0
def sdss_rgb(imgs, bands, scales=None, m=0.03, Q=20, mnmx=None, clip=True):
rgbscales=dict(u = (2, 6.0 * rgb_stretch_factor),
g = (2, 6.0 * rgb_stretch_factor),
r = (1, 3.4 * rgb_stretch_factor),
i = (0, 3.0 * rgb_stretch_factor),
z = (0, 2.2 * rgb_stretch_factor),
N501 = (2, 6.0 * rgb_stretch_factor),
N673 = (1, 3.4 * rgb_stretch_factor),
# HSC
r2 = (1, 3.4 * rgb_stretch_factor),
i2 = (0, 3.0 * rgb_stretch_factor),
y = (0, 2.2 * rgb_stretch_factor),
)
# rgbscales = {'u': 1.5, #1.0,
# 'g': 2.5,
# 'r': 1.5,
# 'i': 1.0,
# 'z': 0.4, #0.3
# }
if scales is not None:
rgbscales.update(scales)
I = 0
for img,band in zip(imgs, bands):
plane,scale = rgbscales[band]
img = np.maximum(0, img * scale + m)
I = I + img
I /= len(bands)
if Q is not None:
fI = np.arcsinh(Q * I) / np.sqrt(Q)
I += (I == 0.) * 1e-6
I = fI / I
H,W = I.shape
rgb = np.zeros((H,W,3), np.float32)
if bands == 'griz':
rgbvec = dict(
g = (0., 0., 0.75),
r = (0., 0.5, 0.25),
i = (0.25, 0.5, 0.),
z = (0.75, 0., 0.))
        for img,band in zip(imgs, bands):
            # look up this band's own scale for the mapping below
            _,scale = rgbscales[band]
            rf,gf,bf = rgbvec[band]
if mnmx is None:
v = np.clip((img * scale + m) * I, 0, 1)
else:
mn,mx = mnmx
v = np.clip(((img * scale + m) - mn) / (mx - mn), 0, 1)
if rf != 0.:
rgb[:,:,0] += rf*v
if gf != 0.:
rgb[:,:,1] += gf*v
if bf != 0.:
rgb[:,:,2] += bf*v
else:
for img,band in zip(imgs, bands):
plane,scale = rgbscales[band]
if mnmx is None:
imgplane = (img * scale + m) * I
else:
mn,mx = mnmx
imgplane = ((img * scale + m) - mn) / (mx - mn)
if clip:
imgplane = np.clip(imgplane, 0, 1)
rgb[:,:,plane] = imgplane
return rgb
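# --- Illustrative sketch (not part of legacypipe) ---
# The Q block in sdss_rgb() above is a Lupton-style softened arcsinh stretch:
# the band-averaged intensity I is mapped through arcsinh(Q*I)/sqrt(Q), and
# every band is rescaled by fI/I, which compresses bright pixels while
# leaving faint ones roughly linear. A scalar numpy demonstration:
def _sketch_arcsinh_stretch_factor(I, Q=20):
    import numpy as np
    I = np.asarray(I, dtype=np.float64)
    fI = np.arcsinh(Q * I) / np.sqrt(Q)
    I = I + (I == 0.) * 1e-6  # avoid division by zero, as above
    return fI / I  # multiplicative factor applied to each band
# Example: the factor approaches sqrt(Q) (~4.47 for Q=20) for faint pixels,
# and falls to ~0.83 at I=1 and ~0.13 at I=10.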
def narrowband_rgb(imgs, bands, allbands, scales=None, m=0.03, Q=20, mnmx=None):
n501scale = 6.0
n673scale = 3.4
rgbscales=dict(N501=(2, n501scale),
N673=(0, n673scale))
if allbands == ['N501', 'N673']:
rgb = sdss_rgb(imgs, bands, scales=rgbscales, clip=False)
rgb[:,:,1] = rgb[:,:,0]/2 + rgb[:,:,2]/2
elif allbands == ['N501']:
rgb = sdss_rgb(imgs, bands, scales=rgbscales, clip=False)
rgb[:,:,0] = rgb[:,:,2]
rgb[:,:,1] = rgb[:,:,2]
elif allbands == ['N673']:
rgb = sdss_rgb(imgs, bands, scales=rgbscales, clip=False)
rgb[:,:,1] = rgb[:,:,0]
rgb[:,:,2] = rgb[:,:,0]
rgb = np.clip(rgb, 0, 1)
return rgb
def get_rgb(imgs, bands, allbands=['g','r','z'],
resids=False, mnmx=None, arcsinh=None):
'''
Given a list of images in the given bands, returns a scaled RGB
image.
*imgs* a list of numpy arrays, all the same size, in nanomaggies
*bands* a list of strings, eg, ['g','r','z']
*mnmx* = (min,max), values that will become black/white *after* scaling.
Default is (-3,10)
*arcsinh* use nonlinear scaling as in SDSS
Returns a (H,W,3) numpy array with values between 0 and 1.
'''
allbands = list(allbands)
# Yuck, special-cased ODIN narrow-band rgb schemes.
if (allbands == ['N501', 'N673']) or (allbands == ['N501']) or (allbands == ['N673']):
return narrowband_rgb(imgs, bands, allbands)
if len(bands) == 5:
return get_rgb(imgs[:3], bands[:3], resids=resids, mnmx=mnmx, arcsinh=arcsinh)
if len(bands) == 3 and bands[0] == 'N501' and bands[1] == 'r' and bands[2] == 'N673':
return sdss_rgb(imgs, bands, scales=dict(N673=(0,3.4)))
# (ignore arcsinh...)
if resids:
mnmx = (-0.1, 0.1)
if mnmx is not None:
return sdss_rgb(imgs, bands, m=0., Q=None, mnmx=mnmx)
return sdss_rgb(imgs, bands)
def wcs_for_brick(b, W=3600, H=3600, pixscale=0.262):
'''
Returns an astrometry.net style Tan WCS object for a given brick object.
b: row from survey-bricks.fits file
W,H: size in pixels
pixscale: pixel scale in arcsec/pixel.
Returns: Tan wcs object
'''
from astrometry.util.util import Tan
pixscale = pixscale / 3600.
return Tan(b.ra, b.dec, W/2.+0.5, H/2.+0.5,
-pixscale, 0., 0., pixscale,
float(W), float(H))
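# --- Illustrative sketch (not part of legacypipe) ---
# A sanity check on the WCS above: a brick nominally covers 0.25 deg (see
# LegacySurveyData.bricksize below), while the default 3600-pixel image at
# 0.262 arcsec/pixel spans 3600 * 0.262 / 3600 = 0.262 deg, so neighbouring
# brick images overlap slightly:
def _sketch_brick_span_deg(W=3600, pixscale=0.262):
    return W * pixscale / 3600.
# Example: _sketch_brick_span_deg() -> 0.262 deg of sky per brick image.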
def bricks_touching_wcs(targetwcs, survey=None, B=None, margin=20):
'''
Finds LegacySurvey bricks touching a given WCS header object.
Parameters
----------
targetwcs : astrometry.util.Tan object or similar
The region of sky to search
survey : legacypipe.survey.LegacySurveyData object
From which the brick table will be retrieved
B : FITS table
The table of brick objects to search
margin : int
Margin in pixels around the outside of the WCS
Returns
-------
A table (subset of B, if given) containing the bricks touching the
given WCS region + margin.
'''
from astrometry.libkd.spherematch import match_radec
from astrometry.util.miscutils import clip_wcs
if B is None:
assert(survey is not None)
B = survey.get_bricks_readonly()
ra,dec = targetwcs.radec_center()
radius = targetwcs.radius()
# MAGIC 0.25 brick size
I,_,_ = match_radec(B.ra, B.dec, ra, dec,
radius + np.hypot(0.25,0.25)/2. + 0.05)
debug(len(I), 'bricks nearby')
keep = []
for i in I:
b = B[i]
brickwcs = wcs_for_brick(b)
clip = clip_wcs(targetwcs, brickwcs)
if len(clip) == 0:
debug('No overlap with brick', b.brickname)
continue
keep.append(i)
return B[np.array(keep)]
def ccds_touching_wcs(targetwcs, ccds, ccdrad=None, polygons=True):
'''
targetwcs: wcs object describing region of interest
ccds: fits_table object of CCDs
ccdrad: radius of CCDs, in degrees.
If None (the default), compute from the CCDs table.
(0.17 for DECam)
Returns: index array I of CCDs within range.
'''
from astrometry.util.util import Tan
from astrometry.util.miscutils import polygons_intersect
from astrometry.util.starutil_numpy import degrees_between
trad = targetwcs.radius()
if ccdrad is None:
ccdrad = max(np.sqrt(np.abs(ccds.cd1_1 * ccds.cd2_2 -
ccds.cd1_2 * ccds.cd2_1)) *
np.hypot(ccds.width, ccds.height) / 2.)
rad = trad + ccdrad
r,d = targetwcs.radec_center()
I, = np.where(np.abs(ccds.dec - d) < rad)
I = I[np.where(degrees_between(r, d, ccds.ra[I], ccds.dec[I]) < rad)[0]]
if not polygons:
return I
# now check actual polygon intersection
tw,th = targetwcs.imagew, targetwcs.imageh
targetpoly = [(0.5,0.5),(tw+0.5,0.5),(tw+0.5,th+0.5),(0.5,th+0.5)]
cd = targetwcs.get_cd()
tdet = cd[0]*cd[3] - cd[1]*cd[2]
if tdet > 0:
targetpoly = list(reversed(targetpoly))
targetpoly = np.array(targetpoly)
keep = []
for i in I:
W,H = ccds.width[i],ccds.height[i]
wcs = Tan(*[float(x) for x in
[ccds.crval1[i], ccds.crval2[i], ccds.crpix1[i], ccds.crpix2[i],
ccds.cd1_1[i], ccds.cd1_2[i], ccds.cd2_1[i], ccds.cd2_2[i], W, H]])
cd = wcs.get_cd()
wdet = cd[0]*cd[3] - cd[1]*cd[2]
poly = []
for x,y in [(0.5,0.5),(W+0.5,0.5),(W+0.5,H+0.5),(0.5,H+0.5)]:
rr,dd = wcs.pixelxy2radec(x,y)
_,xx,yy = targetwcs.radec2pixelxy(rr,dd)
poly.append((xx,yy))
if wdet > 0:
poly = list(reversed(poly))
poly = np.array(poly)
if polygons_intersect(targetpoly, poly):
keep.append(i)
I = np.array(keep)
return I
def create_temp(**kwargs):
f,fn = tempfile.mkstemp(dir=tempdir, **kwargs)
os.close(f)
os.unlink(fn)
return fn
def imsave_jpeg(jpegfn, img, **kwargs):
'''
Saves a image in JPEG format.
Some matplotlib installations
don't support jpeg, so we optionally write to PNG and then convert
to JPEG using the venerable netpbm tools.
*jpegfn*: JPEG filename
*img*: image, in the typical matplotlib formats (see plt.imsave)
'''
from matplotlib.pyplot import imsave
if True:
kwargs.update(format='jpg')
imsave(jpegfn, img, **kwargs)
else:
tmpfn = create_temp(suffix='.png')
imsave(tmpfn, img, **kwargs)
cmd = ('pngtopnm %s | pnmtojpeg -quality 90 > %s' % (tmpfn, jpegfn))
rtn = os.system(cmd)
print(cmd, '->', rtn)
os.unlink(tmpfn)
class LegacySurveyData(object):
    '''
    A class describing the contents of a LEGACY_SURVEY_DIR directory:
    tables of CCDs and of bricks, plus calibration data, with methods
    for dealing with the CCDs and bricks tables.
    This class is also responsible for creating LegacySurveyImage
    objects (eg, DecamImage objects), which then allow data to be read
    from disk.
    '''
def __init__(self, survey_dir=None, cache_dir=None, output_dir=None,
allbands=None, prime_cache=False):
'''
Create a LegacySurveyData object.
The LegacySurveyData object will look for data in the given
*survey_dir* directory, or from the $LEGACY_SURVEY_DIR
environment variable.
Parameters
----------
survey_dir : string
Defaults to $LEGACY_SURVEY_DIR environment variable. Where to look for
files including calibration files, tables of CCDs and bricks, image data,
etc.
cache_dir : string
Directory to search for input files before looking in survey_dir. Useful
for, eg, Burst Buffer.
output_dir : string
Base directory for output files; default ".".
        prime_cache : bool
            When creating a LegacySurveyImage object with get_image_object(),
            copy any available files into cache_dir.
'''
from legacypipe.decam import DecamImage
from legacypipe.mosaic import MosaicImage
from legacypipe.bok import BokImage
from legacypipe.ptf import PtfImage
from legacypipe.cfht import MegaPrimeImage
from legacypipe.hsc import HscImage
from legacypipe.panstarrs import PanStarrsImage
from collections import OrderedDict
if allbands is None:
allbands = ['g','r','z']
if survey_dir is None:
survey_dir = os.environ.get('LEGACY_SURVEY_DIR')
if survey_dir is None:
print('Warning: you should set the $LEGACY_SURVEY_DIR environment variable.')
print('Using the current directory as LEGACY_SURVEY_DIR.')
survey_dir = os.getcwd()
self.survey_dir = survey_dir
self.cache_dir = cache_dir
self.prime_cache = prime_cache
self.primed_files = []
self.calib_dir = os.path.join(self.survey_dir, 'calib')
if output_dir is None:
self.output_dir = '.'
else:
self.output_dir = output_dir
self.output_file_hashes = OrderedDict()
self.ccds = None
self.bricks = None
self.ccds_index = None
# Create and cache a kd-tree for bricks_touching_radec_box ?
self.cache_tree = False
self.bricktree = None
### HACK! Hard-coded brick edge size, in degrees!
self.bricksize = 0.25
self.psfex_conf = None
# Cached CCD kd-tree --
# - initially None, then a list of (fn, kd)
self.ccd_kdtrees = None
self.image_typemap = {
'decam' : DecamImage,
'decam+noise' : DecamImage,
'mosaic' : MosaicImage,
'mosaic3': MosaicImage,
'90prime': BokImage,
'ptf' : PtfImage,
'megaprime': MegaPrimeImage,
'hsc' : HscImage,
'panstarrs' : PanStarrsImage,
}
self.allbands = allbands
# Filename prefix for coadd files
self.file_prefix = 'legacysurvey'
def __str__(self):
return ('%s: dir %s, out %s' %
(type(self).__name__, self.survey_dir, self.output_dir))
def get_default_release(self):
return None
def ccds_for_fitting(self, brick, ccds):
# By default, use all.
return None
def image_class_for_camera(self, camera):
# Assert that we have correctly removed trailing spaces
assert(camera == camera.strip())
return self.image_typemap[camera]
def sed_matched_filters(self, bands):
from legacypipe.detection import sed_matched_filters
return sed_matched_filters(bands)
def find_file(self, filetype, brick=None, brickpre=None, band='%(band)s',
camera=None, expnum=None, ccdname=None, tier=None, img=None,
output=False, use_cache=True, **kwargs):
'''
Returns the filename of a Legacy Survey file.
*filetype* : string, type of file to find, including:
"tractor" -- Tractor catalogs
"depth" -- PSF depth maps
"galdepth" -- Canonical galaxy depth maps
"nexp" -- number-of-exposure maps
*brick* : string, brick name such as "0001p000"
*output*: True if we are about to write this file; will use self.outdir as
the base directory rather than self.survey_dir.
Returns: path to the specified file (whether or not it exists).
'''
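        # A few illustrative calls (the brick name is hypothetical):
        #   survey.find_file('tractor', brick='0001p000')
        #     -> <basedir>/tractor/000/tractor-0001p000.fits
        #   survey.find_file('image', brick='0001p000', band='g')
        #     -> <basedir>/coadd/000/0001p000/legacysurvey-0001p000-image-g.fits.fz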
from glob import glob
if brick is None:
brick = '%(brick)s'
brickpre = '%(brick).3s'
else:
brickpre = brick[:3]
if output:
basedir = self.output_dir
else:
basedir = self.survey_dir
if brick is not None:
codir = os.path.join(basedir, 'coadd', brickpre, brick)
# Swap in files in the self.cache_dir, if they exist.
def swap(fn):
if output:
return fn
if not use_cache:
return fn
return self.check_cache(fn)
def swaplist(fns):
if output or (self.cache_dir is None) or not use_cache:
return fns
return [self.check_cache(fn) for fn in fns]
sname = self.file_prefix
if filetype == 'bricks':
fn = os.path.join(basedir, 'survey-bricks.fits.gz')
if os.path.exists(fn):
return swap(fn)
fns = glob(os.path.join(basedir, 'survey-bricks-*.fits.gz'))
if len(fns) > 0:
return swap(fns[0])
return None
elif filetype == 'ccds':
return swaplist(
glob(os.path.join(basedir, 'survey-ccds*.fits.gz')))
elif filetype == 'ccd-kds':
return swaplist(
glob(os.path.join(basedir, 'survey-ccds*.kd.fits')))
elif filetype == 'tycho2':
dirnm = os.environ.get('TYCHO2_KD_DIR')
if dirnm is not None:
fn = os.path.join(dirnm, 'tycho2.kd.fits')
if os.path.exists(fn):
return fn
return swap(os.path.join(basedir, 'tycho2.kd.fits'))
elif filetype == 'large-galaxies':
fn = os.environ.get('LARGEGALAXIES_CAT')
if fn is None:
return None
if os.path.isfile(fn):
return fn
return None
elif filetype == 'annotated-ccds':
return swaplist(
glob(os.path.join(basedir, 'ccds-annotated-*.fits.gz')))
elif filetype == 'psf':
return swap(img.merged_psffn)
elif filetype == 'sky':
return swap(img.merged_skyfn)
elif filetype == 'psf-single':
return swap(img.psffn)
elif filetype == 'sky-single':
return swap(img.skyfn)
elif filetype == 'tractor':
return swap(os.path.join(basedir, 'tractor', brickpre,
'tractor-%s.fits' % brick))
elif filetype == 'tractor-intermediate':
return swap(os.path.join(basedir, 'tractor-i', brickpre,
'tractor-i-%s.fits' % brick))
elif filetype == 'galaxy-sims':
return swap(os.path.join(basedir, 'tractor', brickpre,
'galaxy-sims-%s.fits' % brick))
elif filetype in ['ccds-table', 'depth-table']:
ty = filetype.split('-')[0]
return swap(
os.path.join(codir, '%s-%s-%s.fits' % (sname, brick, ty)))
elif filetype in ['image-jpeg', 'model-jpeg', 'resid-jpeg',
'blobmodel-jpeg',
'imageblob-jpeg', 'simscoadd-jpeg','imagecoadd-jpeg',
'wise-jpeg', 'wisemodel-jpeg', 'wiseresid-jpeg',
'galex-jpeg', 'galexmodel-jpeg', 'galexresid-jpeg',
]:
ty = filetype.split('-')[0]
return swap(
os.path.join(codir, '%s-%s-%s.jpg' % (sname, brick, ty)))
elif filetype in ['outliers-pre', 'outliers-post',
'outliers-masked-pos', 'outliers-masked-neg']:
return swap(
os.path.join(basedir, 'metrics', brickpre,
'%s-%s.jpg' % (filetype, brick)))
elif filetype in ['invvar', 'chi2', 'image', 'model', 'blobmodel',
'depth', 'galdepth', 'nexp', 'psfsize',
'copsf']:
tstr = ''
# coadd psf tier
if tier is not None:
tstr = '-tier%s' % tier
return swap(os.path.join(codir, '%s-%s-%s-%s%s.fits.fz' %
(sname, brick, filetype, band, tstr)))
elif filetype in ['blobmap']:
return swap(os.path.join(basedir, 'metrics', brickpre,
'blobs-%s.fits.gz' % (brick)))
elif filetype in ['blobmask']:
return swap(os.path.join(basedir, 'metrics', brickpre,
'blobmask-%s.fits.gz' % (brick)))
elif filetype in ['maskbits']:
return swap(os.path.join(codir,
'%s-%s-%s.fits.fz' % (sname, brick, filetype)))
elif filetype in ['all-models']:
return swap(os.path.join(basedir, 'metrics', brickpre,
'all-models-%s.fits' % (brick)))
elif filetype == 'ref-sources':
return swap(os.path.join(basedir, 'metrics', brickpre,
'reference-%s.fits' % (brick)))
elif filetype == 'checksums':
return swap(os.path.join(basedir, 'tractor', brickpre,
'brick-%s.sha256sum' % brick))
elif filetype == 'outliers_mask':
return swap(os.path.join(basedir, 'metrics', brickpre,
'outlier-mask-%s.fits.fz' % (brick)))
elif filetype == 'forced':
estr = '%08i' % expnum
return swap(os.path.join(basedir, 'forced', camera, estr[:5],
'forced-%s-%i.fits' % (camera, expnum)))
elif filetype == 'forced-brick':
return swap(os.path.join(basedir, 'forced-brick', brickpre,
'forced-%s.fits' % brick))
        # Raise instead of assert(False); asserts are stripped under "python -O".
        raise ValueError('Unknown filetype "%s"' % filetype)
def check_cache(self, fn):
if self.cache_dir is None:
return fn
if fn is None:
return fn
cfn = fn.replace(self.survey_dir, self.cache_dir)
#debug('checking for cache fn', cfn)
if os.path.exists(cfn):
debug('Cached file hit:', fn, '->', cfn)
return cfn
debug('Cached file miss:', fn, '-/->', cfn)
return fn
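    # Example of the substitution performed by check_cache() (directories
    # are hypothetical):
    #   survey_dir='/global/survey', cache_dir='/tmp/cache'
    #   check_cache('/global/survey/images/foo.fits')
    #     -> '/tmp/cache/images/foo.fits' if that cached copy exists,
    #        otherwise the original path is returned unchanged.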
def get_compression_args(self, filetype, shape=None):
comp = dict(# g: sigma ~ 0.002. qz -1e-3: 6 MB, -1e-4: 10 MB
image = ('R', 'qz -1e-4'),
model = ('R', 'qz -1e-4'),
chi2 = ('R', 'qz -0.1'),
invvar = ('R', 'q0 16'),
nexp = ('H', None),
outliers_mask = ('R', None),
maskbits = ('H', None),
depth = ('G', 'qz 0'),
galdepth = ('G', 'qz 0'),
psfsize = ('G', 'qz 0'),
).get(filetype)
if comp is None:
return None
method, args = comp
mname = dict(R='RICE',
H='HCOMPRESS',
G='GZIP',
).get(method)
if args is None:
pat = '[compress %s %%(tilew)i,%%(tileh)i]' % method
else:
pat = '[compress %s %%(tilew)i,%%(tileh)i; %s]' % (method, args)
# Default tile compression size:
tilew,tileh = 100,100
if shape is not None:
H,W = shape
# CFITSIO's fpack compression can't handle partial tile
# sizes < 4 pix. Select a tile size that works, or don't
# compress if we can't find one.
if W < 4 or H < 4:
return None
while tilew <= W:
remain = W % tilew
if remain == 0 or remain >= 4:
break
tilew += 1
while tileh <= H:
remain = H % tileh
if remain == 0 or remain >= 4:
break
tileh += 1
s = pat % dict(tilew=tilew, tileh=tileh)
return s, method, args, mname, (tilew,tileh)
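    # Worked example of the tile-size search above: for W=350 the default
    # tilew=100 leaves a remainder of 50 (>= 4), so 100 is kept; for W=103
    # the remainders for tilew=100,101,102,103 are 3,2,1,0, so tilew=103 is
    # chosen, avoiding fpack's problem with partial tiles narrower than 4
    # pixels.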
def get_compression_string(self, filetype, shape=None, **kwargs):
A = self.get_compression_args(filetype, shape=shape)
if A is None:
return None
return A[0]
def get_psfex_conf(self, camera, expnum, ccdname):
        '''
        Return additional psfex configuration flags for a given expnum and
        ccdname.
        Extra config flags are read from a per-camera data file,
        <camera>-special-psfex-conf.dat, in the legacypipe data directory
        (see read_psfex_conf()).
        '''
if self.psfex_conf is None:
self.psfex_conf = {}
if self.psfex_conf.get(camera, None) is None:
self.psfex_conf[camera] = read_psfex_conf(camera)
camconf = self.psfex_conf[camera]
res = camconf.get((expnum, ccdname.strip().upper()), '')
if res == '':
res = camconf.get((expnum, None), '')
return res
def write_output(self, filetype, hashsum=True, filename=None, **kwargs):
'''
Returns a context manager for writing an output file.
Example use: ::
with survey.write_output('ccds', brick=brickname) as out:
ccds.writeto(out.fn, primheader=primhdr)
For FITS output, out.fits is a fitsio.FITS object. The file
contents will actually be written in memory, and then a
sha256sum computed before the file contents are written out to
the real disk file. The 'out.fn' member variable is NOT set.
::
with survey.write_output('ccds', brick=brickname) as out:
ccds.writeto(None, fits_object=out.fits, primheader=primhdr)
        Does the following on entry:
        - calls self.find_file() to determine which filename to write to
        - ensures the output directory exists
        - writes to a temporary file ("tmp-" prepended to the basename)
        Does the following on exit:
        - computes the sha256sum
        - renames the temporary file to the final filename (making the
          write atomic)
'''
class OutputFileContext(object):
def __init__(self, fn, survey, hashsum=True, relative_fn=None,
compression=None):
'''
*compression*: a CFITSIO compression specification, eg:
"[compress R 100,100; qz -0.05]"
'''
self.real_fn = fn
self.relative_fn = relative_fn
self.survey = survey
self.is_fits = (fn.endswith('.fits') or
fn.endswith('.fits.gz') or
fn.endswith('.fits.fz'))
self.tmpfn = os.path.join(os.path.dirname(fn),
'tmp-'+os.path.basename(fn))
if self.is_fits:
self.fits = fitsio.FITS('mem://' + (compression or ''),
'rw')
else:
self.fn = self.tmpfn
self.hashsum = hashsum
def __enter__(self):
dirnm = os.path.dirname(self.tmpfn)
trymakedirs(dirnm)
return self
def __exit__(self, exc_type, exc_value, traceback):
# If an exception was thrown, bail out
if exc_type is not None:
return
if self.hashsum:
import hashlib
hashfunc = hashlib.sha256
sha = hashfunc()
if self.is_fits:
# Read back the data written into memory by the
# fitsio library
rawdata = self.fits.read_raw()
# close the fitsio file
self.fits.close()
# If gzip, we now have to actually do the
# compression to gzip format...
if self.tmpfn.endswith('.gz'):
from io import BytesIO
import gzip
#ulength = len(rawdata)
# We gzip to a memory file (BytesIO) so we can compute
# the hashcode before writing to disk for real
gzipped = BytesIO()
gzf = gzip.GzipFile(self.real_fn, 'wb', 9, gzipped)
gzf.write(rawdata)
gzf.close()
rawdata = gzipped.getvalue()
gzipped.close()
del gzipped
#clength = len(rawdata)
#print('Gzipped', ulength, 'to', clength)
if self.hashsum:
sha.update(rawdata)
f = open(self.tmpfn, 'wb')
f.write(rawdata)
f.close()
debug('Wrote', self.tmpfn)
del rawdata
else:
f = open(self.tmpfn, 'rb')
if self.hashsum:
sha.update(f.read())
f.close()
if self.hashsum:
hashcode = sha.hexdigest()
del sha
os.rename(self.tmpfn, self.real_fn)
debug('Renamed to', self.real_fn)
info('Wrote', self.real_fn)
if self.hashsum:
# List the relative filename (from output dir) in
# shasum file.
fn = self.relative_fn or self.real_fn
self.survey.add_hashcode(fn, hashcode)
# end of OutputFileContext class
if filename is not None:
fn = filename
else:
# Get the output filename for this filetype
fn = self.find_file(filetype, output=True, **kwargs)
compress = self.get_compression_string(filetype, **kwargs)
# Find the relative path (relative to output_dir), which is the string
# we will put in the shasum file.
relfn = fn
if relfn.startswith(self.output_dir):
relfn = relfn[len(self.output_dir):]
if relfn.startswith('/'):
relfn = relfn[1:]
out = OutputFileContext(fn, self, hashsum=hashsum, relative_fn=relfn,
compression=compress)
return out
def add_hashcode(self, fn, hashcode):
'''
Callback to be called in the *write_output* routine.
'''
self.output_file_hashes[fn] = hashcode
def __getstate__(self):
'''
For pickling; we omit cached tables.
'''
d = self.__dict__.copy()
d['ccds'] = None
d['bricks'] = None
d['bricktree'] = None
d['ccd_kdtrees'] = None
return d
def drop_cache(self):
'''
Clears all cached data contained in this object. Useful for
pickling / multiprocessing.
'''
self.ccds = None
self.bricks = None
if self.bricktree is not None:
from astrometry.libkd.spherematch import tree_free
tree_free(self.bricktree)
self.bricktree = None
def get_calib_dir(self):
return self.calib_dir
def get_image_dir(self):
'''
Returns the directory containing image data.
'''
return os.path.join(self.survey_dir, 'images')
def get_survey_dir(self):
'''
Returns the base LEGACY_SURVEY_DIR directory.
'''
return self.survey_dir
def get_se_dir(self):
'''
Returns the directory containing SourceExtractor config files,
used during calibration.
'''
from pkg_resources import resource_filename
return resource_filename('legacypipe', 'config')
def get_bricks(self):
'''
Returns a table of bricks. The caller owns the table.
For read-only purposes, see *get_bricks_readonly()*, which
uses a cached version.
'''
return fits_table(self.find_file('bricks'))
def get_bricks_readonly(self):
'''
Returns a read-only (shared) copy of the table of bricks.
'''
if self.bricks is None:
self.bricks = self.get_bricks()
# Assert that bricks are the sizes we think they are.
# ... except for the two poles, which are half-sized
assert(np.all(np.abs((self.bricks.dec2 - self.bricks.dec1)[1:-1] -
self.bricksize) < 1e-3))
return self.bricks
def get_brick(self, brickid):
'''
Returns a brick (as one row in a table) by *brickid* (integer).
'''
B = self.get_bricks_readonly()
I, = np.nonzero(B.brickid == brickid)
if len(I) == 0:
return None
return B[I[0]]
def get_brick_by_name(self, brickname):
'''
Returns a brick (as one row in a table) by name (string).
'''
B = self.get_bricks_readonly()
I, = np.nonzero(np.array([n == brickname for n in B.brickname]))
if len(I) == 0:
return None
return B[I[0]]
def get_bricks_by_name(self, brickname):
'''
Returns a brick (as a table with one row) by name (string).
'''
B = self.get_bricks_readonly()
I, = np.nonzero(np.array([n == brickname for n in B.brickname]))
if len(I) == 0:
return None
return B[I]
def get_bricks_near(self, ra, dec, radius):
'''
Returns a set of bricks near the given RA,Dec and radius (all in degrees).
'''
bricks = self.get_bricks_readonly()
if self.cache_tree:
from astrometry.libkd.spherematch import tree_build_radec, tree_search_radec
# Use kdtree
if self.bricktree is None:
self.bricktree = tree_build_radec(bricks.ra, bricks.dec)
I = tree_search_radec(self.bricktree, ra, dec, radius)
else:
from astrometry.util.starutil_numpy import degrees_between
d = degrees_between(bricks.ra, bricks.dec, ra, dec)
I, = np.nonzero(d < radius)
if len(I) == 0:
return None
return bricks[I]
def bricks_touching_radec_box(self, bricks,
ralo, rahi, declo, dechi):
'''
Returns an index vector of the bricks that touch the given RA,Dec box.
'''
if bricks is None:
bricks = self.get_bricks_readonly()
if self.cache_tree and bricks == self.bricks:
from astrometry.libkd.spherematch import tree_build_radec, tree_search_radec
from astrometry.util.starutil_numpy import degrees_between
# Use kdtree
if self.bricktree is None:
self.bricktree = tree_build_radec(bricks.ra, bricks.dec)
# brick size
radius = np.sqrt(2.)/2. * self.bricksize
# + RA,Dec box size
radius = radius + degrees_between(ralo, declo, rahi, dechi) / 2.
dec = (dechi + declo) / 2.
c = (np.cos(np.deg2rad(rahi)) + np.cos(np.deg2rad(ralo))) / 2.
s = (np.sin(np.deg2rad(rahi)) + np.sin(np.deg2rad(ralo))) / 2.
ra = np.rad2deg(np.arctan2(s, c))
J = tree_search_radec(self.bricktree, ra, dec, radius)
I = J[np.nonzero((bricks.ra1[J] <= rahi ) * (bricks.ra2[J] >= ralo) *
(bricks.dec1[J] <= dechi) * (bricks.dec2[J] >= declo))[0]]
return I
if rahi < ralo:
# Wrap-around
debug('In Dec slice:', len(np.flatnonzero((bricks.dec1 <= dechi) *
(bricks.dec2 >= declo))))
debug('Above RAlo=', ralo, ':', len(np.flatnonzero(bricks.ra2 >= ralo)))
debug('Below RAhi=', rahi, ':', len(np.flatnonzero(bricks.ra1 <= rahi)))
debug('In RA slice:', len(np.nonzero(np.logical_or(bricks.ra2 >= ralo,
bricks.ra1 <= rahi))))
I, = np.nonzero(np.logical_or(bricks.ra2 >= ralo, bricks.ra1 <= rahi) *
(bricks.dec1 <= dechi) * (bricks.dec2 >= declo))
debug('In RA&Dec slice', len(I))
else:
I, = np.nonzero((bricks.ra1 <= rahi ) * (bricks.ra2 >= ralo) *
(bricks.dec1 <= dechi) * (bricks.dec2 >= declo))
return I
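    # Worked example of the wrap-around branch above: for a box with
    # ralo=359.5 and rahi=0.5 (rahi < ralo), a brick is kept if it extends
    # above RA=359.5 OR below RA=0.5 (and overlaps the Dec range), whereas
    # the normal branch requires both RA conditions to hold at once.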
def get_ccds_readonly(self):
'''
Returns a shared copy of the table of CCDs.
'''
if self.ccds is None:
self.ccds = self.get_ccds()
return self.ccds
def filter_ccds_files(self, fns):
'''
When reading the list of CCDs, we find all files named
survey-ccds-\*.fits.gz, then filter that list using this function.
'''
return fns
def filter_ccd_kd_files(self, fns):
return fns
def get_ccds(self, **kwargs):
'''
Returns the table of CCDs.
'''
fns = self.find_file('ccd-kds')
fns = self.filter_ccd_kd_files(fns)
# If 'ccd-kds' files exist, read the CCDs tables from them!
# Otherwise, fall back to survey-ccds-*.fits.gz files.
if len(fns) == 0:
fns = self.find_file('ccds')
fns.sort()
fns = self.filter_ccds_files(fns)
if len(fns) == 0:
print('Failed to find any valid survey-ccds tables')
raise RuntimeError('No survey-ccds files')
TT = []
for fn in fns:
debug('Reading CCDs from', fn)
T = fits_table(fn, **kwargs)
debug('Got', len(T), 'CCDs')
TT.append(T)
if len(TT) > 1:
T = merge_tables(TT, columns='fillzero')
else:
T = TT[0]
debug('Total of', len(T), 'CCDs')
del TT
T = self.cleanup_ccds_table(T)
return T
def filter_annotated_ccds_files(self, fns):
'''
When reading the list of annotated CCDs,
filter file list using this function.
'''
return fns
def get_annotated_ccds(self):
'''
Returns the annotated table of CCDs.
'''
fns = self.find_file('annotated-ccds')
fns = self.filter_annotated_ccds_files(fns)
TT = []
for fn in fns:
debug('Reading annotated CCDs from', fn)
T = fits_table(fn)
debug('Got', len(T), 'CCDs')
TT.append(T)
T = merge_tables(TT, columns='fillzero')
debug('Total of', len(T), 'CCDs')
del TT
T = self.cleanup_ccds_table(T)
return T
def cleanup_ccds_table(self, ccds):
# Remove trailing spaces from 'ccdname' column
if 'ccdname' in ccds.columns():
# "N4 " -> "N4"
ccds.ccdname = np.array([s.strip() for s in ccds.ccdname])
# Remove trailing spaces from 'camera' column.
if 'camera' in ccds.columns():
ccds.camera = np.array([c.strip() for c in ccds.camera])
# And 'filter' column
if 'filter' in ccds.columns():
ccds.filter = np.array([f.strip() for f in ccds.filter])
return ccds
def ccds_touching_wcs(self, wcs, **kwargs):
'''
Returns a table of the CCDs touching the given *wcs* region.
'''
kdfns = self.get_ccd_kdtrees()
if len(kdfns):
from astrometry.libkd.spherematch import tree_search_radec
# MAGIC number: we'll search a 1-degree radius for CCDs
# roughly in range, then refine using the
# ccds_touching_wcs() function.
radius = 1.
ra,dec = wcs.radec_center()
TT = []
for fn,kd in kdfns:
I = tree_search_radec(kd, ra, dec, radius)
debug(len(I), 'CCDs within', radius, 'deg of RA,Dec',
'(%.3f, %.3f)' % (ra,dec))
if len(I) == 0:
continue
# Read only the CCD-table rows within range.
TT.append(fits_table(fn, rows=I))
if len(TT) == 0:
return None
ccds = merge_tables(TT, columns='fillzero')
ccds = self.cleanup_ccds_table(ccds)
else:
ccds = self.get_ccds_readonly()
I = ccds_touching_wcs(wcs, ccds, **kwargs)
if len(I) == 0:
return None
return ccds[I]
def get_ccd_kdtrees(self):
# check cache...
if self.ccd_kdtrees is not None:
return self.ccd_kdtrees
fns = self.find_file('ccd-kds')
fns = self.filter_ccd_kd_files(fns)
from astrometry.libkd.spherematch import tree_open
self.ccd_kdtrees = []
for fn in fns:
debug('Opening kd-tree', fn)
kd = tree_open(fn, 'ccds')
self.ccd_kdtrees.append((fn, kd))
return self.ccd_kdtrees
def get_image_object(self, t, camera=None, prime_cache=True, **kwargs):
'''
Returns a DecamImage or similar object for one row of the CCDs table.
'''
# get Image subclass
if camera is None:
camera = t.camera
imageType = self.image_class_for_camera(camera)
# call Image subclass constructor
img = imageType(self, t, **kwargs)
if self.prime_cache and prime_cache:
self.prime_cache_for_image(img)
img.check_for_cached_files(self)
return img
def prime_cache_for_image(self, img):
import shutil
from astrometry.util.file import trymakedirs
fns = img.get_cacheable_filenames()
cacheable = img.get_cacheable_filename_variables()
for varname in cacheable:
fn = getattr(img, varname, None)
fns.append(fn)
for fn in fns:
if fn is None:
continue
if not os.path.exists(fn):
# source does not exist
continue
cfn = fn.replace(self.survey_dir, self.cache_dir)
if os.path.exists(cfn):
# destination already exists (check timestamps???)
continue
cdir = os.path.dirname(cfn)
info('Priming the cache: copying', fn, 'to', cfn)
trymakedirs(cdir)
ctmp = cfn + '.tmp'
shutil.copyfile(fn, ctmp)
os.rename(ctmp, cfn)
shutil.copystat(fn, cfn)
self.primed_files.append(cfn)
def delete_primed_cache_files(self):
for fn in self.primed_files:
try:
info('Removing primed-cache file', fn)
os.remove(fn)
except:
pass
def get_approx_wcs(self, ccd):
from astrometry.util.util import Tan
W,H = ccd.width,ccd.height
wcs = Tan(*[float(x) for x in
[ccd.crval1, ccd.crval2, ccd.crpix1, ccd.crpix2,
ccd.cd1_1, ccd.cd1_2, ccd.cd2_1, ccd.cd2_2, W, H]])
return wcs
def tims_touching_wcs(self, targetwcs, mp, bands=None,
**kwargs):
'''
Creates tractor.Image objects for CCDs touching the given
*targetwcs* region.
mp: multiprocessing object
kwargs are passed to LegacySurveyImage.get_tractor_image() and
may include:
* gaussPsf
* pixPsf
'''
# Read images
C = self.ccds_touching_wcs(targetwcs)
# Sort by band
if bands is not None:
C.cut(np.array([b in bands for b in C.filter]))
ims = []
for t in C:
debug('Image file', t.image_filename, 'hdu', t.image_hdu)
im = self.get_image_object(t)
ims.append(im)
# Read images, clip to ROI
W,H = targetwcs.get_width(), targetwcs.get_height()
targetrd = np.array([targetwcs.pixelxy2radec(x,y) for x,y in
[(1,1),(W,1),(W,H),(1,H),(1,1)]])
args = [(im, targetrd, kwargs) for im in ims]
tims = mp.map(read_one_tim, args)
return tims
def find_ccds(self, expnum=None, ccdname=None, camera=None):
'''
Returns a table of CCDs matching the given *expnum* (exposure
number, integer), *ccdname* (string), and *camera* (string),
if given.
'''
if expnum is not None:
C = self.try_expnum_kdtree(expnum)
if C is not None:
if len(C) == 0:
return None
if ccdname is not None:
C = C[C.ccdname == ccdname]
if camera is not None:
C = C[C.camera == camera]
return C
if expnum is not None and ccdname is not None:
# use ccds_index
if self.ccds_index is None:
if self.ccds is not None:
C = self.ccds
else:
C = self.get_ccds(columns=['expnum','ccdname'])
self.ccds_index = dict([((e,n),i) for i,(e,n) in
enumerate(zip(C.expnum, C.ccdname))])
row = self.ccds_index[(expnum, ccdname)]
if self.ccds is not None:
return self.ccds[row]
#import numpy as np
#C = self.get_ccds(rows=np.array([row]))
#return C[0]
T = self.get_ccds_readonly()
if expnum is not None:
T = T[T.expnum == expnum]
if ccdname is not None:
T = T[T.ccdname == ccdname]
if camera is not None:
T = T[T.camera == camera]
return T
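    # Usage sketch (exposure number and CCD name are hypothetical):
    #   ccds = survey.find_ccds(expnum=346789, ccdname='N4', camera='decam')
    #   # -> table of matching CCDs (kd-tree or index lookups are used
    #   #    when available; otherwise the full CCDs table is filtered).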
def try_expnum_kdtree(self, expnum):
'''
# By creating a kd-tree from the 'expnum' column, search for expnums
# can be sped up:
from astrometry.libkd.spherematch import *
from astrometry.util.fits import fits_table
T=fits_table('/global/cscratch1/sd/dstn/dr7-depthcut/survey-ccds-dr7.kd.fits',
columns=['expnum'])
ekd = tree_build(np.atleast_2d(T.expnum.copy()).T.astype(float),
nleaf=60, bbox=False, split=True)
ekd.set_name('expnum')
ekd.write('ekd.fits')
> fitsgetext -i $CSCRATCH/dr7-depthcut/survey-ccds-dr7.kd.fits -o dr7-%02i -a -M
> fitsgetext -i ekd.fits -o ekd-%02i -a -M
> cat dr7-0* ekd-0[123456] > $CSCRATCH/dr7-depthcut+/survey-ccds-dr7.kd.fits
'''
fns = self.find_file('ccd-kds')
fns = self.filter_ccd_kd_files(fns)
if len(fns) == 0:
return None
from astrometry.libkd.spherematch import tree_open
TT = []
for fn in fns:
debug('Searching', fn)
try:
kd = tree_open(fn, 'expnum')
except:
debug('Failed to open', fn, ':')
import traceback
traceback.print_exc()
continue
if kd is None:
return None
I = kd.search(np.array([expnum]), 0.5, 0, 0)
debug(len(I), 'CCDs with expnum', expnum, 'in', fn)
if len(I) == 0:
continue
# Read only the CCD-table rows within range.
TT.append(fits_table(fn, rows=I))
if len(TT) == 0:
##??
return fits_table()
ccds = merge_tables(TT, columns='fillzero')
ccds = self.cleanup_ccds_table(ccds)
return ccds
def run_calibs(X):
im = X[0]
kwargs = X[1]
noraise = kwargs.pop('noraise', False)
debug('run_calibs for image', im, ':', kwargs)
try:
return im.run_calibs(**kwargs)
except:
print('Exception in run_calibs:', im, kwargs)
import traceback
traceback.print_exc()
if not noraise:
raise
def read_one_tim(X):
from astrometry.util.ttime import Time
(im, targetrd, kwargs) = X
t0 = Time()
tim = im.get_tractor_image(radecpoly=targetrd, **kwargs)
if tim is not None:
th,tw = tim.shape
print('Time to read %i x %i image, hdu %i:' % (tw,th, im.hdu), Time()-t0)
return tim
def read_psfex_conf(camera):
psfex_conf = {}
from pkg_resources import resource_filename
dirname = resource_filename('legacypipe', 'data')
fn = os.path.join(dirname, camera + '-special-psfex-conf.dat')
if not os.path.exists(fn):
        info('Could not find special psfex configuration file for ' +
             camera + '; not using per-image psfex configurations.')
return psfex_conf
f = open(fn)
for line in f.readlines():
line = line.strip()
if len(line) == 0:
continue
if line[0] == '#':
continue
parts = line.split(None, maxsplit=1)
if len(parts) != 2:
            print('Skipping line', line)
continue
expname, flags = parts
if '-' in expname:
idparts = expname.split('-')
if len(idparts) != 2:
                print('Skipping line', line)
continue
expidstr = idparts[0].strip()
ccd = idparts[1].strip().upper()
else:
expidstr = expname.strip()
ccd = None
try:
expnum = int(expidstr, 10)
except ValueError:
print('Skipping line', line)
continue
psfex_conf[(expnum, ccd)] = flags
return psfex_conf
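# The per-camera file parsed above is whitespace-separated; a hypothetical
# example of its contents (the psfex flags shown are illustrative only):
#
#   # expnum[-ccdname]  extra psfex flags
#   346789-N4  -PSF_SIZE 63,63
#   346790     -VERBOSE_TYPE FULL
#
# Keys are (expnum, CCDNAME) with the ccd name upper-cased, or
# (expnum, None) when the flags apply to the whole exposure.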
|
legacysurvey/pipeline
|
py/legacypipe/survey.py
|
Python
|
gpl-2.0
| 64,528
|
[
"Galaxy",
"Gaussian"
] |
0c751d43ef9e3d0f5076283bcea2313627cb751702667d562e45c9efeabc2c96
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from Plugins.Extensions.OpenWebif.local import tstrings
from json import dumps
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1453357629.039476
__CHEETAH_genTimestamp__ = 'Thu Jan 21 15:27:09 2016'
__CHEETAH_src__ = '/home/babel/Build/Test/OpenPLi5/openpli5.0/build/tmp/work/tmnanoseplus-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+186ea358f6-r0/git/plugin/controllers/views/main.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Jan 21 15:27:08 2016'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class main(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(main, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def menu(self, title, name, content, **KWS):
## CHEETAH: generated from #def menu($title, $name, $content) at line 36, col 4.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''\t\t\t<div id="leftmenu_main">\r
\t\t\t\t<div id="leftmenu_top">\r
\t\t\t\t\t''')
_v = VFFSL(SL,"title",True) # u'$title' on line 39, col 6
if _v is not None: write(_filter(_v, rawExpr=u'$title')) # from line 39, col 6.
write(u'''\r
''')
if VFFSL(SL,"name",True) in VFFSL(SL,"collapsed",True): # generated from line 40, col 6
write(u'''\t\t\t\t\t<div id="leftmenu_expander_''')
_v = VFFSL(SL,"name",True) # u'$name' on line 41, col 33
if _v is not None: write(_filter(_v, rawExpr=u'$name')) # from line 41, col 33.
write(u'''" class="leftmenu_icon leftmenu_icon_collapse" onclick="toggleMenu(\'''')
_v = VFFSL(SL,"name",True) # u'$name' on line 41, col 106
if _v is not None: write(_filter(_v, rawExpr=u'$name')) # from line 41, col 106.
write(u'''\');"></div>\r
''')
else: # generated from line 42, col 6
write(u'''\t\t\t\t\t<div id="leftmenu_expander_''')
_v = VFFSL(SL,"name",True) # u'$name' on line 43, col 33
if _v is not None: write(_filter(_v, rawExpr=u'$name')) # from line 43, col 33.
write(u'''" class="leftmenu_icon" onclick="toggleMenu(\'''')
_v = VFFSL(SL,"name",True) # u'$name' on line 43, col 83
if _v is not None: write(_filter(_v, rawExpr=u'$name')) # from line 43, col 83.
write(u'''\');"></div>\r
''')
write(u'''\t\t\t\t</div>\r
''')
if VFFSL(SL,"name",True) in VFFSL(SL,"collapsed",True): # generated from line 46, col 5
write(u'''\t\t\t\t<div id="leftmenu_container_''')
_v = VFFSL(SL,"name",True) # u'$name' on line 47, col 33
if _v is not None: write(_filter(_v, rawExpr=u'$name')) # from line 47, col 33.
write(u'''" style="display: none;">\r
''')
else: # generated from line 48, col 5
write(u'''\t\t\t\t<div id="leftmenu_container_''')
_v = VFFSL(SL,"name",True) # u'$name' on line 49, col 33
if _v is not None: write(_filter(_v, rawExpr=u'$name')) # from line 49, col 33.
write(u'''">\r
''')
write(u'''\t\t\t\t''')
_v = VFFSL(SL,"content",True) # u'$content' on line 51, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$content')) # from line 51, col 5.
write(u'''\r
\t\t\t\t</div>\r
\t\t\t</div>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def mainMenu(self, **KWS):
## CHEETAH: generated from #def mainMenu at line 56, col 4.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''\t\t\t<ul>\r
\t\t\t\t<li><a href=\'#\' onclick="load_maincontent(\'ajax/tv\'); return false;">''')
_v = VFFSL(SL,"tstrings",True)['television'] # u"$tstrings['television']" on line 58, col 74
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['television']")) # from line 58, col 74.
write(u'''</a></li>\r
\t\t\t\t<li><a href=\'#\' onclick="load_maincontent(\'ajax/radio\'); return false;">''')
_v = VFFSL(SL,"tstrings",True)['radio'] # u"$tstrings['radio']" on line 59, col 77
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['radio']")) # from line 59, col 77.
write(u"""</a></li>\r
\t\t\t\t<li><a href='ajax/multiepg2' target=_blank>""")
_v = VFFSL(SL,"tstrings",True)['tv_multi_epg'] # u"$tstrings['tv_multi_epg']" on line 60, col 48
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['tv_multi_epg']")) # from line 60, col 48.
write(u'''</a></li>\r
\t\t\t</ul>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def volumeMenu(self, **KWS):
## CHEETAH: generated from #def volumeMenu at line 64, col 4.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''\t\t\t<div class="volslider">\r
\t\t\t\t\t<p style="text-align:center; padding-bottom:8px;"> \r
\t\t\t\t\t\t<label for="amount">''')
_v = VFFSL(SL,"tstrings",True)['volume'] # u"$tstrings['volume']" on line 67, col 27
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['volume']")) # from line 67, col 27.
write(u''':</label>\r
\t\t\t\t\t\t<input type="text" id="amount" style="border:0; color:#f6931f; font-weight:bold; width:40px;" />\r
\t\t\t\t\t</p>\r
\t\t\t\t<div id="slider" style="width:130px;"></div>\r
\t\t\t</div>\r
\t\t\t<div style="width:100%; text-align:center; padding-top:5px; padding-bottom:10px;"><img id="volimage" src="images/volume.png" title="" border="0" width="48" height="48"></div>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def controlMenu(self, **KWS):
## CHEETAH: generated from #def controlMenu at line 75, col 4.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''\t\t\t<ul>\r
\t\t\t\t<li><a href=\'#\' onclick="load_dm(\'ajax/powerstate\',\'''')
_v = VFFSL(SL,"tstrings",True)["powercontrol"] # u'$tstrings["powercontrol"]' on line 77, col 57
if _v is not None: write(_filter(_v, rawExpr=u'$tstrings["powercontrol"]')) # from line 77, col 57.
write(u'''\'); return false;">''')
_v = VFFSL(SL,"tstrings",True)['powercontrol'] # u"$tstrings['powercontrol']" on line 77, col 101
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['powercontrol']")) # from line 77, col 101.
write(u'''</a></li>\r
\t\t\t\t<li><a href=\'#\' onclick="load_maincontent(\'ajax/screenshot\'); return false;">''')
_v = VFFSL(SL,"tstrings",True)['grabscreenshot'] # u"$tstrings['grabscreenshot']" on line 78, col 82
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['grabscreenshot']")) # from line 78, col 82.
write(u'''</a></li>\r
\t\t\t\t<li><a href=\'#\' onclick="load_message_dm(\'ajax/message\',\'''')
_v = VFFSL(SL,"tstrings",True)["sendamessage"] # u'$tstrings["sendamessage"]' on line 79, col 62
if _v is not None: write(_filter(_v, rawExpr=u'$tstrings["sendamessage"]')) # from line 79, col 62.
write(u'''\'); return false;">''')
_v = VFFSL(SL,"tstrings",True)['sendamessage'] # u"$tstrings['sendamessage']" on line 79, col 106
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['sendamessage']")) # from line 79, col 106.
write(u'''</a></li>\r
\t\t\t\t<li><a href=\'#\' onclick="load_maincontent(\'ajax/timers\'); return false;">''')
_v = VFFSL(SL,"tstrings",True)['timers'] # u"$tstrings['timers']" on line 80, col 78
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['timers']")) # from line 80, col 78.
write(u'''</a></li>\r
\t\t\t</ul>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def infoMenu(self, **KWS):
## CHEETAH: generated from #def infoMenu at line 84, col 4.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''\t\t\t<ul>\r
\t\t\t\t<li><a href="#" onclick="load_maincontent(\'ajax/boxinfo\'); return false">''')
_v = VFFSL(SL,"tstrings",True)['box_info'] # u"$tstrings['box_info']" on line 86, col 78
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['box_info']")) # from line 86, col 78.
write(u'''</a></li>\r
\t\t\t\t<li><a href="#" onclick="load_maincontent(\'ajax/about\'); return false">''')
_v = VFFSL(SL,"tstrings",True)['about'] # u"$tstrings['about']" on line 87, col 76
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['about']")) # from line 87, col 76.
write(u'''</a></li>\r
\t\t\t</ul>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def streamMenu(self, **KWS):
## CHEETAH: generated from #def streamMenu at line 91, col 4.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''\t\t\t<ul>\r
\t\t\t\t<li><a href=\'#\' onclick="load_maincontent_spin(\'ajax/movies\'); return false;">''')
_v = VFFSL(SL,"tstrings",True)['movies'] # u"$tstrings['movies']" on line 93, col 83
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['movies']")) # from line 93, col 83.
write(u'''</a></li>\r
<!--\t\t\t\t<li><a href=\'#\' onclick="load_maincontent(\'ajax/workinprogress\'); return false;">Web Tv</a></li> -->\r
''')
if VFFSL(SL,"zapstream",True): # generated from line 95, col 5
write(u'''\t\t\t\t<li><input type="checkbox" name="zapstream" checked="checked" />''')
_v = VFFSL(SL,"tstrings",True)['zapbeforestream'] # u"$tstrings['zapbeforestream']" on line 96, col 69
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['zapbeforestream']")) # from line 96, col 69.
write(u'''</li>\r
''')
else: # generated from line 97, col 5
write(u'''\t\t\t\t<li><input type="checkbox" name="zapstream" />''')
_v = VFFSL(SL,"tstrings",True)['zapbeforestream'] # u"$tstrings['zapbeforestream']" on line 98, col 51
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['zapbeforestream']")) # from line 98, col 51.
write(u'''</li>\r
''')
write(u'''\t\t\t</ul>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def searchMenu(self, **KWS):
## CHEETAH: generated from #def searchMenu at line 103, col 4.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
if VFFSL(SL,"epgsearchcaps",True): # generated from line 104, col 4
write(u'''\t\t\t<ul>\r
''')
if VFFSL(SL,"epgsearchtype",True): # generated from line 106, col 5
write(u'''\t\t\t\t<li><input type="checkbox" name="epgsearchtype" checked="checked" />''')
_v = VFFSL(SL,"tstrings",True)['epgsearchextended'] # u"$tstrings['epgsearchextended']" on line 107, col 73
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['epgsearchextended']")) # from line 107, col 73.
write(u'''</li>\r
''')
else: # generated from line 108, col 5
write(u'''\t\t\t\t<li><input type="checkbox" name="epgsearchtype" />''')
_v = VFFSL(SL,"tstrings",True)['epgsearchextended'] # u"$tstrings['epgsearchextended']" on line 109, col 55
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['epgsearchextended']")) # from line 109, col 55.
write(u'''</li>\r
''')
write(u'''\t\t\t</ul>\r
''')
write(u'''\t\t\t<form action="" onSubmit="open_epg_search_dialog(); return false;">\r
\t\t\t\t<div style="width:100%; text-align:center; padding-top:5px;"><input type="text" id="epgSearch" size="14" /></div>\r
\t\t\t\t<div style="width:100%; text-align:center;padding-top:5px; padding-bottom:7px;" class="epgsearch"><button>''')
_v = VFFSL(SL,"tstrings",True)['search'] # u"$tstrings['search']" on line 115, col 111
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['search']")) # from line 115, col 111.
write(u'''</button></div>\r
\t\t\t</form>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def remoteMenu(self, **KWS):
## CHEETAH: generated from #def remoteMenu at line 119, col 4.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''\t\t\t<div style="width:100%; text-align:center;">\r
\t\t\t\t<img src="images/remotes/ow_remote.png" width="135" height="183" usemap="#menuremote" border="0">\r
\t\t\t\t<map name="menuremote" >\r
\t\t\t\t\t<area shape="circle" coords="67,148,13" alt="ok" onclick="pressMenuRemote(\'352\');">\r
\t\t\t\t\t<area shape="circle" coords="68,173,9" alt="down" onclick="pressMenuRemote(\'108\');">\r
\t\t\t\t\t<area shape="circle" coords="44,148,9" alt="left" onclick="pressMenuRemote(\'105\');">\r
\t\t\t\t\t<area shape="circle" coords="92,147,9" alt="right" onclick="pressMenuRemote(\'106\');">\r
\t\t\t\t\t<area shape="circle" coords="68,126,8" alt="up" onclick="pressMenuRemote(\'103\');">\r
\t\t\t\t\t<area shape="circle" coords="117,163,10" alt="blue" onclick="pressMenuRemote(\'401\');">\r
\t\t\t\t\t<area shape="circle" coords="118,132,11" alt="yellow" onclick="pressMenuRemote(\'400\');">\r
\t\t\t\t\t<area shape="circle" coords="18,163,11" alt="green" onclick="pressMenuRemote(\'399\');">\r
\t\t\t\t\t<area shape="circle" coords="19,133,10" alt="red" onclick="pressMenuRemote(\'398\');">\r
\t\t\t\t\t<area shape="rect" coords="5,89,44,117" alt="menu" onclick="pressMenuRemote(\'139\');">\r
\t\t\t\t\t<area shape="rect" coords="90,89,128,117" alt="exit" onclick="pressMenuRemote(\'174\');">\r
\t\t\t\t\t<area shape="rect" coords="47,89,87,117" alt="0" onclick="pressMenuRemote(\'11\');">\r
\t\t\t\t\t<area shape="rect" coords="90,60,128,86" alt="9" onclick="pressMenuRemote(\'10\');">\r
\t\t\t\t\t<area shape="rect" coords="47,60,87,86" alt="8" onclick="pressMenuRemote(\'9\');">\r
\t\t\t\t\t<area shape="rect" coords="4,60,44,86" alt="7" onclick="pressMenuRemote(\'8\');">\r
\t\t\t\t\t<area shape="rect" coords="90,30,129,57" alt="6" onclick="pressMenuRemote(\'7\');">\r
\t\t\t\t\t<area shape="rect" coords="47,30,87,57" alt="5" onclick="pressMenuRemote(\'6\');">\r
\t\t\t\t\t<area shape="rect" coords="4,30,44,57" alt="4" onclick="pressMenuRemote(\'5\');">\r
\t\t\t\t\t<area shape="rect" coords="90,0,129,27" alt="3" onclick="pressMenuRemote(\'4\');">\r
\t\t\t\t\t<area shape="rect" coords="46,0,88,28" alt="2" onclick="pressMenuRemote(\'3\');">\r
\t\t\t\t\t<area shape="rect" coords="5,0,45,28" alt="1" onclick="pressMenuRemote(\'2\');">\r
\t\t\t\t</map>\r
\t\t\t\t<div id="help">\r
\t\t\t\t\t''')
_v = VFFSL(SL,"tstrings",True)['shiftforlong'] # u"$tstrings['shiftforlong']" on line 146, col 6
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['shiftforlong']")) # from line 146, col 6.
write(u'''\r
\t\t\t\t</div>\r
\t\t\t\t<ul>\r
''')
if VFFSL(SL,"remotegrabscreenshot",True): # generated from line 149, col 6
write(u'''\t\t\t\t\t<li><input type="checkbox" name="remotegrabscreen" checked="checked" />''')
_v = VFFSL(SL,"tstrings",True)['grabscreenshot'] # u"$tstrings['grabscreenshot']" on line 150, col 77
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['grabscreenshot']")) # from line 150, col 77.
write(u'''</li>\r
''')
else: # generated from line 151, col 6
write(u'''\t\t\t\t\t<li><input type="checkbox" name="remotegrabscreen" />''')
_v = VFFSL(SL,"tstrings",True)['grabscreenshot'] # u"$tstrings['grabscreenshot']" on line 152, col 59
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['grabscreenshot']")) # from line 152, col 59.
write(u'''</li>\r
''')
write(u'''\t\t\t\t\t<li><a href="#" onclick="toggleFullRemote(); return false;">''')
_v = VFFSL(SL,"tstrings",True)['showfullremote'] # u"$tstrings['showfullremote']" on line 154, col 66
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['showfullremote']")) # from line 154, col 66.
write(u'''</a></li>\r
\t\t\t\t</ul>\r
\t\t\t</div>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def extrasMenu(self, **KWS):
## CHEETAH: generated from #def extrasMenu at line 159, col 4.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''\t\t\t<ul>\r
''')
for extra in VFFSL(SL,"extras",True): # generated from line 161, col 5
if VFN(VFFSL(SL,"extra",True)["key"],"endswith",False)('lcd4linux/config'): # generated from line 162, col 6
write(u"""\t\t\t\t\t\t<li><a href='""")
_v = VFFSL(SL,"extra",True)["key"] # u'$extra["key"]' on line 163, col 20
if _v is not None: write(_filter(_v, rawExpr=u'$extra["key"]')) # from line 163, col 20.
write(u"""' target='_blank'>""")
_v = VFFSL(SL,"extra",True)["description"] # u'$extra["description"]' on line 163, col 51
if _v is not None: write(_filter(_v, rawExpr=u'$extra["description"]')) # from line 163, col 51.
write(u'''</a></li>\r
''')
else: # generated from line 164, col 6
write(u'''\t\t\t\t\t\t<li><a href=\'#\' onclick="load_maincontent(\'''')
_v = VFFSL(SL,"extra",True)["key"] # u'$extra["key"]' on line 165, col 50
if _v is not None: write(_filter(_v, rawExpr=u'$extra["key"]')) # from line 165, col 50.
write(u'''\'); return false;">''')
_v = VFFSL(SL,"extra",True)["description"] # u'$extra["description"]' on line 165, col 82
if _v is not None: write(_filter(_v, rawExpr=u'$extra["description"]')) # from line 165, col 82.
write(u'''</a></li>\r
''')
write(u'''\t\t\t</ul>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''\r
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\r
<html xmlns="http://www.w3.org/1999/xhtml">\r
<head>\r
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />\r
<link rel="shortcut icon" href="images/favicon.png">\r
<link rel="stylesheet" type="text/css" href="css/style.css" />\r
<link type="text/css" href="css/jquery-ui-1.8.18.custom.css" rel="stylesheet" />\t\r
<script type="text/javascript" src="js/jquery-1.6.2.min.js"></script>\r
<script type="text/javascript" src="js/jquery-ui-1.8.18.custom.min.js"></script>\r
<script type="text/javascript" src="js/openwebif.js"></script>\r
<script type="text/javascript">initJsTranslation(''')
_v = VFFSL(SL,"dumps",False)(VFFSL(SL,"tstrings",True)) # u'$dumps($tstrings)' on line 14, col 50
if _v is not None: write(_filter(_v, rawExpr=u'$dumps($tstrings)')) # from line 14, col 50.
write(u''')</script>\r
<title>Open Webif</title>\r
</head>\r
\r
<body>\r
\t<div id="container">\r
\t\t<div id="header">\r
\t\t\t<h1><a href="/">Open<span class="off">Webif</span></a></h1>\r
''')
if VFFSL(SL,"showname",True): # generated from line 22, col 4
write(u'''\t\t\t<h2>''')
_v = VFFSL(SL,"tstrings",True)['openwebif_header'] # u"$tstrings['openwebif_header']" on line 23, col 8
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['openwebif_header']")) # from line 23, col 8.
write(u'''<br /><span class="off">''')
_v = VFFSL(SL,"boxname",True) # u'$boxname' on line 23, col 61
if _v is not None: write(_filter(_v, rawExpr=u'$boxname')) # from line 23, col 61.
write(u'''</span></h2>\r
''')
else: # generated from line 24, col 4
write(u'''\t\t\t<h2>''')
_v = VFFSL(SL,"tstrings",True)['openwebif_header'] # u"$tstrings['openwebif_header']" on line 25, col 8
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['openwebif_header']")) # from line 25, col 8.
write(u'''</h2>\r
''')
write(u'''\t\t</div>\r
\t\t\r
\t\t<div id="statusheader">\r
\t\t\t<div id="osd">''')
_v = VFFSL(SL,"tstrings",True)['nothing_play'] # u"$tstrings['nothing_play']" on line 30, col 18
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['nothing_play']")) # from line 30, col 18.
write(u'''</div>\r
\t\t\t<div id="osd_status"></div>\r
\t\t\t<div id="osd_bottom"></div>\r
\t\t</div>\r
\t\t\r
\t\t<div id="leftmenu">\r
\t\t\r
\t\t\r
\r
\t\t\r
\t\t\r
\t\t\r
\t\t\t\r
\t\t\t\r
\t\t\r
\t\t\t<div id="menucontainer">\r
\t\t\t\t''')
_v = VFFSL(SL,"menu",False)(VFFSL(SL,"tstrings",True)['main'], "main", VFFSL(SL,"mainMenu",True)) # u'$menu($tstrings[\'main\'], "main", $mainMenu)' on line 172, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$menu($tstrings[\'main\'], "main", $mainMenu)')) # from line 172, col 5.
write(u'''\r
\t\t\t\t''')
_v = VFFSL(SL,"menu",False)(VFFSL(SL,"tstrings",True)['volumecontrol'], "volume", VFFSL(SL,"volumeMenu",True)) # u'$menu($tstrings[\'volumecontrol\'], "volume", $volumeMenu)' on line 173, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$menu($tstrings[\'volumecontrol\'], "volume", $volumeMenu)')) # from line 173, col 5.
write(u'''\r
\t\t\t\t''')
_v = VFFSL(SL,"menu",False)(VFFSL(SL,"tstrings",True)['boxcontrol'], "control", VFFSL(SL,"controlMenu",True)) # u'$menu($tstrings[\'boxcontrol\'], "control", $controlMenu)' on line 174, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$menu($tstrings[\'boxcontrol\'], "control", $controlMenu)')) # from line 174, col 5.
write(u'''\r
\t\t\t\t''')
_v = VFFSL(SL,"menu",False)(VFFSL(SL,"tstrings",True)['remote'], "remote", VFFSL(SL,"remoteMenu",True)) # u'$menu($tstrings[\'remote\'], "remote", $remoteMenu)' on line 175, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$menu($tstrings[\'remote\'], "remote", $remoteMenu)')) # from line 175, col 5.
write(u'''\r
\t\t\t\t''')
_v = VFFSL(SL,"menu",False)(VFFSL(SL,"tstrings",True)['info'], "info", VFFSL(SL,"infoMenu",True)) # u'$menu($tstrings[\'info\'], "info", $infoMenu)' on line 176, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$menu($tstrings[\'info\'], "info", $infoMenu)')) # from line 176, col 5.
write(u'''\r
\t\t\t\t''')
_v = VFFSL(SL,"menu",False)(VFFSL(SL,"tstrings",True)['stream'], "stream", VFFSL(SL,"streamMenu",True)) # u'$menu($tstrings[\'stream\'], "stream", $streamMenu)' on line 177, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$menu($tstrings[\'stream\'], "stream", $streamMenu)')) # from line 177, col 5.
write(u'''\r
\t\t\t\t''')
_v = VFFSL(SL,"menu",False)(VFFSL(SL,"tstrings",True)['extras'], "extras", VFFSL(SL,"extrasMenu",True)) # u'$menu($tstrings[\'extras\'], "extras", $extrasMenu)' on line 178, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$menu($tstrings[\'extras\'], "extras", $extrasMenu)')) # from line 178, col 5.
write(u'''\r
\t\t\t\t''')
_v = VFFSL(SL,"menu",False)(VFFSL(SL,"tstrings",True)['epgsearch'], "search", VFFSL(SL,"searchMenu",True)) # u'$menu($tstrings[\'epgsearch\'], "search", $searchMenu)' on line 179, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$menu($tstrings[\'epgsearch\'], "search", $searchMenu)')) # from line 179, col 5.
write(u'''\r
\t\t\t</div>\r
\t\t\t<div id="remotecontainer" style="display: none;">\r
\t\t\t\t<div id="leftmenu_main">\r
\t\t\t\t\t<div id="leftmenu_top">''')
_v = VFFSL(SL,"tstrings",True)['remote'] # u"$tstrings['remote']" on line 183, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['remote']")) # from line 183, col 29.
write(u'''</div>\r
\t\t\t\t\t<div style="width:100%; text-align:center;">\r
\t\t\t\t\t\t<div id="remote_container" style="width:100%; text-align:center;"></div>\r
\t\t\t\t\t\t<script type="text/javascript">\r
\t\t\t\t\t\t\t$(document).ready(function() {\r
\t\t\t\t\t\t\t\t$("#remote_container").load("static/remotes/''')
_v = VFFSL(SL,"remote",True) # u'${remote}' on line 188, col 54
if _v is not None: write(_filter(_v, rawExpr=u'${remote}')) # from line 188, col 54.
write(u'''.html");\r
\t\t\t\t\t\t\t});\r
\t\t\t\t\t\t</script>\r
\t\t\t\t\t\t<div id="help">\r
\t\t\t\t\t\t\t''')
_v = VFFSL(SL,"tstrings",True)['shiftforlong'] # u"$tstrings['shiftforlong']" on line 192, col 8
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['shiftforlong']")) # from line 192, col 8.
write(u'''\r
\t\t\t\t\t\t</div>\r
\t\t\t\t\t\t<ul>\r
''')
if VFFSL(SL,"remotegrabscreenshot",True): # generated from line 195, col 8
write(u'''\t\t\t\t\t\t\t<li><input type="checkbox" name="remotegrabscreen" checked="checked" />''')
_v = VFFSL(SL,"tstrings",True)['grabscreenshot'] # u"$tstrings['grabscreenshot']" on line 196, col 79
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['grabscreenshot']")) # from line 196, col 79.
write(u'''</li>\r
''')
else: # generated from line 197, col 8
write(u'''\t\t\t\t\t\t\t<li><input type="checkbox" name="remotegrabscreen" />''')
_v = VFFSL(SL,"tstrings",True)['grabscreenshot'] # u"$tstrings['grabscreenshot']" on line 198, col 61
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['grabscreenshot']")) # from line 198, col 61.
write(u'''</li>\r
''')
write(u'''\t\t\t\t\t\t\t<li><a href="#" onclick="toggleFullRemote(); return false;" class="leftmenu_remotelink">''')
_v = VFFSL(SL,"tstrings",True)['hidefullremote'] # u"$tstrings['hidefullremote']" on line 200, col 96
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['hidefullremote']")) # from line 200, col 96.
write(u'''</a></li>\r
\t\t\t\t\t\t</ul>\r
\t\t\t\t\t</div>\r
\t\t\t\t</div>\r
\t\t\t</div>\r
\t\t</div>\r
\t\t\r
\t\t<div id="content">\r
\t\t\t<div id="content_container">\r
\t\t\t''')
_v = VFFSL(SL,"content",True) # u'$content' on line 209, col 4
if _v is not None: write(_filter(_v, rawExpr=u'$content')) # from line 209, col 4.
write(u'''\r
\t\t\t</div>\r
\t\t\t<div id="footer"><h3> <a href="https://github.com/E2OpenPlugins">E2OpenPlugins</a> | <a href="http://www.opena.tv">openATV</a> | <a href="http://www.vuplus-community.net">Black Hole</a> | <a href="http://www.egami-image.com">EGAMI</a> | <a href="http://www.hdfreaks.cc">OpenHDF</a> | <a href="http://www.hdmedia-universe.com">HDMU</a> | <a href="http://openpli.org">OpenPli</a> | <a href="http://forum.sifteam.eu">Sif</a> | <a href="http://openspa.info">OpenSpa</a> | <a href="http://www.world-of-satellite.com">OpenViX</a> | <a href="http://www.droidsat.org">OpenDroid</a> | <a href="http://www.vuplus-support.org">VTi</a></h3></div>\r
\t\t</div>\r
\t</div>\r
\t<form name="portForm" action="web/stream.m3u" method="GET" target="_blank">\r
\t\t<input type="hidden" name="ref">\r
\t\t<input type="hidden" name="name">\r
\t\t<input type="hidden" name="device">\r
\t</form>\r
\t<form name="portFormTs" action="web/ts.m3u" method="GET" target="_blank">\r
\t\t<input type="hidden" name="file">\r
\t\t<input type="hidden" name="device">\r
\t</form>\r
\t<div id="modaldialog"></div>\r
\t<div id="dialog" title="Work in progress" style="display:none">\r
\t\t<p>Sorry, this function is not yet implemented.</p>\r
\t</div>\r
\t<div id="editTimerForm" title="''')
_v = VFFSL(SL,"tstrings",True)['edit_timer'] # u"$tstrings['edit_timer']" on line 227, col 33
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['edit_timer']")) # from line 227, col 33.
write(u'''"></div>\r
\t\r
</body>\r
\r
</html>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_main= 'respond'
## END CLASS DEFINITION
if not hasattr(main, '_initCheetahAttributes'):
templateAPIClass = getattr(main, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(main)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=main()).run()
|
MOA-2011/e2openplugin-OpenWebif
|
plugin/controllers/views/main.py
|
Python
|
gpl-2.0
| 36,828
|
[
"VisIt"
] |
d2f2e62701ef6ef1e309870c97319e11cdf3297a59b6f3485109e7e82790f3f8
|
# Copyright 2017 Codas Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import sys
import skimage.io
import numpy as np
from matplotlib import pyplot as plt
from image_analysis.pipeline import OrientationFilter
from image_analysis.pipeline import Pipeline
from skimage.color import rgb2gray
# Test the bowtie filter (the original sine-wave/test1.jpg input is commented
# out below; a brick texture image is used instead)
# img = skimage.io.imread('../test1.jpg')[:426, :426, :]
img = skimage.io.imread('bricks.jpg')[:1000, :1000, :]
motion_analysis = Pipeline(data=[[img]],
ops=[OrientationFilter('bowtie', 90, 42, 1000, .2,
1000, 'triangle')],
save_all=True)
pipeline_output = motion_analysis.extract()
# print(pipeline_output[-1]['seq_features'])
print(pipeline_output[0]['frame_features']['bowtie_filter'].shape)
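# Note on the output layout, as used below: pipeline_output is indexed per
# frame; pipeline_output[0]['input'] is the raw frame, and
# pipeline_output[0]['frame_features'] holds each op's result keyed by name
# (here 'bowtie_filter').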
# print(motion_analysis.as_ndarray(seq_key='batch_length'))
# motion_analysis.display()
# Now, let's access some of the features extracted from frames
# img_filtered_amp_spectrum = np.load('filtered_img_amp_spectrum.npy')
imgs = [pipeline_output[0]['input'],
pipeline_output[0]['frame_features']['bowtie_filter']]
# filtered_minus_original = imgs[1] - rgb2gray(imgs[0])
# skimage.io.imshow_collection(imgs)
# plt.imshow(rgb2gray(pipeline_output[0]['input']), cmap='gray')
plt.imshow(pipeline_output[0]['frame_features']['bowtie_filter'], cmap='gray')
# plt.imshow(filtered_minus_original)
plt.show()
# plt.imshow(img_filtered_amp_spectrum)
# plt.show()
|
CoDaS-Lab/image_analysis
|
demo/obliquevr.py
|
Python
|
apache-2.0
| 2,088
|
[
"Bowtie"
] |
9fc45bdf4bf24c32bd996e64cd119d4b2a671e21d4da4e968a59c009d1af2dd7
|
from matplotlib import patches
import numpy as np
import pickle
from pybrain.datasets import SupervisedDataSet
from pybrain.structure import RecurrentNetwork, FeedForwardNetwork
from pybrain.structure.connections.full import FullConnection
from pybrain.structure.modules.linearlayer import LinearLayer
from pybrain.structure.modules.sigmoidlayer import SigmoidLayer
from pybrain.supervised import BackpropTrainer
import matplotlib.pyplot as plt
from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.xml import NetworkWriter, NetworkReader
from image_processing import get_cat_dog_trainset, get_cat_dog_testset
from neuromodulation.connection import NMConnection
import root
def generateTrainingData(size=10000, saveAfter=False):
    """
    Creates a supervised training set with 4-dimensional input and
    2-dimensional output, containing `size` samples.
    """
    np.random.seed()
    data = SupervisedDataSet(4, 2)
    # First half: (a, b) small and (c, d) large -> target (-1, 1).
    for _ in xrange(int(size / 2)):
        [a, b] = np.random.random_integers(1, 100, 2)
        [c, d] = np.random.random_integers(100, 500, 2)
        data.addSample((a, b, c, d), (-1, 1))
    # Second half: (a, b) large and (c, d) small -> target (1, -1).
    for _ in xrange(int(size / 2)):
        [a, b] = np.random.random_integers(100, 500, 2)
        [c, d] = np.random.random_integers(1, 100, 2)
        data.addSample((a, b, c, d), (1, -1))
if saveAfter:
data.saveToFile(root.path()+"/res/dataSet")
return data
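# Minimal usage sketch (illustrative only; the helper name is hypothetical):
# build a tiny dataset and print its first (input, target) pair. run() below
# iterates datasets the same way.
def _demoTrainingData():
    ds = generateTrainingData(size=10)
    for inp, targ in ds:
        print "sample:", inp, "->", targ
        break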
def getDatasetFromFile(path=root.path() + "/res/dataSet"):
    # Defaults to the location generateTrainingData() saves to.
    return SupervisedDataSet.loadFromFile(path)
# def getRecNetFromFile(path):
def exportANN(net, fileName = root.path()+"/res/recANN"):
fileObject = open(fileName, 'w')
pickle.dump(net, fileObject)
fileObject.close()
def importANN(fileName = root.path()+"/res/recANN"):
fileObject = open(fileName, 'r')
net = pickle.load(fileObject)
fileObject.close()
return net
def exportRNN(net, fileName = root.path()+"/res/recRNN"):
fileObject = open(fileName, 'w')
pickle.dump(net, fileObject)
fileObject.close()
def importRNN(fileName = root.path()+"/res/recRNN"):
fileObject = open(fileName, 'r')
net = pickle.load(fileObject)
fileObject.close()
return net
def exportRFCNN(net, fileName = root.path()+"/res/recRFCNN"):
fileObject = open(fileName, 'w')
pickle.dump(net, fileObject)
fileObject.close()
def importRFCNN(fileName = root.path()+"/res/recRFCNN"):
fileObject = open(fileName, 'r')
net = pickle.load(fileObject)
fileObject.close()
return net
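# The export*/import* pairs above are symmetric pickle round-trips, e.g.
# (sketch): exportRNN(trainedRNN()); net = importRNN()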
def exportCatDogANN(net, fileName = root.path()+"/res/cat_dog_params"):
arr = net.params
np.save(fileName, arr)
def exportCatDogRNN(net, fileName = root.path()+"/res/cat_dog_nm_params"):
# arr = net.params
# np.save(fileName, arr)
# fileObject = open(fileName+'.pickle', 'w')
# pickle.dump(net, fileObject)
# fileObject.close()
NetworkWriter.writeToFile(net, fileName+'.xml')
def exportCatDogRFCNN(net, fileName = root.path()+"/res/cat_dog_fc_params"):
# arr = net.params
# np.save(fileName, arr)
# fileObject = open(fileName+'.pickle', 'w')
# pickle.dump(net, fileObject)
# fileObject.close()
NetworkWriter.writeToFile(net, fileName+'.xml')
def importCatDogANN(fileName = root.path()+"/res/recCatDogANN"):
n = FeedForwardNetwork()
n.addInputModule(LinearLayer(7500, name='in'))
n.addModule(SigmoidLayer(9000, name='hidden'))
n.addOutputModule(LinearLayer(2, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
n.sortModules()
    # NOTE: exportCatDogANN() above saves to res/cat_dog_params.npy (np.save
    # appends '.npy'); this path expects a '.txt.npy' variant.
    params = np.load(root.path() + '/res/cat_dog_params.txt.npy')
n._setParameters(params)
return n
def importCatDogRNN(fileName = root.path()+"/res/recCatDogANN"):
n = NetworkReader.readFrom(root.path()+"/res/cat_dog_nm_params.xml")
return n
def trainedRNN():
n = RecurrentNetwork()
n.addInputModule(LinearLayer(4, name='in'))
n.addModule(SigmoidLayer(6, name='hidden'))
n.addOutputModule(LinearLayer(2, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
n.addRecurrentConnection(NMConnection(n['out'], n['out'], name='nmc'))
# n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], inSliceFrom = 0, inSliceTo = 1, outSliceFrom = 0, outSliceTo = 3))
n.sortModules()
draw_connections(n)
d = getDatasetFromFile(root.path()+"/res/dataSet")
t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
t.trainOnDataset(d)
count = 0
while True:
globErr = t.train()
print globErr
if globErr < 0.01:
break
count += 1
if count == 50:
return trainedRNN()
# exportRNN(n)
draw_connections(n)
return n
def trainedANN():
n = FeedForwardNetwork()
n.addInputModule(LinearLayer(4, name='in'))
n.addModule(SigmoidLayer(6, name='hidden'))
n.addOutputModule(LinearLayer(2, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
n.sortModules()
draw_connections(n)
# d = generateTrainingData()
d = getDatasetFromFile(root.path()+"/res/dataSet")
t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
t.trainOnDataset(d)
    # FIXME: convergence is not guaranteed, so restart training from scratch
    # if the error has not dropped below 0.01 within 20 epochs
count = 0
while True:
globErr = t.train()
print globErr
if globErr < 0.01:
break
count += 1
if count == 20:
return trainedANN()
exportANN(n)
draw_connections(n)
return n
# Returns a trained recurrent fully connected neural network.
def trainedRFCNN():
n = RecurrentNetwork()
n.addInputModule(LinearLayer(4, name='in'))
n.addModule(SigmoidLayer(6, name='hidden'))
n.addOutputModule(LinearLayer(2, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], name='nmc'))
n.sortModules()
draw_connections(n)
    # d = generateTrainingData()
d = getDatasetFromFile(root.path()+"/res/dataSet")
t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
t.trainOnDataset(d)
    # FIXME: convergence is not guaranteed, so restart training from scratch
    # if the error has not dropped below 0.01 within 100 epochs
count = 0
while True:
globErr = t.train()
print globErr
if globErr < 0.01:
break
count = count + 1
if (count == 100):
return trainedRFCNN()
# for i in range(100):
# print t.train()
exportRFCNN(n)
draw_connections(n)
return n
def draw_connections(net):
for mod in net.modules:
print "Module:", mod.name
if mod.paramdim > 0:
print "--parameters:", mod.params
for conn in net.connections[mod]:
print "-connection to", conn.outmod.name
if conn.paramdim > 0:
print "- parameters", conn.params
if hasattr(net, "recurrentConns"):
print "Recurrent connections"
for conn in net.recurrentConns:
print "-", conn.inmod.name, " to", conn.outmod.name
if conn.paramdim > 0:
print "- parameters", conn.params
def initial_with_zeros(net):
    # NOTE: despite the name, this initializes every parameter to 10.0.
    values = [10.0] * len(net.params)
    net._setParameters(values)
def draw_graphics(net, path_net = None):
    red_patch = patches.Patch(color='red', label='First neuron')
    blue_patch = patches.Patch(color='blue', label='Second neuron')
    orange_patch = patches.Patch(color='orange', label='Both neurons')
    black_patch = patches.Patch(color='black', label='Neither')
    # Each order fixes two of the four inputs at value1/value2 (the 'h'
    # slots) and sweeps the other two over a 50..495 grid ('x' along i,
    # 'y' along j). The order string also names the output subdirectory.
    orders = [('h', 'h', 'x', 'y'), ('h', 'x', 'h', 'y'), ('h', 'x', 'y', 'h'),
              ('x', 'h', 'y', 'h'), ('x', 'y', 'h', 'h'), ('x', 'h', 'h', 'y')]
    k = 0
    for order in orders:
        path = path_net + ';'.join(order) + '/'
        for value1 in [50, 100, 150]:
            for value2 in [50, 100, 150]:
                k = k + 1
                plt.figure(k)
                held = [value1, value2]
                labels = []
                n_fixed = 0
                for slot in order:
                    if slot == 'h':
                        labels.append(str(held[n_fixed]))
                        n_fixed = n_fixed + 1
                    else:
                        labels.append(slot)
                title = "[" + ",".join(labels) + "]"
                plt.title(title)
                for i in range(50, 500, 5):
                    print k, " ", i
                    for j in range(50, 500, 5):
                        inp = []
                        n_fixed = 0
                        for slot in order:
                            if slot == 'h':
                                inp.append(held[n_fixed])
                                n_fixed = n_fixed + 1
                            elif slot == 'x':
                                inp.append(i)
                            else:
                                inp.append(j)
                        activation = net.activate(inp)
                        # Color the point by which output neurons are positive.
                        if activation[0] > np.float32(0.0) and activation[1] <= np.float32(0.0):
                            color = 'red'
                        elif activation[0] <= np.float32(0.0) and activation[1] > np.float32(0.0):
                            color = 'blue'
                        elif activation[0] > np.float32(0.0) and activation[1] > np.float32(0.0):
                            color = 'orange'
                        else:
                            color = 'black'
                        plt.scatter(i, j, c=color, s=20, alpha=0.9, edgecolor='none')
                plt.grid(True)
                plt.legend(handles=[red_patch, blue_patch, orange_patch, black_patch])
                plt.savefig(path + title + '.png')
    # plt.legend(handles=[red_patch, blue_patch, orange_patch, black_patch])
    # plt.show()
def calculateCapacity(net):
    count1st = 0
    count2nd = 0
    both = 0
    neither = 0
    total = 0
    # Sweep the 4-D input space on a coarse grid and count which output
    # neurons respond (are positive) at each point.
    for x1 in range(0, 500, 20):
        for x2 in range(0, 500, 20):
            for x3 in range(0, 500, 20):
                for x4 in range(0, 500, 20):
                    activation = net.activate([x1, x2, x3, x4])
                    total += 1
                    if activation[0] > np.float32(0.0) and activation[1] <= np.float32(0.0):
                        count1st += 1
                    elif activation[0] <= np.float32(0.0) and activation[1] > np.float32(0.0):
                        count2nd += 1
                    elif activation[0] > np.float32(0.0) and activation[1] > np.float32(0.0):
                        both += 1
                    else:
                        neither += 1
        print 'iteration: ', x1
    # Convert raw counts to percentages of the sampled input space.
    count1st = float(count1st) * 100 / float(total)
    count2nd = float(count2nd) * 100 / float(total)
    neither = float(neither) * 100 / float(total)
    both = float(both) * 100 / float(total)
    print '1st: ', count1st
    print '2nd: ', count2nd
    print 'neither: ', neither
    print 'both', both
return count1st, count2nd, both, neither
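# Usage sketch (illustrative): the four returned percentages describe how a
# trained network partitions the sampled input space and sum to ~100, e.g.
# first_pct, second_pct, both_pct, neither_pct = calculateCapacity(trainedRNN())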
def trained_cat_dog_ANN():
n = FeedForwardNetwork()
d = get_cat_dog_trainset()
input_size = d.getDimension('input')
n.addInputModule(LinearLayer(input_size, name='in'))
n.addModule(SigmoidLayer(input_size+1500, name='hidden'))
n.addOutputModule(LinearLayer(2, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
n.sortModules()
n.convertToFastNetwork()
    print 'successfully converted to fast network'
t = BackpropTrainer(n, d, learningrate=0.0001)#, momentum=0.75)
count = 0
while True:
globErr = t.train()
print globErr
count += 1
if globErr < 0.01:
break
if count == 30:
break
exportCatDogANN(n)
return n
def trained_cat_dog_RNN():
n = RecurrentNetwork()
d = get_cat_dog_trainset()
input_size = d.getDimension('input')
n.addInputModule(LinearLayer(input_size, name='in'))
n.addModule(SigmoidLayer(input_size+1500, name='hidden'))
n.addOutputModule(LinearLayer(2, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
n.addRecurrentConnection(NMConnection(n['out'], n['hidden'], name='nmc'))
n.sortModules()
t = BackpropTrainer(n, d, learningrate=0.0001)#, momentum=0.75)
count = 0
while True:
globErr = t.train()
print globErr
count += 1
if globErr < 0.01:
break
if count == 30:
break
exportCatDogRNN(n)
return n
def trained_cat_dog_RFCNN():
n = RecurrentNetwork()
d = get_cat_dog_trainset()
input_size = d.getDimension('input')
n.addInputModule(LinearLayer(input_size, name='in'))
n.addModule(SigmoidLayer(input_size+1500, name='hidden'))
n.addOutputModule(LinearLayer(2, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], name='nmc'))
n.sortModules()
t = BackpropTrainer(n, d, learningrate=0.0001)#, momentum=0.75)
count = 0
while True:
globErr = t.train()
print globErr
count += 1
if globErr < 0.01:
break
if count == 30:
break
exportCatDogRFCNN(n)
return n
def get_class(arr):
len_arr = len(arr)
for i in range(len_arr):
if arr[i] > 0:
arr[i] = 1
else:
arr[i] = 0
return arr
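# Example: get_class thresholds activations at zero in place, so
# get_class([0.7, -0.2]) returns [1, 0].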
def run():
# n = trainedANN()
# n1 = importANN()
total_first = []
total_second = []
total_both = []
total_neither = []
for i in range(10):
n2 = trainedRNN()
res = calculateCapacity(n2)
total_first.append(res[0])
total_second.append(res[1])
total_both.append(res[2])
total_neither.append(res[3])
print 'first: mean', np.mean(total_first), 'variance', np.var(total_first)
print 'second: mean', np.mean(total_second), 'variance', np.var(total_second)
print 'both: mean', np.mean(total_both), 'variance', np.var(total_both)
print 'neither: mean', np.mean(total_neither), 'variance', np.var(total_neither)
exit()
# n2 = importRNN()
# n = trainedRFCNN()
# n3 = importRFCNN()
# draw_graphics(n1, path_net=root.path() + '/Graphics/ANN/')
# draw_graphics(n2, path_net=root.path() + '/Graphics/RNMNN/')
# draw_graphics(n3, path_net=root.path() + '/Graphics/RFCNN/')
# calculateCapacity(n1)
# calculateCapacity(n3)
exit()
# print 'ann:'
# for x in [(1, 15, 150, 160), (1, 15, 150, 160),
# (100, 110, 150, 160), (150, 160, 10, 15),
# (150, 160, 10, 15), (200, 200, 100, 100),
# (10, 15, 300, 250), (250, 300, 15, 10)]:
# print("n.activate(%s) == %s\n" % (x, n.activate(x)))
# calculateCapacity(n)
# draw_graphics(n)
print "hello"
n = importCatDogANN()
# exit()
# n = importCatDogRFCNN()
# NetworkWriter.writeToFile(n, root.path()+'/res/text.xml')
# n = NetworkReader.readFrom(root.path()+'/res/text.xml')
print type(n)
# exit()
ds = get_cat_dog_testset()
for inp, targ in ds:
activate = n.activate(inp)
print "activate:", activate, "expected:", targ
# draw_graphics(n)
# n = 4
# print np.random.random_integers(0, 1, n)
# exit()
# generateTrainingData(saveAfter=True)
if __name__ == "__main__":
run()
"""
RNN(neuromodulation):
1st: 3095
2nd: 2643229
neither: 28162
both 3575514
RNN(neuromodulation new)
1st: 3533955
2nd: 1977645
neither: 0
both 738400
ANN:
1st: 9803
2nd: 46325
neither: 425659
both 5768213
Recurrent fully connected neural network
1st: 504753
2nd: 555727
neither: 1768
both 5187752
"""
|
vitaliykomarov/NEUCOGAR
|
old/main.py
|
Python
|
gpl-2.0
| 22,609
|
[
"NEURON"
] |
ebdc76bd68873a5af8558d49609ee457b10bb4e9ac2d3b93e8267d2949884edf
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import os.path
from unittest.case import expectedFailure
from commoncode.testcase import FileBasedTesting
from cluecode_assert_utils import check_detection
import cluecode.copyrights
class TestTextPreparation(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_strip_numbers(self):
a = 'Python 2.6.6 (r266:84297, Aug 24 2010, 18:46:32) [MSC v.1500 32 bit (Intel)] on win32'
expected = u'Python 2.6.6 (r266:84297, Aug 2010, 18:46:32) [MSC v.1500 bit (Intel)] on win32'
assert expected == cluecode.copyrights.strip_numbers(a)
def test_prepare_text(self):
cp = ''' test (C) all rights reserved'''
result = cluecode.copyrights.prepare_text_line(cp)
assert 'test (c) all rights reserved' == result
def test_is_all_rights_reserved(self):
line = ''' "All rights reserved\\n"'''
assert cluecode.copyrights.is_all_rights_reserved(line)
def test_candidate_lines_simple(self):
lines = ''' test (C) all rights reserved'''.splitlines(False)
result = list(cluecode.copyrights.candidate_lines(lines))
expected = [[(1, ' test (C) all rights reserved')]]
assert expected == result
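    # Note: candidate_lines() yields groups of (line_number, line_text)
    # tuples; each group is a run of contiguous lines likely to contain a
    # copyright statement, as the complex test below shows.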
def test_candidate_lines_complex(self):
lines = '''
Apache Xalan (Xalan XSLT processor)
Copyright 1999-2006 The Apache Software Foundation
Apache Xalan (Xalan serializer)
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
=========================================================================
Portions of this software was originally based on the following:
- software copyright (c) 1999-2002, Lotus Development Corporation.,
http://www.lotus.com.
- software copyright (c) 2001-2002, Sun Microsystems.,
http://www.sun.com.
- software copyright (c) 2003, IBM Corporation.,
http://www.ibm.com.
=========================================================================
The binary distribution package (ie. jars, samples and documentation) of
this product includes software developed by the following:
'''.splitlines(False)
expected = [
[(2, ' Apache Xalan (Xalan XSLT processor)'),
(3, ' Copyright 1999-2006 The Apache Software Foundation')],
[(7, ' This product includes software developed at'),
(8, ' The Apache Software Foundation (http://www.apache.org/).')],
[(12, ' Portions of this software was originally based on the following:'),
(13, ' - software copyright (c) 1999-2002, Lotus Development Corporation.,'),
(14, ' http://www.lotus.com.'),
(15, ' - software copyright (c) 2001-2002, Sun Microsystems.,'),
(16, ' http://www.sun.com.'),
(17, ' - software copyright (c) 2003, IBM Corporation.,'),
(18, ' http://www.ibm.com.')],
[(21, ' The binary distribution package (ie. jars, samples and documentation) of'),
(22, ' this product includes software developed by the following:')]
]
result = list(cluecode.copyrights.candidate_lines(lines))
assert expected == result
class TestCopyrightDetector(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_copyright_detect(self):
location = self.get_test_loc('copyrights/copyright_essential_smoke-ibm_c.c')
expected = [
u'Copyright IBM and others (c) 2008',
u'Copyright Eclipse, IBM and others (c) 2008'
]
copyrights, _, _, _ = cluecode.copyrights.detect(location)
assert expected == copyrights
class TestCopyrightDetection(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_company_name_in_java(self):
test_file = self.get_test_loc('copyrights/company_name_in_java-9_java.java')
expected = [
u'Copyright (c) 2008-2011 Company Name Incorporated',
]
check_detection(expected, test_file)
def test_copyright_03e16f6c_0(self):
test_file = self.get_test_loc('copyrights/copyright_03e16f6c_0-e_f_c.0')
expected = [
u'Copyright (c) 1997 Microsoft Corp.',
u'Copyright (c) 1997 Microsoft Corp.',
u'(c) 1997 Microsoft',
]
check_detection(expected, test_file,
expected_in_results=True,
results_in_expected=False)
def test_copyright_3a3b02ce_0(self):
# this is a certificate and the actual copyright holder is not clear:
# could be either Wisekey or OISTE Foundation.
test_file = self.get_test_loc('copyrights/copyright_3a3b02ce_0-a_b_ce.0')
expected = [
u'Copyright (c) 2005',
u'Copyright (c) 2005',
]
check_detection(expected, test_file,
expected_in_results=True,
results_in_expected=False)
def test_copyright_ABC_cpp(self):
test_file = self.get_test_loc('copyrights/copyright_ABC_cpp-Case_cpp.cpp')
expected = [
u'Copyright (c) ABC Company',
]
check_detection(expected, test_file)
def test_copyright_ABC_file_cpp(self):
test_file = self.get_test_loc('copyrights/copyright_ABC_file_cpp-File_cpp.cpp')
expected = [
u'Copyright (c) ABC Company',
]
check_detection(expected, test_file)
def test_copyright_false_positive_in_c(self):
test_file = self.get_test_loc('copyrights/copyright_false_positive_in_c-false_positives_c.c')
expected = []
check_detection(expected, test_file)
def test_copyright_false_positive_in_js(self):
test_file = self.get_test_loc('copyrights/copyright_false_positive_in_js-editor_beta_de_js.js')
expected = []
check_detection(expected, test_file)
def test_copyright_false_positive_in_license(self):
test_file = self.get_test_loc('copyrights/copyright_false_positive_in_license-LICENSE')
expected = []
check_detection(expected, test_file)
def test_copyright_heunrich_c(self):
test_file = self.get_test_loc('copyrights/copyright_heunrich_c-c.c')
expected = [
u'Copyright (c) 2000 HEUNRICH HERTZ INSTITUTE',
]
check_detection(expected, test_file)
def test_copyright_isc(self):
test_file = self.get_test_loc('copyrights/copyright_isc-c.c')
expected = [
u'Copyright (c) 1998-2000 The Internet Software Consortium.',
]
check_detection(expected, test_file)
def test_copyright_json_phps_html_incorrect(self):
test_file = self.get_test_loc('copyrights/copyright_json_phps_html_incorrect-JSON_phps_html.html')
expected = []
check_detection(expected, test_file)
def test_copyright_no_copyright_in_class_file_1(self):
test_file = self.get_test_loc('copyrights/copyright_no_copyright_in_class_file_1-PersistentArrayHolder_class.class')
expected = []
check_detection(expected, test_file)
def test_copyright_sample_py(self):
test_file = self.get_test_loc('copyrights/copyright_sample_py-py.py')
expected = [
u'COPYRIGHT 2006',
]
check_detection(expected, test_file)
def test_copyright_abc(self):
test_file = self.get_test_loc('copyrights/copyright_abc')
expected = [
u'Copyright (c) 2006 abc.org',
]
check_detection(expected, test_file)
def test_copyright_abc_loss_of_holder_c(self):
test_file = self.get_test_loc('copyrights/copyright_abc_loss_of_holder_c-c.c')
expected = [
u'copyright abc 2001',
]
check_detection(expected, test_file)
def test_copyright_abiword_common_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_abiword_common_copyright-abiword_common_copyright.copyright')
expected = [
u'Copyright (c) 1998- AbiSource, Inc.',
u'Copyright (c) 2009 Masayuki Hatta',
u'Copyright (c) 2009 Patrik Fimml <patrik@fimml.at>',
]
check_detection(expected, test_file)
def test_copyright_acme_c(self):
test_file = self.get_test_loc('copyrights/copyright_acme_c-c.c')
expected = [
u'Copyright (c) 2000 ACME, Inc.',
]
check_detection(expected, test_file)
def test_copyright_activefieldattribute_cs(self):
test_file = self.get_test_loc('copyrights/copyright_activefieldattribute_cs-ActiveFieldAttribute_cs.cs')
expected = [
u'Web Applications Copyright 2009 - Thomas Hansen thomas@ra-ajax.org.',
]
check_detection(expected, test_file)
def test_copyright_addr_c(self):
test_file = self.get_test_loc('copyrights/copyright_addr_c-addr_c.c')
expected = [
u'Copyright 1999 Cornell University.',
u'Copyright 2000 Jon Doe.',
]
check_detection(expected, test_file)
def test_copyright_apostrophe_in_name(self):
test_file = self.get_test_loc('copyrights/copyright_with_apos.txt')
expected = [
u"Copyright Marco d'Itri <md@Linux.IT>",
u"Copyright Marco d'Itri",
]
check_detection(expected, test_file)
def test_copyright_adler_inflate_c(self):
test_file = self.get_test_loc('copyrights/copyright_adler_inflate_c-inflate_c.c')
expected = [
u'Not copyrighted 1992 by Mark Adler',
]
check_detection(expected, test_file)
def test_copyright_adobe_flashplugin_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_adobe_flashplugin_copyright_label-adobe_flashplugin_copyright_label.label')
expected = [
u'Copyright (c) 1996 - 2008. Adobe Systems Incorporated',
u'(c) 2001-2009, Takuo KITAME, Bart Martens, and Canonical, LTD',
]
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_aleal(self):
test_file = self.get_test_loc('copyrights/copyright_aleal-c.c')
expected = [
u'copyright (c) 2006 by aleal',
]
check_detection(expected, test_file)
def test_copyright_andre_darcy(self):
test_file = self.get_test_loc('copyrights/copyright_andre_darcy-c.c')
expected = [
u'Copyright (c) 1995, Pascal Andre (andre@via.ecp.fr).',
u"copyright 1997, 1998, 1999 by D'Arcy J.M. Cain (darcy@druid.net)",
]
check_detection(expected, test_file)
def test_copyright_android_c(self):
test_file = self.get_test_loc('copyrights/copyright_android_c-c.c')
expected = [
u'Copyright (c) 2009 The Android Open Source Project',
u'Copyright 2003-2005 Colin Percival',
]
check_detection(expected, test_file)
def test_copyright_apache2_debian_trailing_name_missed(self):
test_file = self.get_test_loc('copyrights/copyright_apache2_debian_trailing_name_missed-apache_copyright_label.label')
expected = [
u'copyright Steinar H. Gunderson <sgunderson@bigfoot.com> and Knut Auvor Grythe <knut@auvor.no>',
u'Copyright (c) 1996-1997 Cisco Systems, Inc.',
u'Copyright (c) Ian F. Darwin',
u'Copyright (c) Ian F. Darwin 1986, 1987, 1989, 1990, 1991, 1992, 1994, 1995.',
u'copyright 1992 by Eric Haines, erich@eye.com',
u'Copyright (c) 1995, Board of Trustees of the University of Illinois',
u'Copyright (c) 1994, Jeff Hostetler, Spyglass, Inc.',
u'Copyright (c) 1993, 1994 by Carnegie Mellon University',
u'Copyright (c) 1991 Bell Communications Research, Inc.',
u'(c) Copyright 1993,1994 by Carnegie Mellon University',
u'Copyright (c) 1991 Bell Communications Research, Inc.',
u'Copyright RSA Data Security, Inc.',
u'Copyright (c) 1991-2, RSA Data Security, Inc.',
u'Copyright RSA Data Security, Inc.',
u'Copyright (c) 1991-2, RSA Data Security, Inc.',
u'copyright RSA Data Security, Inc.',
u'Copyright (c) 1991-2, RSA Data Security, Inc.',
u'copyright RSA Data Security, Inc.',
u'Copyright (c) 1991-2, RSA Data Security, Inc.',
u'Copyright (c) 2000-2002 The Apache Software Foundation',
u'copyright RSA Data Security, Inc.',
u'Copyright (c) 1990-2, RSA Data Security, Inc.',
u'Copyright 1991 by the Massachusetts Institute of Technology',
u'Copyright 1991 by the Massachusetts Institute of Technology',
u'Copyright (c) 1997-2001 University of Cambridge',
u'copyright by the University of Cambridge, England.',
u'Copyright (c) Zeus Technology Limited 1996.',
u'Copyright (c) 1998, 1999, 2000 Thai Open Source Software Center Ltd and Clark Cooper',
]
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_apache_notice(self):
test_file = self.get_test_loc('copyrights/copyright_apache_notice-NOTICE')
expected = [
u'Copyright 1999-2006 The Apache Software Foundation',
u'Copyright 1999-2006 The Apache Software Foundation',
u'Copyright 2001-2003,2006 The Apache Software Foundation.',
u'copyright (c) 2000 World Wide Web Consortium',
]
check_detection(expected, test_file)
def test_copyright_aptitude_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_aptitude_copyright_label-aptitude_copyright_label.label')
expected = [
u'Copyright 1999-2005 Daniel Burrows <dburrows@debian.org>',
]
check_detection(expected, test_file)
def test_copyright_atheros_spanning_lines(self):
test_file = self.get_test_loc('copyrights/copyright_atheros_spanning_lines-py.py')
expected = [
u'Copyright (c) 2000 Atheros Communications, Inc.',
u'Copyright (c) 2001 Atheros Communications, Inc.',
u'Copyright (c) 1994-1997 by Intel Corporation.',
]
check_detection(expected, test_file)
def test_copyright_att_in_c(self):
test_file = self.get_test_loc('copyrights/copyright_att_in_c-9_c.c')
expected = [
u'Copyright (c) 1991 by AT&T.',
]
check_detection(expected, test_file)
def test_copyright_audio_c(self):
test_file = self.get_test_loc('copyrights/copyright_audio_c-c.c')
expected = [
u'copyright (c) 1995, AudioCodes, DSP Group, France Telecom, Universite de Sherbrooke.',
]
check_detection(expected, test_file)
def test_copyright_babkin_txt(self):
test_file = self.get_test_loc('copyrights/copyright_babkin_txt.txt')
expected = [
u'Copyright (c) North',
u'Copyright (c) South',
u'Copyright (c) 2001 by the TTF2PT1 project',
u'Copyright (c) 2001 by Sergey Babkin',
]
check_detection(expected, test_file)
def test_copyright_blender_debian(self):
test_file = self.get_test_loc('copyrights/copyright_blender_debian-blender_copyright.copyright')
expected = [
u'Copyright (c) 2002-2008 Blender Foundation',
u'Copyright (c) 2004-2005 Masayuki Hatta <mhatta@debian.org>',
u'(c) 2005-2007 Florian Ernst <florian@debian.org>',
u'(c) 2007-2008 Cyril Brulebois <kibi@debian.org>',
]
check_detection(expected, test_file)
def test_copyright_blue_sky_dash_in_name(self):
test_file = self.get_test_loc('copyrights/copyright_blue_sky_dash_in_name-c.c')
expected = [
u'Copyright (c) 1995, 1996 - Blue Sky Software Corp. -',
]
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_bouncy_license(self):
test_file = self.get_test_loc('copyrights/copyright_bouncy_license-LICENSE')
expected = [
u'Copyright (c) 2000-2005 The Legion Of The Bouncy Castle',
]
check_detection(expected, test_file)
def test_copyright_bouncy_notice(self):
test_file = self.get_test_loc('copyrights/copyright_bouncy_notice-9_NOTICE')
expected = [
u'Copyright (c) 2000-2005 The Legion Of The Bouncy Castle',
]
check_detection(expected, test_file)
def test_copyright_btt_plot1_py(self):
test_file = self.get_test_loc('copyrights/copyright_btt_plot1_py-btt_plot_py.py')
expected = [
u'(c) Copyright 2009 Hewlett-Packard Development Company',
]
check_detection(expected, test_file)
def test_copyright_camelcase_bug_br_fcc_thread_psipstack_c(self):
test_file = self.get_test_loc('copyrights/copyright_camelcase_bug_br_fcc_thread_psipstack_c-br_fcc_thread_psipstack_c.c')
expected = [
u'Copyright 2010-2011 by BitRouter',
]
check_detection(expected, test_file)
def test_copyright_ccube_txt(self):
test_file = self.get_test_loc('copyrights/copyright_ccube_txt.txt')
expected = [
u'Copyright (c) 2001 C-Cube Microsystems.',
]
check_detection(expected, test_file)
def test_copyright_cedrik_java(self):
test_file = self.get_test_loc('copyrights/copyright_cedrik_java-java.java')
expected = [
u'copyright (c) 2005-2006 Cedrik LIME',
]
check_detection(expected, test_file,
expected_in_results=True,
results_in_expected=False)
def test_copyright_cern(self):
test_file = self.get_test_loc('copyrights/copyright_cern-TestMatrix_D_java.java')
expected = [
u'Copyright 1999 CERN - European Organization for Nuclear Research.',
]
check_detection(expected, test_file)
def test_copyright_cern_matrix2d_java(self):
test_file = self.get_test_loc('copyrights/copyright_cern_matrix2d_java-TestMatrix_D_java.java')
expected = [
u'Copyright 1999 CERN - European Organization for Nuclear Research.',
u'Copyright (c) 1998 Company PIERSOL Engineering Inc.',
u'Copyright (c) 1998 Company PIERSOL Engineering Inc.',
]
check_detection(expected, test_file)
def test_copyright_chameleon_assembly(self):
test_file = self.get_test_loc('copyrights/copyright_chameleon_assembly-9_9_setjmp_S.S')
expected = [
u'Copyright Chameleon Systems, 1999',
]
check_detection(expected, test_file)
def test_copyright_co_cust(self):
test_file = self.get_test_loc('copyrights/copyright_co_cust-copyright_java.java')
expected = [
u'Copyright (c) 2009 Company Customer Identity Hidden',
]
check_detection(expected, test_file)
def test_copyright_colin_android(self):
test_file = self.get_test_loc('copyrights/copyright_colin_android-bsdiff_c.c')
expected = [
u'Copyright (c) 2009 The Android Open Source Project',
u'Copyright 2003-2005 Colin Percival',
]
check_detection(expected, test_file)
def test_copyright_company_in_txt(self):
test_file = self.get_test_loc('copyrights/copyright_company_in_txt-9.txt')
expected = [
u'Copyright (c) 2008-2011 Company Name Incorporated',
]
check_detection(expected, test_file)
def test_copyright_complex_4_line_statement_in_text(self):
test_file = self.get_test_loc('copyrights/copyright_complex_4_line_statement_in_text-9.txt')
expected = [
u'Copyright 2002 Jonas Borgstrom <jonas@codefactory.se> 2002 Daniel Lundin <daniel@codefactory.se> 2002 CodeFactory AB',
u'Copyright (c) 1994 The Regents of the University of California',
]
check_detection(expected, test_file)
def test_copyright_complex_notice(self):
test_file = self.get_test_loc('copyrights/copyright_complex_notice-NOTICE')
expected = [
u'Copyright (c) 2003, Steven G. Kargl',
u'Copyright (c) 2003 Mike Barcroft <mike@FreeBSD.org>',
u'Copyright (c) 2002, 2003 David Schultz <das@FreeBSD.ORG>',
u'Copyright (c) 2003 David Schultz <das@FreeBSD.ORG>',
u'Copyright (c) 2004 David Schultz <das@FreeBSD.ORG>',
u'Copyright (c) 2004-2005 David Schultz <das@FreeBSD.ORG>',
u'Copyright (c) 2005 David Schultz <das@FreeBSD.ORG>',
u'Copyright (c) 2002 David Schultz <das@FreeBSD.ORG>',
u'Copyright (c) 2004 Stefan Farfeleder',
u'Copyright (c) 2003 Dag-Erling Coidan Smrgrav',
u'Copyright (c) 1996 The NetBSD Foundation, Inc.',
u'Copyright (c) 1985, 1993',
u'Copyright (c) 1988, 1993',
u'Copyright (c) 1992, 1993 The Regents of the University of California.',
u'Copyright (c) 1993,94 Winning Strategies, Inc.',
u'Copyright (c) 1994 Winning Strategies, Inc.',
u'Copyright (c) 1993 by Sun Microsystems, Inc.',
u'Copyright (c) 1993 by Sun Microsystems, Inc.',
u'Copyright (c) 1993 by Sun Microsystems, Inc.',
u'Copyright (c) 2004 by Sun Microsystems, Inc.',
u'Copyright (c) 2004 Stefan Farfeleder',
u'Copyright (c) 2004 David Schultz <das@FreeBSD.org>',
u'Copyright (c) 2004, 2005 David Schultz <das@FreeBSD.org>',
u'Copyright (c) 2003 Mike Barcroft <mike@FreeBSD.org>',
u'Copyright (c) 2005 David Schultz <das@FreeBSD.org>',
u'Copyright (c) 2003, Steven G. Kargl',
u'Copyright (c) 1991 The Regents of the University of California.',
]
check_detection(expected, test_file)
def test_copyright_complex_notice_sun_microsystems_on_multiple_lines(self):
test_file = self.get_test_loc('copyrights/copyright_complex_notice_sun_microsystems_on_multiple_lines-NOTICE')
expected = [
u'Copyright 1999-2006 The Apache Software Foundation',
u'copyright (c) 1999-2002, Lotus Development',
u'copyright (c) 2001-2002, Sun Microsystems.',
u'copyright (c) 2003, IBM Corporation., http://www.ibm.com.',
u'copyright (c) 1999, IBM Corporation., http://www.ibm.com.',
u'copyright (c) 1999, Sun Microsystems.',
u'copyright (c) 1999, IBM Corporation., http://www.ibm.com.',
u'copyright (c) 1999, Sun Microsystems.',
]
check_detection(expected, test_file)
def test_copyright_config(self):
test_file = self.get_test_loc('copyrights/copyright_config-config_guess.guess')
expected = [
u'Copyright (c) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.',
]
check_detection(expected, test_file)
def test_copyright_config1_guess(self):
test_file = self.get_test_loc('copyrights/copyright_config1_guess-config_guess.guess')
expected = [
u'Copyright (c) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.',
]
check_detection(expected, test_file)
def test_copyright_copyright_camelcase_br_diagnostics_h(self):
test_file = self.get_test_loc('copyrights/copyright_copyright_camelcase_br_diagnostics_h-br_diagnostics_h.h')
expected = [
u'Copyright 2011 by BitRouter',
]
check_detection(expected, test_file)
def test_copyright_coreutils_debian(self):
test_file = self.get_test_loc('copyrights/copyright_coreutils_debian-coreutils_copyright.copyright')
expected = [
u'Copyright (c) 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.',
u'Copyright (c) 1990, 1993, 1994 The Regents of the University of California',
u'Copyright (c) 2004, 2005, 2006, 2007 Free Software Foundation, Inc.',
u'Copyright (c) 1989, 1993 The Regents of the University of California',
u'Copyright (c) 1999-2006 Free Software Foundation, Inc.',
u'Copyright (c) 1997, 1998, 1999 Colin Plumb',
u'Copyright (c) 2005, 2006 Free Software Foundation, Inc.',
u'Copyright (c) 1996-1999 by Internet Software Consortium',
u'Copyright (c) 2004, 2006, 2007 Free Software Foundation, Inc.',
u'Copyright (c) 1997-2007 Free Software Foundation, Inc.',
u'Copyright (c) 1984 David M. Ihnat',
u'Copyright (c) 1996-2007 Free Software Foundation, Inc.',
u'Copyright (c) 1994, 1995, 1997, 1998, 1999, 2000 H. Peter Anvin',
u'Copyright (c) 1997-2005 Free Software Foundation, Inc.',
u'Copyright (c) 1984 David M. Ihnat',
u'Copyright (c) 1999-2007 Free Software Foundation, Inc.',
u'Copyright (c) 1997, 1998, 1999 Colin Plumb',
u'Copyright 1994-1996, 2000-2008 Free Software Foundation, Inc.',
u'Copyright (c) 1984-2008 Free Software Foundation, Inc.',
]
check_detection(expected, test_file)
def test_copyright_dag_c(self):
test_file = self.get_test_loc('copyrights/copyright_dag_c-s_fabsl_c.c')
expected = [
u'Copyright (c) 2003 Dag-Erling Coidan Smrgrav',
]
check_detection(expected, test_file)
def test_copyright_dag_elring_notice(self):
test_file = self.get_test_loc('copyrights/copyright_dag_elring_notice-NOTICE')
expected = [
u'Copyright (c) 2003 Dag-Erling Codan Smrgrav',
]
check_detection(expected, test_file)
def test_copyright_dash_in_name(self):
test_file = self.get_test_loc('copyrights/copyright_dash_in_name-Makefile')
expected = [
u'(c) 2011 - Anycompany, LLC',
]
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_dasher_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_dasher_copyright_label-dasher_copyright_label.label')
expected = [
u'Copyright (c) 1998-2008 The Dasher Project',
]
check_detection(expected, test_file)
def test_copyright_date_range_dahua_in_c(self):
test_file = self.get_test_loc('copyrights/copyright_date_range_dahua_in_c-c.c')
expected = [
u'(c) Copyright 2006 to 2007 Dahua Digital.',
]
check_detection(expected, test_file)
def test_copyright_date_range_in_c(self):
test_file = self.get_test_loc('copyrights/copyright_date_range_in_c-c.c')
expected = [
u'Copyright (c) ImageSilicon Tech. (2006 - 2007)',
]
check_detection(expected, test_file)
def test_copyright_date_range_in_c_2(self):
test_file = self.get_test_loc('copyrights/copyright_date_range_in_c_2-c.c')
expected = [
u'(c) Copyright 2005 to 2007 ImageSilicon? Tech.,ltd',
]
check_detection(expected, test_file)
def test_copyright_debian_archive_keyring_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_debian_archive_keyring_copyright-debian_archive_keyring_copyright.copyright')
expected = [
u'Copyright (c) 2006 Michael Vogt <mvo@debian.org>',
]
check_detection(expected, test_file)
def test_copyright_debian_lib_1(self):
test_file = self.get_test_loc('copyrights/copyright_debian_lib_1-libmono_cairo_cil_copyright_label.label')
expected = [
u'Copyright 2004 The Apache Software Foundation',
u'Copyright (c) 2001-2005 Novell',
u'Copyright (c) Microsoft Corporation',
u'Copyright (c) 2007 James Newton-King',
u'Copyright (c) 2002-2004 James W. Newkirk , Michael C. Two , Alexei A. Vorontsov , Charlie Poole',
u'Copyright (c) 2000-2004 Philip A. Craig',
u'Copyright (c) 2002-2004 James W. Newkirk , Michael C. Two , Alexei A. Vorontsov , Charlie Poole',
u'Copyright (c) 2000-2004 Philip A. Craig',
u'Copyright (c) 2007, 2008 LShift Ltd.',
u'Copyright (c) 2007, 2008 Cohesive Financial Technologies LLC.',
u'Copyright (c) 2007, 2008 Rabbit Technologies Ltd.',
u'Copyright (c) 2007, 2008 LShift Ltd. , Cohesive Financial Technologies', # LLC., and Rabbit Technologies Ltd.',
u'Copyright (c) 2007, 2008 LShift Ltd. , Cohesive Financial Technologies', # LLC., and Rabbit Technologies Ltd.',
u'Copyright (c) 2007 LShift Ltd. , Cohesive Financial Technologies', # LLC., and Rabbit Technologies Ltd.',
u'Copyright (c) ???? Simon Mourier <simonm@microsoft.com>',
]
check_detection(expected, test_file)
def test_copyright_debian_lib_2(self):
test_file = self.get_test_loc('copyrights/copyright_debian_lib_2-libmono_cairo_cil_copyright.copyright')
expected = [
u'Copyright 2004 The Apache Software Foundation',
u'Copyright (c) 2001-2005 Novell',
u'Copyright (c) Microsoft Corporation',
u'Copyright (c) 2007 James Newton-King',
u'Copyright (c) 2002-2004 James W. Newkirk , Michael C. Two , Alexei A. Vorontsov , Charlie Poole',
u'Copyright (c) 2000-2004 Philip A. Craig',
u'Copyright (c) 2002-2004 James W. Newkirk , Michael C. Two , Alexei A. Vorontsov , Charlie Poole',
u'Copyright (c) 2000-2004 Philip A. Craig',
u'Copyright (c) 2007, 2008 LShift Ltd.',
u'Copyright (c) 2007, 2008 Cohesive Financial Technologies LLC.',
u'Copyright (c) 2007, 2008 Rabbit Technologies Ltd.',
u'Copyright (c) 2007, 2008 LShift Ltd., Cohesive Financial Technologies LLC.',
u'Copyright (c) 2007, 2008 LShift Ltd. , Cohesive Financial Technologies',
u'Copyright (c) 2007 LShift Ltd. , Cohesive Financial Technologies',
u'Copyright (c) ???? Simon Mourier <simonm@microsoft.com>',
]
check_detection(expected, test_file)
def test_copyright_debian_lib_3(self):
test_file = self.get_test_loc('copyrights/copyright_debian_lib_3-libmono_security_cil_copyright.copyright')
expected = [
u'Copyright 2004 The Apache Software Foundation',
u'Copyright (c) 2001-2005 Novell',
u'Copyright (c) Microsoft Corporation',
u'Copyright (c) 2007 James Newton-King',
u'Copyright (c) 2002-2004 James W. Newkirk , Michael C. Two , Alexei A. Vorontsov , Charlie Poole',
u'Copyright (c) 2000-2004 Philip A. Craig',
u'Copyright (c) 2002-2004 James W. Newkirk , Michael C. Two , Alexei A. Vorontsov , Charlie Poole',
u'Copyright (c) 2000-2004 Philip A. Craig',
u'Copyright (c) 2007, 2008 LShift Ltd.',
u'Copyright (c) 2007, 2008 Cohesive Financial Technologies LLC.',
u'Copyright (c) 2007, 2008 Rabbit Technologies Ltd.',
u'Copyright (c) 2007, 2008 LShift Ltd. , Cohesive Financial Technologies',
u'Copyright (c) 2007, 2008 LShift Ltd. , Cohesive Financial Technologies',
u'Copyright (c) 2007 LShift Ltd., Cohesive Financial Technologies LLC., and Rabbit Technologies Ltd.',
u'Copyright (c) ???? Simon Mourier <simonm@microsoft.com>',
]
check_detection(expected, test_file)
def test_copyright_debian_multi_names_on_one_line(self):
test_file = self.get_test_loc('copyrights/copyright_debian_multi_names_on_one_line-libgdata__copyright.copyright')
expected = [
u'Copyright 1999-2004 Ximian, Inc. 1999-2005 Novell, Inc.',
u'copyright 2000-2003 Ximian, Inc. , 2003 Gergo Erdi',
u'copyright 2000 Eskil Heyn Olsen , 2000 Ximian, Inc.',
u'copyright 1998 The Free Software Foundation , 2000 Ximian, Inc.',
u'copyright 1998-2005 The OpenLDAP Foundation',
u'Copyright 1999-2003 The OpenLDAP Foundation , Redwood City, California',
u'Copyright 1999-2000 Eric Busboom , The Software Studio (http://www.softwarestudio.org) 2001 Critical Path Authors',
u'(c) Copyright 1996 Apple Computer, Inc. , AT&T Corp., International Business Machines Corporation and Siemens Rolm Communications Inc.',
u'Copyright (c) 1997 Theo de Raadt',
u'copyright 2000 Andrea Campi',
u'copyright 2002 Andrea Campi',
u'copyright 2003 Andrea Campi',
u'Copyright 2002 Jonas Borgstrom <jonas@codefactory.se> 2002 Daniel Lundin <daniel@codefactory.se> 2002 CodeFactory AB',
u'copyright 1996 Apple Computer, Inc. , AT&T Corp. , International Business Machines Corporation and Siemens Rolm Communications Inc.',
u'copyright 1986-2000 Hiram Clawson',
u'copyright 1997 Theo de Raadt',
u'Copyright (c) 1996-2002 Sleepycat Software',
u'Copyright (c) 1990, 1993, 1994, 1995, 1996 Keith Bostic',
u'Copyright (c) 1990, 1993, 1994, 1995 The Regents of the University of California',
]
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_dionysos_c(self):
test_file = self.get_test_loc('copyrights/copyright_dionysos_c-c.c')
expected = [
u'COPYRIGHT (c) 2006 - 2009 DIONYSOS',
u'COPYRIGHT (c) ADIONYSOS 2006 - 2009',
u'COPYRIGHT (c) ADIONYSOS2 2006',
u'COPYRIGHT (c) MyCompany 2006 - 2009',
u'COPYRIGHT (c) 2006 MyCompany2',
u'COPYRIGHT (c) 2024 DIONYSOS2',
u'copyright (c) 2006 - 2009 DIONYSOS',
u'copyright (c) ADIONYSOS 2006 - 2009',
u'copyright (c) ADIONYSOS2 2006',
u'copyright (c) MyCompany 2006 - 2009',
u'copyright (c) 2006 MyCompany2',
u'copyright (c) 2024 DIONYSOS2',
]
check_detection(expected, test_file)
def test_copyright_disclaimed(self):
test_file = self.get_test_loc('copyrights/copyright_disclaimed-c.c')
expected = [
u'Copyright disclaimed 2003 by Andrew Clarke',
]
check_detection(expected, test_file)
def test_copyright_djvulibre_desktop_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_djvulibre_desktop_copyright-djvulibre_desktop_copyright.copyright')
expected = [
u'Copyright (c) 2002 Leon Bottou and Yann Le Cun',
u'Copyright (c) 2001 AT&T',
u'Copyright (c) 1999-2001 LizardTech, Inc.',
]
check_detection(expected, test_file)
def test_copyright_docbook_xsl_doc_html_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_docbook_xsl_doc_html_copyright-docbook_xsl_doc_html_copyright.copyright')
expected = [
u'Copyright (c) 1999-2007 Norman Walsh',
u'Copyright (c) 2003 Jiri Kosek',
u'Copyright (c) 2004-2007 Steve Ball',
u'Copyright (c) 2005-2008 The DocBook Project',
]
check_detection(expected, test_file)
def test_copyright_drand48_c(self):
test_file = self.get_test_loc('copyrights/copyright_drand48_c-drand_c.c')
expected = [
u'Copyright (c) 1993 Martin Birgmeier',
]
check_detection(expected, test_file)
def test_copyright_ed_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_ed_copyright-ed_copyright.copyright')
expected = [
u'Copyright (c) 1993, 1994 Andrew Moore , Talke Studio',
u'Copyright (c) 2006, 2007 Antonio Diaz Diaz',
u'Copyright (c) 1997-2007 James Troup',
u'Copyright (c) 1993, 2006, 2007 Free Software Foundation, Inc.',
]
check_detection(expected, test_file)
def test_copyright_epiphany_browser_data_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_epiphany_browser_data_copyright_label-epiphany_browser_data_copyright_label.label')
expected = [
u'Copyright (c) 2004 the Initial Developer.',
u'(c) 2003-2007, the Debian GNOME team <pkg-gnome-maintainers@lists.alioth.debian.org>',
]
check_detection(expected, test_file)
def test_copyright_eric_young_c(self):
test_file = self.get_test_loc('copyrights/copyright_eric_young_c-c.c')
expected = [
u'Copyright (c) 1995-1997 Eric Young (eay@mincom.oz.au)',
]
check_detection(expected, test_file)
def test_copyright_errno_atheros(self):
test_file = self.get_test_loc('copyrights/copyright_errno_atheros-c.c')
expected = [
'Copyright (c) 2002-2006 Sam Leffler, Errno Consulting, Atheros Communications, Inc.',
]
check_detection(expected, test_file)
def test_copyright_errno_atheros_ah_h(self):
test_file = self.get_test_loc('copyrights/copyright_errno_atheros_ah_h-ah_h.h')
expected = [
u'Copyright (c) 2002-2006 Sam Leffler, Errno Consulting, Atheros Communications, Inc.',
]
check_detection(expected, test_file)
def test_copyright_errno_c(self):
test_file = self.get_test_loc('copyrights/copyright_errno_c-c.c')
expected = [
u'Copyright (c) 2002-2006 Sam Leffler, Errno Consulting, Atheros Communications, Inc.',
]
check_detection(expected, test_file)
def test_copyright_esmertec_java(self):
test_file = self.get_test_loc('copyrights/copyright_esmertec_java-java.java')
expected = [
u'Copyright (c) 2008 Esmertec AG',
u'Copyright (c) 2008 The Android Open Source Project',
]
check_detection(expected, test_file)
def test_copyright_essential_smoke(self):
test_file = self.get_test_loc('copyrights/copyright_essential_smoke-ibm_c.c')
expected = [
u'Copyright IBM and others (c) 2008',
u'Copyright Eclipse, IBM and others (c) 2008',
]
check_detection(expected, test_file)
def test_copyright_expat_h(self):
test_file = self.get_test_loc('copyrights/copyright_expat_h-expat_h.h')
expected = [
u'Copyright (c) 1998, 1999, 2000 Thai Open Source Software Center Ltd',
]
check_detection(expected, test_file)
def test_copyright_ext_all_js(self):
test_file = self.get_test_loc('copyrights/copyright_ext_all_js-ext_all_js.js')
expected = [
u'Copyright (c) 2006-2009 Ext JS, LLC',
]
check_detection(expected, test_file)
def test_copyright_extjs_c(self):
test_file = self.get_test_loc('copyrights/copyright_extjs_c-c.c')
expected = [
u'Copyright (c) 2006-2007, Ext JS, LLC.',
]
check_detection(expected, test_file)
def test_copyright_fsf_py(self):
test_file = self.get_test_loc('copyrights/copyright_fsf_py-999_py.py')
expected = [
u'Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.',
]
check_detection(expected, test_file)
def test_copyright_gailly(self):
test_file = self.get_test_loc('copyrights/copyright_gailly-c.c')
expected = [
u'Copyright (c) 1992-1993 Jean-loup Gailly.',
u'Copyright (c) 1992-1993 Jean-loup Gailly',
u'Copyright (c) 1992-1993 Jean-loup Gailly',
]
check_detection(expected, test_file)
def test_copyright_geoff_js(self):
test_file = self.get_test_loc('copyrights/copyright_geoff_js-js.js')
expected = [
u'Copyright (c) 2007-2008 Geoff Stearns, Michael Williams, and Bobby van der Sluis',
]
check_detection(expected, test_file)
def test_copyright_gnome_session_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_gnome_session_copyright-gnome_session_copyright.copyright')
expected = [
u'Copyright (c) 1999-2009 Red Hat, Inc.',
u'Copyright (c) 1999-2007 Novell, Inc.',
u'Copyright (c) 2001-2003 George Lebl',
u'Copyright (c) 2001 Queen of England',
u'Copyright (c) 2007-2008 William Jon McCann',
u'Copyright (c) 2006 Ray Strode',
u'Copyright (c) 2008 Lucas Rocha',
u'Copyright (c) 2005 Raffaele Sandrini',
u'Copyright (c) 2006-2007 Vincent Untz',
u'Copyright (c) 1998 Tom Tromey',
u'Copyright (c) 1999 Free Software Foundation, Inc.',
u'Copyright (c) 2003 Sun Microsystems, Inc.',
]
check_detection(expected, test_file)
def test_copyright_gnome_system_monitor_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_gnome_system_monitor_copyright-gnome_system_monitor_copyright.copyright')
expected = [
u'Copyright Holders: Kevin Vandersloot <kfv101@psu.edu> Erik Johnsson <zaphod@linux.nu>',
]
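# Presumed semantics of these flags: with expected_in_results=False and
# results_in_expected=True, check_detection only requires that every
# detected copyright appears in `expected`, not that every expected
# entry is detected.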
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_gnome_system_monitor_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_gnome_system_monitor_copyright_label-gnome_system_monitor_copyright_label.label')
expected = [
u'Copyright Holders: Kevin Vandersloot <kfv101@psu.edu> Erik Johnsson <zaphod@linux.nu>',
]
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_gobjc_4_3_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_gobjc_4_3_copyright-gobjc__copyright.copyright')
expected = [
u'Copyright (c) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.',
u'copyright Free Software Foundation',
u'Copyright (c) 2004-2005 by Digital Mars , www.digitalmars.com',
u'Copyright (c) 1996-2003 Red Hat, Inc.',
]
check_detection(expected, test_file)
def test_copyright_google_closure_templates_java_html(self):
test_file = self.get_test_loc('copyrights/copyright_google_closure_templates_java_html-html.html')
expected = [
u'(c) 2009 Google',
]
check_detection(expected, test_file)
def test_copyright_google_view_layout1_xml(self):
test_file = self.get_test_loc('copyrights/copyright_google_view_layout1_xml-view_layout_xml.xml')
expected = [
u'Copyright (c) 2008 Google Inc.',
]
check_detection(expected, test_file)
def test_copyright_group(self):
test_file = self.get_test_loc('copyrights/copyright_group-c.c')
expected = [
u'Copyright (c) 2014 ARRis Group, Inc.',
u'Copyright (c) 2013 ARRIS Group, Inc.',
]
check_detection(expected, test_file)
def test_copyright_gsoap(self):
test_file = self.get_test_loc('copyrights/copyright_gsoap-gSOAP')
expected = [
u'Copyright (c) 2001-2004 Robert A. van Engelen, Genivia inc.',
u'Copyright (c) 2001-2004 Robert A. van Engelen, Genivia inc.',
]
check_detection(expected, test_file)
def test_copyright_gstreamer0_fluendo_mp3_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_gstreamer0_fluendo_mp3_copyright-gstreamer__fluendo_mp_copyright.copyright')
expected = [
u'Copyright (c) 2005,2006 Fluendo',
u'Copyright 2005 Fluendo',
]
check_detection(expected, test_file)
def test_copyright_hall(self):
test_file = self.get_test_loc('copyrights/copyright_hall-copyright.txt')
expected = [
u'Copyright (c) 2004, Richard S. Hall',
u'Copyright (c) 2004, Didier Donsez',
u'Copyright (c) 2002,2003, Stefan Haustein, Oberhausen',
]
check_detection(expected, test_file)
def test_copyright_hans_jurgen_htm(self):
test_file = self.get_test_loc('copyrights/copyright_hans_jurgen_htm-9_html.html')
expected = [
u'Copyright (c) 2006 by Hans-Jurgen Koch.',
]
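# The converse direction: every entry in `expected` must be detected,
# while extra detections are tolerated (assumed flag semantics).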
check_detection(expected, test_file,
expected_in_results=True,
results_in_expected=False)
def test_copyright_hansen_cs(self):
test_file = self.get_test_loc('copyrights/copyright_hansen_cs-cs.cs')
expected = [
u'Web Applications Copyright 2009 - Thomas Hansen thomas@ra-ajax.org.',
]
check_detection(expected, test_file)
def test_copyright_hciattach_qualcomm1_c(self):
test_file = self.get_test_loc('copyrights/copyright_hciattach_qualcomm1_c-hciattach_qualcomm_c.c')
expected = [
u'Copyright (c) 2005-2010 Marcel Holtmann <marcel@holtmann.org>',
]
check_detection(expected, test_file)
def test_copyright_hibernate_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_hibernate_copyright_label-hibernate_copyright_label.label')
expected = [
u'Copyright (c) 2004-2006 Bernard Blackham <bernard@blackham.com.au>',
u'copyright (c) 2004-2006 Cameron Patrick <cameron@patrick.wattle.id.au>',
u'copyright (c) 2006- Martin F. Krafft <madduck@debian.org>',
]
check_detection(expected, test_file)
def test_copyright_holtmann(self):
test_file = self.get_test_loc('copyrights/copyright_holtmann-hciattach_qualcomm_c.c')
expected = [
u'Copyright (c) 2005-2010 Marcel Holtmann <marcel@holtmann.org>',
u'Copyright (c) 2010, Code Aurora Forum.',
]
check_detection(expected, test_file)
def test_copyright_hostapd_cli_c(self):
test_file = self.get_test_loc('copyrights/copyright_hostapd_cli_c-hostapd_cli_c.c')
expected = [
u'Copyright (c) 2004-2005, Jouni Malinen <jkmaline@cc.hut.fi>',
u'Copyright (c) 2004-2005, Jouni Malinen <jkmaline@cc.hut.fi>',
]
check_detection(expected, test_file)
def test_copyright_hp_notice(self):
test_file = self.get_test_loc('copyrights/copyright_hp_notice-NOTICE')
expected = [
u'(c) Copyright 2007 Hewlett-Packard Development Company',
u'(c) Copyright 2008 Hewlett-Packard Development Company',
u'Copyright (c) 2006 Alan D. Brunelle <Alan.Brunelle@hp.com>',
u'Copyright (c) 2007 Alan D. Brunelle <Alan.Brunelle@hp.com>',
u'(c) Copyright 2008 Hewlett-Packard Development Company',
u'(c) Copyright 2009 Hewlett-Packard Development Company',
u'Copyright (c) 1989, 1991 Free Software Foundation, Inc.',
u'copyrighted by the Free Software Foundation',
]
check_detection(expected, test_file)
def test_copyright_hpijs_ppds_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_hpijs_ppds_copyright_label-hpijs_ppds_copyright_label.label')
expected = [
u'Copyright (c) 2003-2004 by Torsten Landschoff <torsten@debian.org>',
u'Copyright (c) 2004-2006 by Henrique de Moraes Holschuh <hmh@debian.org>',
u'Copyright (c) 2001-2006 Hewlett-Packard Company',
u'Copyright (c) 2001-2006 Hewlett-Packard Development Company',
]
check_detection(expected, test_file)
def test_copyright_ibm_c(self):
test_file = self.get_test_loc('copyrights/copyright_ibm_c-ibm_c.c')
expected = [
u'Copyright (c) ibm technologies 2008',
u'Copyright (c) IBM Corporation 2008',
u'Copyright (c) Ibm Corp. 2008',
u'Copyright (c) ibm.com 2008',
u'Copyright (c) IBM technology 2008',
u'Copyright (c) IBM company 2008',
]
check_detection(expected, test_file)
def test_copyright_icedax_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_icedax_copyright_label-icedax_copyright_label.label')
expected = [
u'Copyright 1998-2003 Heiko Eissfeldt',
u'(c) Peter Widow',
u'(c) Thomas Niederreiter',
u'(c) RSA Data Security, Inc.',
u'Copyright 1993 Yggdrasil Computing, Incorporated',
u'Copyright (c) 1999,2000-2004 J. Schilling',
u'(c) 1998-2002 by Heiko Eissfeldt, heiko@colossus.escape.de',
u'(c) 2002 by Joerg Schilling',
u'(c) 1996, 1997 Robert Leslie',
u'Copyright (c) 2002 J. Schilling',
u'Copyright (c) 1987, 1995-2003 J. Schilling',
u'Copyright 2001 H. Peter Anvin',
]
check_detection(expected, test_file)
def test_copyright_ifrename_c(self):
test_file = self.get_test_loc('copyrights/copyright_ifrename_c-ifrename_c.c')
expected = [
u'Copyright (c) 2004 Jean Tourrilhes <jt@hpl.hp.com>',
]
check_detection(expected, test_file)
def test_copyright_illinois_html(self):
test_file = self.get_test_loc('copyrights/copyright_illinois_html-9_html.html')
expected = [
u'Copyright 1999,2000,2001,2002,2003,2004 The Board of Trustees of the University of Illinois',
]
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_in_COPYING_gpl(self):
test_file = self.get_test_loc('copyrights/copyright_in_COPYING_gpl-COPYING_gpl.gpl')
expected = [
u'Copyright (c) 1989, 1991 Free Software Foundation, Inc.',
u'copyrighted by the Free Software Foundation',
]
check_detection(expected, test_file)
def test_copyright_in_COPYRIGHT_madwifi(self):
test_file = self.get_test_loc('copyrights/copyright_in_COPYRIGHT_madwifi-COPYRIGHT_madwifi.madwifi')
expected = [
u'Copyright (c) 2002-2006 Sam Leffler, Errno Consulting, Atheros Communications, Inc.',
]
check_detection(expected, test_file)
def test_copyright_in_README(self):
test_file = self.get_test_loc('copyrights/copyright_in_README-README')
expected = [
u'Copyright (c) 2002-2006, Jouni Malinen <jkmaline@cc.hut.fi>',
]
check_detection(expected, test_file)
def test_copyright_in_bash(self):
test_file = self.get_test_loc('copyrights/copyright_in_bash-shell_sh.sh')
expected = [
u'Copyright (c) 2008 Hewlett-Packard Development Company, L.P.',
]
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_in_binary_lib(self):
test_file = self.get_test_loc('copyrights/copyright_in_binary_lib-php_embed_lib.lib')
expected = [
u'Copyright nexB and others (c) 2012',
]
check_detection(expected, test_file)
def test_copyright_in_c(self):
test_file = self.get_test_loc('copyrights/copyright_in_c-c.c')
expected = [
u'COPYRIGHT (c) STMicroelectronics 2005.',
]
check_detection(expected, test_file)
def test_copyright_in_c_include(self):
test_file = self.get_test_loc('copyrights/copyright_in_c_include-h.h')
expected = [
u'COPYRIGHT (c) ST-Microelectronics 1998.',
]
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_in_dll(self):
test_file = self.get_test_loc('copyrights/copyright_in_dll-9_msvci_dll.dll')
expected = [
u'Copyright Myself and Me, Inc',
]
check_detection(expected, test_file)
def test_copyright_in_h(self):
test_file = self.get_test_loc('copyrights/copyright_in_h-h.h')
expected = [
u'COPYRIGHT (c) ST-Microelectronics 1998.',
]
check_detection(expected, test_file)
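# Known failure: copyrights placed inside HTML comments are presumably
# not yet picked up by the detector.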
@expectedFailure
def test_copyright_in_html_comments(self):
test_file = self.get_test_loc('copyrights/copyright_in_html_comments-html.html')
expected = [
u'Copyright 2008 ABCD, LLC.',
]
check_detection(expected, test_file)
def test_copyright_in_html_incorrect(self):
test_file = self.get_test_loc('copyrights/copyright_in_html_incorrect-detail_9_html.html')
expected = [
u'A12 Oe (c) 2004-2009',
]
check_detection(expected, test_file)
def test_copyright_in_maven_pom_xstream(self):
test_file = self.get_test_loc('copyrights/copyright_in_maven_pom_xstream-pom_xml.xml')
expected = [
u'Copyright (c) 2006 Joe Walnes.',
u'Copyright (c) 2006, 2007, 2008 XStream committers.',
]
check_detection(expected, test_file)
def test_copyright_in_media(self):
test_file = self.get_test_loc('copyrights/copyright_in_media-a_png.png')
expected = [
u'Copyright nexB and others (c) 2012',
]
check_detection(expected, test_file)
def test_copyright_in_phps(self):
test_file = self.get_test_loc('copyrights/copyright_in_phps-phps.phps')
expected = [
u'copyright 2005 Michal Migurski',
]
check_detection(expected, test_file)
def test_copyright_in_postcript(self):
test_file = self.get_test_loc('copyrights/copyright_in_postcript-9__ps.ps')
expected = [
u'Copyright 1999 Radical Eye Software',
]
check_detection(expected, test_file)
def test_copyright_in_txt(self):
test_file = self.get_test_loc('copyrights/copyright_in_txt.txt')
expected = [
u'Copyright ?2004-2006 Company',
]
check_detection(expected, test_file)
def test_copyright_in_visio_doc(self):
test_file = self.get_test_loc('copyrights/copyright_in_visio_doc-Glitch_ERD_vsd.vsd')
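# Binary Visio documents are expected to yield no detectable copyright.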
expected = []
check_detection(expected, test_file)
def test_copyright_inria_loss_of_holder_c(self):
test_file = self.get_test_loc('copyrights/copyright_inria_loss_of_holder_c-c.c')
expected = [
u'Copyright (c) 2000,2002,2003 INRIA, France Telecom',
]
check_detection(expected, test_file)
def test_copyright_java(self):
test_file = self.get_test_loc('copyrights/copyright_java-java.java')
expected = [
u'Copyright (c) 1992-2002 by P.J. Plauger.',
]
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_jdoe(self):
test_file = self.get_test_loc('copyrights/copyright_jdoe-copyright_c.c')
expected = [
u'Copyright 2009 J-Doe.',
]
check_detection(expected, test_file)
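# Known failure: the same Migurski statement is detected in
# copyright_in_phps above, but is presumably missed in this JSON variant.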
@expectedFailure
def test_copyright_json_in_phps(self):
test_file = self.get_test_loc('copyrights/copyright_json_in_phps-JSON_phps.phps')
expected = [
u'copyright 2005 Michal Migurski',
]
check_detection(expected, test_file)
def test_copyright_json_in_phps_incorrect(self):
test_file = self.get_test_loc('copyrights/copyright_json_in_phps_incorrect-JSON_phps.phps')
expected = []
check_detection(expected, test_file)
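# Known failure: likewise presumably missed in the HTML rendering of the
# same JSON source.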
@expectedFailure
def test_copyright_json_phps_html(self):
test_file = self.get_test_loc('copyrights/copyright_json_phps_html-JSON_phps_html.html')
expected = [
u'copyright 2005 Michal Migurski',
]
check_detection(expected, test_file)
def test_copyright_jsp_all_CAPS(self):
test_file = self.get_test_loc('copyrights/copyright_jsp_all_CAPS-jsp.jsp')
expected = [
u'copyright 2005-2006 Cedrik LIME',
]
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_kaboom_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_kaboom_copyright-kaboom_copyright.copyright')
expected = [
u'Copyright (c) 2009 Sune Vuorela <sune@vuorela.dk>',
u'Copyright (c) 2007-2009 George Kiagiadakis <gkiagiad@csd.uoc.gr>',
u'Copyright (c) 2009 Modestas Vainius <modestas@vainius.eu>',
u'Copyright (c) 2009, Debian Qt/KDE Maintainers <debian-qt-kde@lists.debian.org>',
]
check_detection(expected, test_file)
def test_copyright_kbuild_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_kbuild_copyright-kbuild_copyright.copyright')
expected = [
u'Copyright (c) 2005-2009 Knut St. Osmundsen <bird-kBuild-spam@anduin.net>',
u'Copyright (c) 1991-1993 The Regents of the University of California',
u'Copyright (c) 1988-2009 Free Software Foundation, Inc.',
u'Copyright (c) 2003 Free Software Foundation, Inc.',
u'Copyright (c) 2007-2009 Torsten Werner <twerner@debian.org>',
u'(c) 2009 Daniel Baumann <daniel@debian.org>',
]
check_detection(expected, test_file)
def test_copyright_kde_l10n_zhcn_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_kde_l10n_zhcn_copyright-kde_l_n_zhcn_copyright.copyright')
expected = [
u'Copyright (c) 1996-2009 The KDE Translation teams <kde-i18n-doc@kde.org>',
u'(c) 2007-2009, Debian Qt/KDE Maintainers',
]
check_detection(expected, test_file)
def test_copyright_leonardo_c(self):
test_file = self.get_test_loc('copyrights/copyright_leonardo_c-c.c')
expected = [
u'Copyright (c) 1994 by Leonardo DaVinci Societe',
]
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_libadns1_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libadns1_copyright-libadns_copyright.copyright')
expected = [
u'Copyright 1997-2000 Ian Jackson',
u'Copyright 1999 Tony Finch',
u'Copyright (c) 1991 Massachusetts Institute of Technology',
]
check_detection(expected, test_file)
def test_copyright_libc6_i686_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libc6_i686_copyright-libc_i_copyright.copyright')
expected = [
u'Copyright (c) 1991,92,93,94,95,96,97,98,99,2000,2001,2002,2003,2004,2005, 2006,2007,2008 Free Software Foundation, Inc.',
u'Copyright (c) 1991,92,93,94,95,96,97,98,99,2000,2001,2002,2003,2004,2005, 2006,2007,2008 Free Software Foundation, Inc.',
u'Copyright (c) 1991 Regents of the University of California',
u'Portions Copyright (c) 1993 by Digital Equipment Corporation',
u'Copyright (c) 1984, Sun Microsystems, Inc.',
u'Copyright (c) 1991,1990,1989 Carnegie Mellon University',
u'Copyright (c) 2000, Intel Corporation',
u'copyright (c) by Craig Metz',
]
check_detection(expected, test_file)
def test_copyright_libcdio10_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_libcdio10_copyright_label-libcdio_copyright_label.label')
expected = [
u'Copyright (c) 1999, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Rocky Bernstein <rocky@gnu.org>',
u'Copyright (c) 2000, 2001, 2003, 2004, 2005, 2008 Herbert Valerio Riedel',
u'Copyright (c) 1996, 1997, 1998 Gerd Knorr <kraxel@bytesex.org>',
u'Copyright (c) 2001 Xiph.org',
u'Copyright (c) 1994, 1995, 1996, 1997, 1998, 2001 Heiko Eifeldt <heiko@escape.colossus.de>',
u'Copyright (c) 1998, 1999, 2001 Monty',
u'Copyright (c) 2008 Robert W. Fuller <hydrologiccycle@gmail.com>',
u'Copyright (c) 2006, 2008 Burkhard Plaum <plaum@ipf.uni-stuttgart.de>',
u'Copyright (c) 2001, 2002 Ben Fennema <bfennema@falcon.csc.calpoly.edu>',
u'Copyright (c) 2001, 2002 Scott Long <scottl@freebsd.org>',
u'Copyright (c) 1993 Yggdrasil Computing, Incorporated',
u'Copyright (c) 1999, 2000 J. Schilling',
u'Copyright (c) 2001 Sven Ottemann <ac-logic@freenet.de>',
u'Copyright (c) 2003 Svend Sanjay Sorensen <ssorensen@fastmail.fm>',
u'Copyright (c) 1985, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1996, 1997, 1998, 1999, 2000 Free Software Foundation, Inc.',
u'Copyright (c) 2003 Matthias Drochner',
u'Copyright (c) 1998-2001 VideoLAN Johan Bilien <jobi@via.ecp.fr> and Gildas Bazin <gbazin@netcourrier.com>',
u'Copyright (c) 1992, 1993 Eric Youngdale',
u'Copyright (c) 2003, 2004, 2005, 2006, 2007, 2008 Rocky Bernstein and Herbert Valerio Riedel',
]
check_detection(expected, test_file)
def test_copyright_libcelt0_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libcelt0_copyright-libcelt_copyright.copyright')
expected = [
u'Copyright 2005-2007 Christopher Montgomery , Jean-Marc Valin , Timothy Terriberry',
u'(c) 2008, Ron',
]
check_detection(expected, test_file)
def test_copyright_libcompress_raw_zlib_perl_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libcompress_raw_zlib_perl_copyright-libcompress_raw_zlib_perl_copyright.copyright')
expected = [
u'Copyright 2005-2009, Paul Marquess <pmqs@cpan.org>',
u'Copyright 1995-2005, Jean-loup Gailly <jloup@gzip.org>',
u'Copyright 1995-2005, Mark Adler <madler@alumni.caltech.edu>',
u'Copyright 2004-2009, Marcus Holland-Moritz <mhx-cpan@gmx.net> 2001, Paul Marquess <pmqs@cpan.org>',
u'Copyright 2007-2009, Krzysztof Krzyzaniak <eloy@debian.org>',
]
check_detection(expected, test_file)
def test_copyright_libcpufreq0_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libcpufreq0_copyright-libcpufreq_copyright.copyright')
expected = [
u'Copyright 2004-2006 Dominik Brodowski',
]
check_detection(expected, test_file)
def test_copyright_libcrypt_ssleay_perl_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libcrypt_ssleay_perl_copyright-libcrypt_ssleay_perl_copyright.copyright')
expected = [
u'Copyright (c) 1999-2003 Joshua Chamas',
u'Copyright (c) 1998 Gisle Aas',
u'copyright (c) 2003 Stephen Zander <gibreel@debian.org>',
]
check_detection(expected, test_file)
def test_copyright_libepc_ui_1_0_1_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libepc_ui_1_0_1_copyright-libepc_ui__copyright.copyright')
expected = [
u'Copyright (c) 2007, 2008 Openismus GmbH',
]
check_detection(expected, test_file)
def test_copyright_libepc_ui_1_0_2_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_libepc_ui_1_0_2_copyright_label-libepc_ui__copyright_label.label')
expected = [
u'Copyright (c) 2007, 2008 Openismus GmbH',
]
check_detection(expected, test_file)
def test_copyright_libfltk1_1_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libfltk1_1_copyright-libfltk_copyright.copyright')
expected = [
u'Copyright (c) 1998-2009 Bill Spitzak spitzak@users.sourceforge.net',
]
check_detection(expected, test_file)
def test_copyright_libgail18_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_libgail18_copyright_label-libgail_copyright_label.label')
expected = []
check_detection(expected, test_file)
def test_copyright_libggiwmh0_target_x_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libggiwmh0_target_x_copyright-libggiwmh_target_x_copyright.copyright')
expected = [
u'Copyright (c) 2005 Eric Faurot eric.faurot@gmail.com',
u'Copyright (c) 2004 Peter Ekberg peda@lysator.liu.se',
u'Copyright (c) 2004 Christoph Egger',
u'Copyright (c) 1999 Marcus Sundberg marcus@ggi-project.org',
u'Copyright (c) 1998, 1999 Andreas Beck becka@ggi-project.org',
u'Copyright (c) 2008 Bradley Smith <brad@brad-smith.co.uk>',
]
check_detection(expected, test_file)
def test_copyright_libgnome_desktop_2_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libgnome_desktop_2_copyright-libgnome_desktop__copyright.copyright')
expected = [
u'Copyright (c) 1999, 2000 Red Hat Inc.',
u'Copyright (c) 2001 Sid Vicious',
u'Copyright (c) 1999 Free Software Foundation',
u'Copyright (c) 2002, Sun Microsystems, Inc.',
u'Copyright (c) 2003, Kristian Rietveld',
]
check_detection(expected, test_file)
def test_copyright_libgnome_media0_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libgnome_media0_copyright-libgnome_media_copyright.copyright')
expected = []
check_detection(expected, test_file)
def test_copyright_libgoffice_0_8_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_libgoffice_0_8_copyright_label-libgoffice__copyright_label.label')
expected = [
u'Copyright (c) 2003-2008 Jody Goldberg (jody@gnome.org)',
]
check_detection(expected, test_file)
def test_copyright_libgtkhtml2_0_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libgtkhtml2_0_copyright-libgtkhtml_copyright.copyright')
expected = [
u'Copyright 1999,2000,2001 Free Software Foundation, Inc.',
]
check_detection(expected, test_file)
def test_copyright_libisc44_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libisc44_copyright-libisc_copyright.copyright')
expected = [
u'Copyright (c) 1996-2001 Internet Software Consortium.',
]
check_detection(expected, test_file)
def test_copyright_libisccfg30_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libisccfg30_copyright-libisccfg_copyright.copyright')
expected = [
u'Copyright (c) 1996-2001 Internet Software Consortium',
]
check_detection(expected, test_file)
def test_copyright_libisccfg40_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libisccfg40_copyright-libisccfg_copyright.copyright')
expected = [
u'Copyright (c) 1996-2001 Internet Software Consortium',
]
check_detection(expected, test_file)
def test_copyright_libjpeg62_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libjpeg62_copyright-libjpeg_copyright.copyright')
expected = [
u'copyright (c) 1991-1998, Thomas G. Lane',
u'copyright by the Free Software Foundation',
]
check_detection(expected, test_file)
def test_copyright_libkeyutils1_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_libkeyutils1_copyright_label-libkeyutils_copyright_label.label')
expected = [
u'Copyright (c) 2005 Red Hat',
u'Copyright (c) 2005 Red Hat',
u'Copyright (c) 2006-2009 Daniel Baumann <daniel@debian.org>',
]
check_detection(expected, test_file)
def test_copyright_liblocale_gettext_perl_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_liblocale_gettext_perl_copyright_label-liblocale_get_perl_copyright_label.label')
expected = [
u'Copyright 1996..2005 by Phillip Vandry <vandry@TZoNE.ORG>',
]
check_detection(expected, test_file)
def test_copyright_libopenraw1_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_libopenraw1_copyright_label-libopenraw_copyright_label.label')
expected = [
u'Copyright (c) 2007, David Paleino <d.paleino@gmail.com>',
u'Copyright (c) 2005-2009, Hubert Figuiere <hub@figuiere.net>',
u'Copyright (c) 2006, Hubert Figuiere <hub@figuiere.net>',
u'(c) 2001, Lutz Muller <lutz@users.sourceforge.net>',
u'Copyright (c) 2007, Hubert Figuiere <hub@figuiere.net>',
u'(c) 1994, Kongji Huang and Brian C. Smith , Cornell University',
u'(c) 1993, Brian C. Smith , The Regents',
u"(c) 1991-1992, Thomas G. Lane , Part of the Independent JPEG Group's",
u'Copyright (c) 2005, Hubert Figuiere <hub@figuiere.net>',
u'Copyright (c) 2007, Hubert Figuiere <hub@figuiere.net>',
]
check_detection(expected, test_file)
def test_copyright_libopenthreads12_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libopenthreads12_copyright-libopenthreads_copyright.copyright')
expected = [
u'Copyright (c) 2002 Robert Osfield',
u'Copyright (c) 1998 Julian Smart , Robert Roebling',
]
check_detection(expected, test_file)
def test_copyright_libpam_ck_connector_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libpam_ck_connector_copyright-libpam_ck_connector_copyright.copyright')
expected = [
u'Copyright (c) 2006 William Jon McCann <mccann@jhu.edu>',
u'Copyright (c) 2007 David Zeuthen <davidz@redhat.com>',
u'Copyright (c) 2007 William Jon McCann <mccann@jhu.edu>',
u'(c) 2007, Michael Biebl <biebl@debian.org>',
]
check_detection(expected, test_file)
def test_copyright_libpoppler3_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libpoppler3_copyright-libpoppler_copyright.copyright')
expected = [
u'Copyright (c) 1996-2003 Glyph & Cog, LLC',
]
check_detection(expected, test_file)
def test_copyright_libqt4_scripttools_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libqt4_scripttools_copyright-libqt_scripttools_copyright.copyright')
expected = [
u'(c) 2008-2009 Nokia Corporation',
u'(c) 1994-2008 Trolltech ASA',
]
check_detection(expected, test_file)
def test_copyright_libqtscript4_gui_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libqtscript4_gui_copyright-libqtscript_gui_copyright.copyright')
expected = [
u'Copyright (c) 2009 Modestas Vainius <modestas@vainius.eu>',
u'Copyright (c) Trolltech ASA',
u'Copyright (c) Roberto Raggi <roberto@kdevelop.org>',
u'Copyright (c) Harald Fernengel <harry@kdevelop.org>',
]
check_detection(expected, test_file)
def test_copyright_libsocks4_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libsocks4_copyright-libsocks_copyright.copyright')
expected = [
u'Copyright (c) 1989 Regents of the University of California.',
u'Portions Copyright (c) 1993, 1994, 1995 by NEC Systems Laboratory',
]
check_detection(expected, test_file)
def test_copyright_libsox_fmt_alsa_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libsox_fmt_alsa_copyright-libsox_fmt_alsa_copyright.copyright')
expected = [
u'Copyright 1991 Lance Norskog And Sundry Contributors',
]
check_detection(expected, test_file)
def test_copyright_libspeex1_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libspeex1_copyright-libspeex_copyright.copyright')
expected = [
u'Copyright 2002-2007 Xiph.org',
u'Copyright 2002-2007 Jean-Marc Valin',
u'Copyright 2005-2007 Analog Devices Inc.',
u'Copyright 2005-2007 Commonwealth',
u'Copyright 1993, 2002, 2006 David Rowe',
u'Copyright 2003 EpicGames',
u'Copyright 1992-1994 Jutta Degener , Carsten Bormann',
]
check_detection(expected, test_file)
def test_copyright_libstlport4_6ldbl_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_libstlport4_6ldbl_copyright_label-libstlport_ldbl_copyright_label.label')
expected = [
u'Copyright (c) 1994 Hewlett-Packard Company',
u'Copyright (c) 1996-1999 Silicon Graphics Computer Systems, Inc.',
u'Copyright (c) 1997 Moscow Center for SPARC Technology',
u'Copyright (c) 1999, 2000, 2001 Boris Fomitchev',
]
check_detection(expected, test_file)
def test_copyright_libtdb1_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libtdb1_copyright-libtdb_copyright.copyright')
expected = [
u'Copyright (c) Andrew Tridgell 1999-2004',
u'Copyright (c) Paul Rusty Russell 2000',
u'Copyright (c) Jeremy Allison 2000-2003',
]
check_detection(expected, test_file)
def test_copyright_libuim6_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libuim6_copyright-libuim_copyright.copyright')
expected = [
u'Copyright (c) 2003-2007 uim Project',
u'COPYRIGHT (c) 1988-1994 BY PARADIGM ASSOCIATES INCORPORATED',
u'Copyright (c) 2006, SHIMODA Hiroshi <piro@p.club.ne.jp>',
u'Copyright (c) 2006, FUJITA Yuji <yuji@webmasters.gr.jp>',
u'Copyright (c) 2006, Jun Mukai <mukai@jmuk.org>',
u'Copyright (c) 2006, Teppei Tamra <tam-t@par.odn.ne.jp>',
u'Copyright (c) 2005 UTUMI Hirosi <utuhiro78@yahoo.co.jp>',
u'Copyright (c) 2006 YAMAMOTO Kengo <yamaken@bp.iij4u.or.jp>',
u'Copyright (c) 2006 Jae-hyeon Park <jhyeon@gmail.com>',
u'Copyright (c) 2006 Etsushi Kato <ek.kato@gmail.com>',
]
check_detection(expected, test_file)
def test_copyright_libxext6_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libxext6_copyright-libxext_copyright.copyright')
expected = [
u'Copyright 1986, 1987, 1988, 1989, 1994, 1998 The Open Group',
u'Copyright (c) 1996 Digital Equipment Corporation, Maynard, Massachusetts',
u'Copyright (c) 1997 by Silicon Graphics Computer Systems, Inc.',
u'Copyright 1992 Network Computing Devices',
u'Copyright 1991,1993 by Digital Equipment Corporation, Maynard, Massachusetts',
u'Copyright 1986, 1987, 1988 by Hewlett-Packard Corporation',
u'Copyright (c) 1994, 1995 Hewlett-Packard Company',
u'Copyright Digital Equipment Corporation',
u'Copyright 1999, 2005, 2006 Sun Microsystems, Inc.',
]
check_detection(expected, test_file)
def test_copyright_libxmlrpc_c3_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_libxmlrpc_c3_copyright-libxmlrpc_c_copyright.copyright')
expected = [
u'Copyright (c) 2001 by First Peer, Inc.',
u'Copyright (c) 2001 by Eric Kidd.',
u'Copyright (c) 1998, 1999, 2000 Thai Open Source Software Center Ltd',
u'Copyright (c) 2000 by Moez Mahfoudh <mmoez@bigfoot.com>',
u'Copyright 1991, 1992, 1993, 1994 by Stichting Mathematisch Centrum, Amsterdam',
]
check_detection(expected, test_file)
def test_copyright_libxt6_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_libxt6_copyright_label-libxt_copyright_label.label')
expected = [
u'Copyright 1987, 1988 by Digital Equipment Corporation , Maynard, Massachusetts',
u'Copyright 1993 by Sun Microsystems, Inc. Mountain View',
u'Copyright 1985, 1986, 1987, 1988, 1989, 1994, 1998, 2001 The Open Group',
u'(c) COPYRIGHT International Business Machines Corp. 1992,1997',
]
check_detection(expected, test_file)
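# Known failure: the copyright line in the QPL v1.0 license text is
# presumably not matched yet.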
@expectedFailure
def test_copyright_license_qpl_v1_0_perfect(self):
test_file = self.get_test_loc('copyrights/copyright_license_qpl_v1_0_perfect-QPL_v.0')
expected = [
u'Copyright (c) 1999 Trolltech AS, Norway.',
]
check_detection(expected, test_file)
def test_copyright_license_text_adaptive_v1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_adaptive_v1_0-Adaptive v.0')
expected = [
u'(c) Any Recipient',
u'(c) Each Recipient',
]
check_detection(expected, test_file)
def test_copyright_license_text_adobe(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_adobe-Adobe')
expected = [
u'Copyright (c) 2006 Adobe Systems Incorporated.',
]
check_detection(expected, test_file)
def test_copyright_license_text_adobeflex2sdk(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_adobeflex2sdk-Adobeflex_sdk')
expected = [
u'(c) Adobe AIR',
u'(c) Material Improvement',
]
check_detection(expected, test_file)
def test_copyright_license_text_afferogplv1(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_afferogplv1-AfferoGPLv')
expected = [
u'Copyright (c) 2002 Affero Inc.',
u'copyright (c) 1989, 1991 Free Software Foundation, Inc.',
u'copyrighted by Affero, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_afferogplv2(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_afferogplv2-AfferoGPLv')
expected = [
u'Copyright (c) 2007 Affero Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_afferogplv3(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_afferogplv3-AfferoGPLv')
expected = [
u'Copyright (c) 2007 Free Software Foundation, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_afl_v3_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_afl_v3_0-AFL_v.0')
expected = [
u'Copyright (c) 2005 Lawrence Rosen.',
]
check_detection(expected, test_file)
def test_copyright_license_text_aladdin_free_public_license(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_aladdin_free_public_license-Aladdin Free Public License')
expected = [
u'Copyright (c) 1994, 1995, 1997, 1998, 1999, 2000 Aladdin Enterprises, Menlo Park, California',
]
check_detection(expected, test_file)
def test_copyright_license_text_amazondsb(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_amazondsb-AmazonDSb')
expected = [
u'(c) 2006 Amazon Digital Services, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_ampasbsd(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_ampasbsd-AMPASBSD')
expected = [
u'Copyright (c) 2006 Academy of Motion Picture Arts and Sciences',
]
check_detection(expected, test_file)
def test_copyright_license_text_apachev1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_apachev1_0-Apachev.0')
expected = [
u'Copyright (c) 1995-1999 The Apache Group.',
]
check_detection(expected, test_file)
def test_copyright_license_text_apachev1_1(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_apachev1_1-Apachev.1')
expected = [
u'Copyright (c) 2000 The Apache Software Foundation.',
]
check_detection(expected, test_file)
def test_copyright_license_text_apachev2_0b(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_apachev2_0b-Apachev_b.0b')
expected = [
u'Copyright 2000',
]
check_detection(expected, test_file)
def test_copyright_license_text_apple_common_documentation_license_v1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_apple_common_documentation_license_v1_0-Apple Common Documentation License v.0')
expected = [
u'Copyright (c) 2001 Apple Computer, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_apple_public_source_license_v1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_apple_public_source_license_v1_0-Apple Public Source License v.0')
expected = [
u'Copyright (c) 1999 Apple Computer, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_apple_public_source_license_v1_1(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_apple_public_source_license_v1_1-Apple Public Source License v.1')
expected = [
u'Copyright (c) 1999-2000 Apple Computer, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_apple_public_source_license_v1_2(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_apple_public_source_license_v1_2-Apple Public Source License v.2')
expected = [
u'Copyright (c) 1999-2003 Apple Computer, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_apslv2_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_apslv2_0-APSLv.0')
expected = [
u'Copyright (c) 1999-2007 Apple Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_artistic_v1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_artistic_v1_0-Artistic v.0')
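# The Artistic v1.0 license text contains no copyright statement, so
# nothing should be detected (same for the short variant below).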
expected = []
check_detection(expected, test_file)
def test_copyright_license_text_artistic_v1_0_short(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_artistic_v1_0_short-Artistic v_ short.0 short')
expected = []
check_detection(expected, test_file)
def test_copyright_license_text_artistic_v2_0beta4(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_artistic_v2_0beta4-Artistic v_beta.0beta4')
expected = [
u'Copyright (c) 2000, Larry Wall.',
]
check_detection(expected, test_file)
def test_copyright_license_text_artisticv2_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_artisticv2_0-Artisticv.0')
expected = [
u'Copyright (c) 2000-2006, The Perl Foundation.',
]
check_detection(expected, test_file)
def test_copyright_license_text_attributionassurancelicense(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_attributionassurancelicense-AttributionAssuranceLicense')
expected = [
u'Copyright (c) 2002 by AUTHOR',
]
check_detection(expected, test_file)
def test_copyright_license_text_bigelow_holmes(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_bigelow_holmes-Bigelow&Holmes')
expected = [
u'(c) Copyright 1989 Sun Microsystems, Inc.',
u'(c) Copyright Bigelow',
]
check_detection(expected, test_file)
def test_copyright_license_text_bitstream(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_bitstream-Bi_ream')
expected = [
u'Copyright (c) 2003 by Bitstream, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_bsdnrl(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_bsdnrl-BSDNRL')
expected = [
u'copyright by The Regents of the University of California.',
]
check_detection(expected, test_file)
def test_copyright_license_text_cnri(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_cnri-CNRI')
expected = [
u'Copyright (c) 1995-2000 Corporation for National Research Initiatives',
]
check_detection(expected, test_file)
def test_copyright_license_text_condor_extra_For(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_condor_extra_For-Condor')
expected = [
u'Copyright 1990-2006 Condor Team, Computer Sciences Department, University of Wisconsin-Madison, Madison',
]
check_detection(expected, test_file)
def test_copyright_license_text_doc(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_doc-DOC')
expected = [
u'copyrighted by Douglas C. Schmidt',
# and his research group at Washington University, University of California, Irvine, and Vanderbilt University',
u'research group at Washington University, University of California, Irvine, and Vanderbilt University, Copyright (c) 1993-2008',
]
check_detection(expected, test_file)
def test_copyright_license_text_dual_mpl_gpl(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_dual_mpl_gpl-Dual MPL GPL')
expected = [
u'Copyright (c) 2002 the Initial Developer.',
]
check_detection(expected, test_file)
def test_copyright_license_text_dualmpl_mit(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_dualmpl_mit-DualMPL_MIT')
expected = [
u'Copyright (c) 1998-2001, Daniel Stenberg, <daniel@haxx.se>',
]
check_detection(expected, test_file)
def test_copyright_license_text_eclv1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_eclv1_0-ECLv.0')
expected = [
u'Copyright (c) YeAr Name',
]
check_detection(expected, test_file)
def test_copyright_license_text_ecosv2_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_ecosv2_0-eCosv.0')
expected = [
u'Copyright (c) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_entessa(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_entessa-Entessa')
expected = [
u'Copyright (c) 2003 Entessa, LLC.',
]
check_detection(expected, test_file)
def test_copyright_license_text_eplv1_0b(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_eplv1_0b-EPLv_b.0b')
expected = [
u'Copyright (c) 2003, 2005 IBM Corporation',
]
check_detection(expected, test_file)
def test_copyright_license_text_eudatagrid(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_eudatagrid-EUDatagrid')
expected = [
u'Copyright (c) 2001 EU DataGrid.',
]
check_detection(expected, test_file)
def test_copyright_license_text_eurosym_v2(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_eurosym_v2-Eurosym_v.v2')
expected = [
u'Copyright (c) 1999-2002 Henrik Theiling',
]
check_detection(expected, test_file)
def test_copyright_license_text_frameworxv1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_frameworxv1_0-Frameworxv.0')
expected = [
u'(c) Source Code',
u'(c) THE FRAMEWORX COMPANY 2003',
]
check_detection(expected, test_file)
def test_copyright_license_text_freebsd(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_freebsd-FreeBSD')
expected = [
u'Copyright 1994-2006 The FreeBSD Project.',
]
check_detection(expected, test_file)
def test_copyright_license_text_freetype(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_freetype-FreeType')
expected = [
u'Copyright 1996-2002, 2006 by David Turner, Robert Wilhelm, and Werner Lemberg',
u'copyright (c) The FreeType Project (www.freetype.org).',
u'copyright (c) 1996-2000 by David Turner, Robert Wilhelm, and Werner Lemberg.',
]
check_detection(expected, test_file)
def test_copyright_license_text_gfdlv1_2(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_gfdlv1_2-GFDLv.2')
expected = [
u'Copyright (c) 2000,2001,2002 Free Software Foundation, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_gfdlv1_3(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_gfdlv1_3-GFDLv.3')
expected = [
u'Copyright (c) 2000, 2001, 2002, 2007, 2008 Free Software Foundation, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_glide(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_glide-Glide')
expected = [
u'copyright notice (3dfx Interactive, Inc. 1999)',
u'COPYRIGHT 3DFX INTERACTIVE, INC. 1999',
u'COPYRIGHT 3DFX INTERACTIVE, INC. 1999',
]
check_detection(expected, test_file)
def test_copyright_license_text_gnuplot(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_gnuplot-gnuplot')
expected = [
u'Copyright 1986 - 1993, 1998, 2004 Thomas Williams, Colin Kelley',
]
check_detection(expected, test_file)
def test_copyright_license_text_gpl_v1(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_gpl_v1-GPL_v')
expected = [
u'Copyright (c) 1989 Free Software Foundation, Inc.',
u'copyrighted by the Free Software Foundation',
]
check_detection(expected, test_file)
def test_copyright_license_text_gpl_v2(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_gpl_v2-GPL_v')
expected = [
u'Copyright (c) 1989, 1991 Free Software Foundation, Inc.',
u'copyrighted by the Free Software Foundation',
]
check_detection(expected, test_file)
def test_copyright_license_text_gpl_v3(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_gpl_v3-GPL_v')
expected = [
u'Copyright (c) 2007 Free Software Foundation, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_gsoap(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_gsoap-gSOAP')
expected = [
u'Copyright (c) 2001-2004 Robert A. van Engelen, Genivia inc.',
u'Copyright (c) 2001-2004 Robert A. van Engelen, Genivia inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_helix(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_helix-Helix')
expected = [
u'Copyright (c) 1995-2002 RealNetworks, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_hewlett_packard(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_hewlett_packard-Hewlett_Packard')
expected = [
u'(c) HEWLETT-PACKARD COMPANY, 2004.',
]
check_detection(expected, test_file)
def test_copyright_license_text_ibmpl_v1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_ibmpl_v1_0-IBMPL_v.0')
expected = [
u'Copyright (c) 1996, 1999 International Business Machines Corporation',
]
check_detection(expected, test_file)
def test_copyright_license_text_ietf(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_ietf-IETF')
expected = [
u'Copyright (c) The Internet Society (2003).',
]
check_detection(expected, test_file)
def test_copyright_license_text_ijg(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_ijg-IJG')
expected = [
u'copyright (c) 1991-1998, Thomas G. Lane.',
u'copyright by the Free Software Foundation',
]
check_detection(expected, test_file)
def test_copyright_license_text_imatix(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_imatix-iMatix')
expected = [
u'Copyright 1991-2000 iMatix Corporation.',
u'Copyright 1991-2000 iMatix Corporation',
u'Copyright 1991-2000 iMatix Corporation',
u'Parts copyright (c) 1991-2000 iMatix Corporation.',
u'Copyright 1996-2000 iMatix Corporation',
]
check_detection(expected, test_file)
def test_copyright_license_text_imlib2(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_imlib2-Imlib')
expected = [
u'Copyright (c) 2000 Carsten Haitzler',
]
check_detection(expected, test_file)
def test_copyright_license_text_intel(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_intel-Intel')
expected = [
u'Copyright (c) 2006, Intel Corporation.',
]
check_detection(expected, test_file)
def test_copyright_license_text_jabber(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_jabber-Jabber')
expected = [
u'Copyright (c) 1999-2000 Jabber.com',
u'Copyright (c) 1998-1999 Jeremie Miller.',
]
check_detection(expected, test_file)
def test_copyright_license_text_jpython(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_jpython-JPython')
expected = [
u'Copyright 1996-1999 Corporation for National Research Initiatives',
]
check_detection(expected, test_file)
def test_copyright_license_text_larryrosen(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_larryrosen-LarryRosen')
expected = [
u'Copyright (c) 2002 Lawrence E. Rosen.',
]
check_detection(expected, test_file)
def test_copyright_license_text_latex_v1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_latex_v1_0-LaTeX_v.0')
expected = [
u'Copyright 1999 LaTeX3 Project',
u'Copyright 2001 M. Y. Name',
]
check_detection(expected, test_file)
def test_copyright_license_text_latex_v1_1(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_latex_v1_1-LaTeX_v.1')
expected = [
u'Copyright 1999 LaTeX3 Project',
u'Copyright 2001 M. Y. Name',
]
check_detection(expected, test_file)
def test_copyright_license_text_latex_v1_2(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_latex_v1_2-LaTeX_v.2')
expected = [
u'Copyright 1999 LaTeX3 Project',
u'Copyright 2001 M. Y. Name',
]
check_detection(expected, test_file)
def test_copyright_license_text_latex_v1_3a(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_latex_v1_3a-LaTeX_v_a.3a')
expected = [
u'Copyright 1999 2002-04 LaTeX3 Project',
u'Copyright 2003 M. Y. Name',
]
check_detection(expected, test_file)
def test_copyright_license_text_latex_v1_3a_ref(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_latex_v1_3a_ref-LaTeX_v_a_ref.3a_ref')
expected = [
u'Copyright 2003 Name',
]
check_detection(expected, test_file)
def test_copyright_license_text_latex_v1_3c(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_latex_v1_3c-LaTeX_v_c.3c')
expected = [
u'Copyright 1999 2002-2008 LaTeX3 Project',
u'Copyright 2005 M. Y. Name',
]
check_detection(expected, test_file)
def test_copyright_license_text_lgpl_v2_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_lgpl_v2_0-LGPL_v.0')
expected = [
u'Copyright (c) 1991 Free Software Foundation, Inc.',
u'copyrighted by the Free Software Foundation',
]
check_detection(expected, test_file)
def test_copyright_license_text_lgpl_v2_1(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_lgpl_v2_1-LGPL_v.1')
expected = [
u'Copyright (c) 1991, 1999 Free Software Foundation, Inc.',
u'copyrighted by the Free Software Foundation',
]
check_detection(expected, test_file)
def test_copyright_license_text_lgpl_v3(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_lgpl_v3-LGPL_v')
expected = [
u'Copyright (c) 2007 Free Software Foundation, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_lgpl_wxwindows_library_licence_v3_0_variant(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_lgpl_wxwindows_library_licence_v3_0_variant-LGPL wxWindows Library Licence v_ variant.0 variant')
expected = [
u'Copyright (c) 1998 Julian Smart, Robert Roebling',
]
check_detection(expected, test_file)
def test_copyright_license_text_logica_v1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_logica_v1_0-Logica_v.0')
expected = [
u'Copyright (c) 1996-2001 Logica Mobile Networks Limited',
u'Copyright (c) 1996-2001 Logica Mobile Networks Limited',
]
check_detection(expected, test_file)
def test_copyright_license_text_luxi_fonts(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_luxi_fonts-Luxi_fonts')
expected = [
u'copyright (c) 2001 by Bigelow & Holmes Inc.',
u'copyright (c) 2001 by URW++ GmbH.',
]
check_detection(expected, test_file)
def test_copyright_license_text_maia(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_maia-Maia')
expected = [
u'Copyright 2004 by Robert LeBlanc',
]
check_detection(expected, test_file)
def test_copyright_license_text_mit_adobeglyph(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_mit_adobeglyph-MIT_AdobeGlyph')
expected = [
u'Copyright (c) 1997,1998,2002,2007 Adobe Systems Incorporated',
]
check_detection(expected, test_file)
def test_copyright_license_text_mit_cmu(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_mit_cmu-MIT_CMU')
expected = [
u'Copyright 1989, 1991, 1992 by Carnegie Mellon University',
u'Copyright 1996, 1998-2000 The Regents of the University of California',
]
check_detection(expected, test_file)
def test_copyright_license_text_mit_danse(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_mit_danse-MIT_danse')
expected = [
u'Copyright (c) 2009 California Institute of Technology.',
]
check_detection(expected, test_file)
def test_copyright_license_text_mit_enna(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_mit_enna-MIT_enna')
expected = [
u'Copyright (c) 2000 Carsten Haitzler',
]
check_detection(expected, test_file)
def test_copyright_license_text_mit_hylafax(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_mit_hylafax-MIT_hylafax')
expected = [
u'Copyright (c) 1990-1996 Sam Leffler',
u'Copyright (c) 1991-1996 Silicon Graphics, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_mit_icu(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_mit_icu-MIT_ICU')
expected = [
u'Copyright (c) 1995-2006 International Business Machines Corporation',
]
check_detection(expected, test_file)
def test_copyright_license_text_mit_lucent(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_mit_lucent-MIT_Lucent')
expected = [
u'Copyright (c) 1989-1998 by Lucent Technologies',
]
check_detection(expected, test_file)
def test_copyright_license_text_mit_mlton(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_mit_mlton-MIT_MLton')
expected = [
u'Copyright (c) 1999-2006 Henry Cejtin, Matthew Fluet, Suresh Jagannathan, and Stephen Weeks.',
u'Copyright (c) 1997-2000 by the NEC Research',
]
check_detection(expected, test_file)
def test_copyright_license_text_mit_oldstyle_disclaimer4(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_mit_oldstyle_disclaimer4-MIT_OldStyle_disclaimer')
expected = [
u'Copyright (c) 2001, 2002, 2003, 2004, 2005 by The Regents of the University of California.',
]
check_detection(expected, test_file)
def test_copyright_license_text_mit_unicode(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_mit_unicode-MIT_unicode')
expected = [
u'Copyright (c) 1991-2005 Unicode, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_mit_wordnet(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_mit_wordnet-MIT_WordNet')
expected = [
u'Copyright 2006 by Princeton University.',
]
check_detection(expected, test_file)
def test_copyright_license_text_mitre(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_mitre-MITRE')
expected = [
u'Copyright (c) 1994-1999. The MITRE Corporation',
]
check_detection(expected, test_file)
def test_copyright_license_text_ms_pl(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_ms_pl-Ms_PL')
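# The Microsoft Ms-PL, Ms-RL and Ms-RSL license texts carry no copyright
# statement, so the next three tests expect empty results.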
expected = []
check_detection(expected, test_file)
def test_copyright_license_text_ms_rl(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_ms_rl-Ms_RL')
expected = []
check_detection(expected, test_file)
def test_copyright_license_text_ms_rsl(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_ms_rsl-Ms_RSL')
expected = []
check_detection(expected, test_file)
def test_copyright_license_text_msntp(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_msntp-MSNTP')
expected = [
u'(c) Copyright, University of Cambridge, 1996, 1997, 2000',
u'(c) Copyright University of Cambridge.',
]
check_detection(expected, test_file)
def test_copyright_license_text_mysql_gplexception(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_mysql_gplexception-MySQL_gplexception')
expected = []
check_detection(expected, test_file)
def test_copyright_license_text_naumen(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_naumen-Naumen')
expected = [
u'Copyright (c) NAUMEN (tm) and Contributors.',
]
check_detection(expected, test_file)
def test_copyright_license_text_netcomponents(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_netcomponents-NetComponents')
expected = [
u'Copyright (c) 1996-1999 Daniel F. Savarese.',
]
check_detection(expected, test_file)
def test_copyright_license_text_nethack(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_nethack-Nethack')
expected = [
u'Copyright (c) 1989 M. Stephenson',
u'copyright 1988 Richard M. Stallman',
]
check_detection(expected, test_file)
def test_copyright_license_text_nokia(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_nokia-Nokia')
expected = [
u'Copyright (c) Nokia',
]
check_detection(expected, test_file)
def test_copyright_license_text_npl_v1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_npl_v1_0-NPL_v.0')
expected = [
u'Copyright (c) 1998 Netscape Communications Corporation.',
]
check_detection(expected, test_file)
def test_copyright_license_text_nvidia_source(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_nvidia_source-Nvidia_source')
expected = [
u'Copyright (c) 1996-1998 NVIDIA, Corp.',
u'Copyright (c) 1996-1998 NVIDIA, Corp.',
]
check_detection(expected, test_file)
def test_copyright_license_text_oclc_v1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_oclc_v1_0-OCLC_v.0')
expected = [
u'Copyright (c) 2000. OCLC Research.',
u'Copyright (c) 2000- (insert then current year)',
]
check_detection(expected, test_file)
def test_copyright_license_text_oclc_v2_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_oclc_v2_0-OCLC_v.0')
expected = [
u'Copyright (c) 2002. OCLC Research.',
u'Copyright (c) 2000- (insert then current year)',
u'Copyright (c) 2000- (insert then current year)',
]
check_detection(expected, test_file)
def test_copyright_license_text_openldap(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_openldap-OpenLDAP')
expected = [
u'Copyright 1999-2003 The OpenLDAP Foundation, Redwood City, California',
]
check_detection(expected, test_file)
def test_copyright_license_text_openmotif(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_openmotif-OpenMotif')
expected = [
u'Copyright (c) date here, The Open Group Ltd.',
]
check_detection(expected, test_file)
def test_copyright_license_text_openpbs(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_openpbs-OpenPBS')
expected = [
u'Copyright (c) 1999-2000 Veridian Information Solutions, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_openpublicationref(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_openpublicationref-OpenPublicationref')
expected = [
u'Copyright (c) 2000 by ThisOldHouse.',
]
check_detection(expected, test_file)
def test_copyright_license_text_openssl(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_openssl-OpenSSL')
expected = [
u'Copyright (c) 1998-2000 The OpenSSL Project.',
]
check_detection(expected, test_file)
def test_copyright_license_text_osl_v3_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_osl_v3_0-OSL_v.0')
expected = [
u'Copyright (c) 2005 Lawrence Rosen.',
]
check_detection(expected, test_file)
def test_copyright_license_text_phorum(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_phorum-Phorum')
expected = [
u'Copyright (c) 2001 The Phorum Development Team.',
]
check_detection(expected, test_file)
def test_copyright_license_text_pine(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_pine-Pine')
expected = [
u'Copyright 1989-2007 by the University of Washington.',
]
check_detection(expected, test_file)
def test_copyright_license_text_python_v1_6(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_python_v1_6-Python_v.6')
expected = [
u'Copyright (c) 1995-2000 Corporation for National Research Initiatives',
]
check_detection(expected, test_file)
def test_copyright_license_text_python_v1_6_1(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_python_v1_6_1-Python_v.1')
expected = [
u'Copyright 1995-2001 Corporation for National Research Initiatives',
]
check_detection(expected, test_file)
def test_copyright_license_text_python_v2(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_python_v2-Python_v')
expected = [
u'Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation',
u'Copyright (c) 1995-2001 Corporation for National Research Initiatives',
]
check_detection(expected, test_file)
def test_copyright_license_text_qpl_v1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_qpl_v1_0-QPL_v.0')
expected = [
u'Copyright (c) 1999 Trolltech AS',
]
check_detection(expected, test_file)
def test_copyright_license_text_realcsl_v2_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_realcsl_v2_0-RealCSL_v.0')
expected = []
check_detection(expected, test_file)
def test_copyright_license_text_realpsl_v1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_realpsl_v1_0-RealPSL_v.0')
expected = [
u'Copyright (c) 1995-2002 RealNetworks, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_realpsl_v1_0ref(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_realpsl_v1_0ref-RealPSL_v_ref.0ref')
expected = [
u'Copyright (c) 1995-2004 RealNetworks, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_reciprocal_v1_5(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_reciprocal_v1_5-Reciprocal_v.5')
expected = [
u'Copyright (c) 2001-2007 Technical Pursuit Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_redhateula(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_redhateula-RedHatEULA')
expected = []
check_detection(expected, test_file)
def test_copyright_license_text_redhatref(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_redhatref-RedHatref')
expected = [
u'Copyright (c) 2005 Red Hat, Inc.',
u'Copyright (c) 1995-2005 Red Hat, Inc.',
u'copyrighted by Red Hat, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_ricoh_v1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_ricoh_v1_0-Ricoh_v.0')
expected = [
u'Ricoh Silicon Valley, Inc. are Copyright (c) 1995-1999.',
]
check_detection(expected, test_file)
def test_copyright_license_text_scilab(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_scilab-Scilab')
expected = [
u'Scilab (c) INRIA-ENPC.',
u'Scilab (c) INRIA-ENPC.',
u'Scilab (c) INRIA-ENPC.',
u'Scilab (c) INRIA-ENPC.',
u'Scilab inside (c) INRIA-ENPC',
u'Scilab (c) INRIA-ENPC',
u'Scilab (c) INRIA-ENPC',
]
check_detection(expected, test_file)
def test_copyright_license_text_sgi_cid_v1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_sgi_cid_v1_0-SGI_CID_v.0')
expected = [
u'Copyright (c) 1994-1999 Silicon Graphics, Inc.',
u'Copyright (c) 1994-1999 Silicon Graphics, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_sgi_glx_v1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_sgi_glx_v1_0-SGI_GLX_v.0')
expected = [
u'(c) 1991-9 Silicon Graphics, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_sissl_v1_1refa(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_sissl_v1_1refa-SISSL_v_refa.1refa')
expected = [
u'Copyright 2000 by Sun Microsystems, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_sleepycat(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_sleepycat-Sleepycat')
expected = [
u'Copyright (c) 1990-1999 Sleepycat Software.',
]
check_detection(expected, test_file)
def test_copyright_license_text_sybaseopenwatcom_v1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_sybaseopenwatcom_v1_0-SybaseOpenWatcom_v.0')
expected = [
u'Copyright (c) 1983-2002 Sybase, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_uofu_rfpl(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_uofu_rfpl-UofU_RFPL')
expected = [
u'Copyright (c) 2001, 1998 University of Utah.',
]
check_detection(expected, test_file)
def test_copyright_license_text_vovida_v1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_vovida_v1_0-Vovida_v.0')
expected = [
u'Copyright (c) 2000 Vovida Networks, Inc.',
]
check_detection(expected, test_file)
def test_copyright_license_text_wtfpl(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_wtfpl-WTFPL')
expected = [
u'Copyright (c) 2004 Sam Hocevar',
]
check_detection(expected, test_file)
def test_copyright_license_text_x_net(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_x_net-X_Net.Net')
expected = [
u'Copyright (c) 2000-2001 X.Net, Inc. Lafayette, California',
]
check_detection(expected, test_file)
def test_copyright_license_text_zend(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_zend-Zend')
expected = [
u'Copyright (c) 1999-2002 Zend Technologies Ltd.',
]
check_detection(expected, test_file)
def test_copyright_license_text_zliback(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_zliback-zLibAck')
expected = [
u'Copyright (c) 2002-2007 Charlie Poole',
u'Copyright (c) 2002-2004 James W. Newkirk, Michael C. Two, Alexei A. Vorontsov',
u'Copyright (c) 2000-2002 Philip A. Craig',
]
check_detection(expected, test_file)
def test_copyright_license_text_zope_v1_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_zope_v1_0-Zope_v.0')
expected = [
u'Copyright (c) Digital Creations.',
]
check_detection(expected, test_file)
def test_copyright_license_text_zope_v2_0(self):
test_file = self.get_test_loc('copyrights/copyright_license_text_zope_v2_0-Zope_v.0')
expected = [
u'Copyright (c) Zope Corporation (tm) and Contributors.',
]
check_detection(expected, test_file)
def test_copyright_linux_source_2_6_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_linux_source_2_6_copyright-linux_source__copyright.copyright')
expected = [
u'copyrighted by Linus Torvalds',
]
check_detection(expected, test_file)
def test_copyright_loss_of_holder_c(self):
test_file = self.get_test_loc('copyrights/copyright_loss_of_holder_c-c.c')
expected = [
u'COPYRIGHT (c) DIONYSOS 2006 - 2009',
]
check_detection(expected, test_file)
def test_copyright_matroska_demux1_c(self):
test_file = self.get_test_loc('copyrights/copyright_matroska_demux1_c-matroska_demux_c.c')
expected = [
u'(c) 2003 Ronald Bultje <rbultje@ronald.bitfreak.net>',
u'(c) 2011 Debarshi Ray <rishi@gnu.org>',
]
check_detection(expected, test_file)
def test_copyright_matroska_demux_c(self):
test_file = self.get_test_loc('copyrights/copyright_matroska_demux_c-matroska_demux_c.c')
expected = [
u'(c) 2006 Tim-Philipp Muller',
u'(c) 2008 Sebastian Droge <slomo@circular-chaos.org>',
]
check_detection(expected, test_file)
def test_copyright_matroska_demux_muller_c(self):
test_file = self.get_test_loc('copyrights/copyright_matroska_demux_muller_c-matroska_demux_c.c')
expected = [
u'(c) 2006 Tim-Philipp Muller',
u'(c) 2008 Sebastian Droge <slomo@circular-chaos.org>',
]
check_detection(expected, test_file)
def test_copyright_memcmp_assembly(self):
test_file = self.get_test_loc('copyrights/copyright_memcmp_assembly-9_9_memcmp_S.S')
expected = [
u'Copyright (c) 2007 ARC International (UK) LTD',
]
check_detection(expected, test_file)
def test_copyright_mergesort_java(self):
test_file = self.get_test_loc('copyrights/copyright_mergesort_java-MergeSort_java.java')
expected = [
u'Copyright (c) 1998 Sun Microsystems, Inc.',
]
check_detection(expected, test_file)
def test_copyright_michal_txt(self):
test_file = self.get_test_loc('copyrights/copyright_michal_txt.txt')
expected = [
u'copyright 2005 Michal Migurski',
]
check_detection(expected, test_file)
def test_copyright_mips1_be_elf_hal_o_uu(self):
test_file = self.get_test_loc('copyrights/copyright_mips1_be_elf_hal_o_uu-mips_be_elf_hal_o_uu.uu')
expected = [
u'Copyright (c) 2002-2006 Sam Leffler, Errno Consulting, Atheros Communications, Inc.',
]
check_detection(expected, test_file)
def test_copyright_missing_statement_file_txt(self):
test_file = self.get_test_loc('copyrights/copyright_missing_statement_file_txt-file.txt')
expected = [
u'Copyright 2003-2009 The Apache Geronimo development community',
u'Copyright (c) 2000-2005 The Legion Of The Bouncy Castle',
]
check_detection(expected, test_file)
def test_copyright_mit(self):
test_file = self.get_test_loc('copyrights/copyright_mit.txt')
expected = [
u'Copyright 2010-2011 by MitSomething',
]
check_detection(expected, test_file)
def test_copyright_mit_danse(self):
test_file = self.get_test_loc('copyrights/copyright_mit_danse-MIT_Danse')
expected = [
u'Copyright (c) 2009 California Institute of Technology.',
]
check_detection(expected, test_file)
def test_copyright_mixedcaps_c(self):
test_file = self.get_test_loc('copyrights/copyright_mixedcaps_c-mixedcaps_c.c')
expected = [
u'COPYRIGHT (c) 2006 MyCompany2 MYCOP',
u'copyright (c) 2006 MyCompany2 MYCOP',
u'COPYRIGHT (c) 2006 MYCOP MyCompany3',
u'copyright (c) 2006 MYCOP MyCompany3',
u'Copyright (c) 1993-95 NEC Systems Laboratory',
u'COPYRIGHT (c) 1988-1994 PARADIGM BY CAMBRIDGE asSOCIATES INCORPORATED',
u'Copyright (c) 2006, SHIMODA Hiroshi',
u'Copyright (c) 2006, FUJITA Yuji',
u'Copyright (c) 2007 GNOME i18n Project',
u'Copyright 1996-2007 Glyph & Cog, LLC.',
u'Copyright (c) 2002 Juan Carlos Arevalo-Baeza',
u'Copyright (c) 2000 INRIA, France Telecom',
u'Copyright (c) NEC Systems Laboratory 1993',
u'Copyright (c) 1984 NEC Systems Laboratory',
u'Copyright (c) 1996-2003 Glyph & Cog, LLC',
u'Copyright (c) 1996. Zeus Technology Limited',
]
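        # Subset check (an explanatory note, not from the original source):
        # presumably every detected statement must appear in `expected`
        # (results_in_expected=True), while not every expected string needs
        # to be found verbatim (expected_in_results=False).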
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_mixedcase_company_name_in_c(self):
test_file = self.get_test_loc('copyrights/copyright_mixedcase_company_name_in_c-lowercase_company_c.c')
expected = [
u'Copyright (c) 2001 nexB',
]
check_detection(expected, test_file)
def test_copyright_mkisofs_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_mkisofs_copyright-mkisofs_copyright.copyright')
expected = [
u'Copyright 1998-2003 Heiko Eissfeldt',
u'(c) Peter Widow',
u'(c) Thomas Niederreiter',
u'(c) RSA Data Security, Inc.',
u'Copyright 1993 Yggdrasil Computing, Incorporated',
u'Copyright (c) 1999,2000-2004 J. Schilling',
u'(c) 1998-2002 by Heiko Eissfeldt, heiko@colossus.escape.de',
u'(c) 2002 by Joerg Schilling',
u'(c) 1996, 1997 Robert Leslie',
u'Copyright (c) 2002 J. Schilling',
u'Copyright (c) 1987, 1995-2003 J. Schilling',
u'Copyright 2001 H. Peter Anvin',
]
check_detection(expected, test_file)
def test_copyright_moto_broad(self):
test_file = self.get_test_loc('copyrights/copyright_moto_broad-c.c')
expected = [
u'COPYRIGHT (c) 2005 MOTOROLA, BROADBAND COMMUNICATIONS SECTOR',
]
check_detection(expected, test_file)
def test_copyright_motorola_c(self):
test_file = self.get_test_loc('copyrights/copyright_motorola_c-c.c')
expected = [
u'Copyright (c) 2003, 2010 Motorola, Inc.',
]
check_detection(expected, test_file)
def test_copyright_motorola_mobility_c(self):
test_file = self.get_test_loc('copyrights/copyright_motorola_mobility_c-c.c')
expected = [
u'Copyright (c) 2009 Motorola, Inc.',
u'Copyright (c) 2011 Motorola Mobility, Inc.',
]
check_detection(expected, test_file)
def test_copyright_mplayer_skin_blue_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_mplayer_skin_blue_copyright-mplayer_skin_blue_copyright.copyright')
expected = [
u'Copyright (c) 2005-06 Franciszek Wilamowski, xenomorph@irc.pl',
]
check_detection(expected, test_file)
def test_copyright_muller(self):
test_file = self.get_test_loc('copyrights/copyright_muller-c.c')
expected = [
u'(c) 2003 Ronald Bultje <rbultje@ronald.bitfreak.net>',
u'(c) 2006 Tim-Philipp Muller',
u'(c) 2008 Sebastian Droge <slomo@circular-chaos.org>',
u'(c) 2011 Debarshi Ray <rishi@gnu.org>',
]
check_detection(expected, test_file)
def test_copyright_multiline(self):
test_file = self.get_test_loc('copyrights/copyright_multiline-Historical.txt')
expected = [
u'COPYRIGHT (c) 1990-1994 BY GEORGE J. CARRETTE, CONCORD, MASSACHUSETTS.',
]
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_multiline_george(self):
test_file = self.get_test_loc('copyrights/copyright_multiline_george-Historical.txt')
expected = [
u'COPYRIGHT (c) 1990-1994 BY GEORGE',
]
check_detection(expected, test_file)
def test_copyright_mycorp_c(self):
test_file = self.get_test_loc('copyrights/copyright_mycorp_c-c.c')
expected = [
u'Copyright (c) 2012 MyCorp Inc.',
]
check_detection(expected, test_file)
def test_copyright_name_before_copyright_c(self):
test_file = self.get_test_loc('copyrights/copyright_name_before_copyright_c-c.c')
expected = [
u'Russ Dill <Russ.Dill@asu.edu> 2001-2003',
u'Vladimir Oleynik <dzo@simtreas.ru> (c) 2003'
]
check_detection(expected, test_file)
def test_copyright_name_sign_year(self):
test_file = self.get_test_loc('copyrights/copyright_name_sign_year_correct-c.c')
expected = [
'Copyright (c) 2008 Daisy Ltd.',
'Daisy (c) 1997 - 2008',
]
check_detection(expected, test_file)
def test_copyright_naumen_txt(self):
test_file = self.get_test_loc('copyrights/copyright_naumen_txt.txt')
expected = [
u'Copyright (c) NAUMEN (tm) and Contributors.',
]
check_detection(expected, test_file)
def test_copyright_ncurses_bin_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_ncurses_bin_copyright-ncurses_bin_copyright.copyright')
expected = [
u'Copyright (c) 1998 Free Software Foundation, Inc.',
]
check_detection(expected, test_file)
def test_copyright_nederlof(self):
test_file = self.get_test_loc('copyrights/copyright_nederlof.txt')
expected = [
u'(c) 2005 - Peter Nederlof',
]
check_detection(expected, test_file)
def test_copyright_trailing_copyleft(self):
test_file = self.get_test_loc('copyrights/copyright_trailing_copyleft.txt')
expected = [
u'Copyright (c) 1992 Ronald S. Karr',
]
check_detection(expected, test_file)
def test_copyright_no_copyright_in_c(self):
test_file = self.get_test_loc('copyrights/copyright_no_copyright_in_c-c.c')
expected = []
check_detection(expected, test_file)
def test_copyright_no_copyright_in_class_file_2(self):
test_file = self.get_test_loc('copyrights/copyright_no_copyright_in_class_file_2-PersistentElementHolder_class.class')
expected = []
check_detection(expected, test_file)
def test_copyright_no_copyright_in_class_file_3(self):
test_file = self.get_test_loc('copyrights/copyright_no_copyright_in_class_file_3-PersistentIndexedElementHolder_class.class')
expected = []
check_detection(expected, test_file)
def test_copyright_no_copyright_in_class_file_4(self):
test_file = self.get_test_loc('copyrights/copyright_no_copyright_in_class_file_4-PersistentListElementHolder_class.class')
expected = []
check_detection(expected, test_file)
def test_copyright_no_holder_java(self):
test_file = self.get_test_loc('copyrights/copyright_no_holder_java-java.java')
expected = [
u'Copyright (c) 2005',
]
check_detection(expected, test_file)
def test_copyright_nokia_cpp(self):
test_file = self.get_test_loc('copyrights/copyright_nokia_cpp-cpp.cpp')
expected = [
u'Copyright (c) 2002, Nokia Mobile Phones.',
]
check_detection(expected, test_file)
def test_copyright_north_c(self):
test_file = self.get_test_loc('copyrights/copyright_north_c-99_c.c')
expected = [
u'Copyright (c) 2010 42North Inc.',
]
check_detection(expected, test_file)
def test_copyright_notice2(self):
test_file = self.get_test_loc('copyrights/copyright_notice2-9_NOTICE')
expected = [
u'Copyright 2003-2009 The Apache Geronimo development community',
]
check_detection(expected, test_file)
def test_copyright_notice2_txt(self):
test_file = self.get_test_loc('copyrights/copyright_notice2_txt-NOTICE.txt')
expected = [
u'Copyright (c) 2004, Richard S. Hall',
u'Copyright (c) 2002,2003, Stefan Haustein, Oberhausen',
u'Copyright (c) 2002,2004, Stefan Haustein, Oberhausen',
u'Copyright (c) 2002,2003, Stefan Haustein, Oberhausen',
]
check_detection(expected, test_file)
def test_copyright_notice_name_before_statement(self):
test_file = self.get_test_loc('copyrights/copyright_notice_name_before_statement-NOTICE')
expected = [
u'iClick, Inc., software copyright (c) 1999.',
]
check_detection(expected, test_file)
def test_copyright_notice_txt(self):
test_file = self.get_test_loc('copyrights/copyright_notice_txt-NOTICE.txt')
expected = [
u'Copyright 2003-2010 The Knopflerfish Project',
u'Copyright (c) OSGi Alliance (2000, 2009).',
u'Copyright (c) 2000-2005 INRIA, France Telecom',
u'(c) 1999-2003.',
u'(c) 2001-2004',
u'Copyright (c) 2004, Didier Donsez',
u'(c) 2001-2004 http://commons.apache.org/logging',
u'(c) 1999-2003. http://xml.apache.org/dist/LICENSE.txt',
u'(c) 2001-2004',
u'Copyright (c) 2004, Richard S. Hall',
u'(c) 2001-2004 http://xml.apache.org/xalan-j',
u'(c) 2001-2004 http://xerces.apache.org',
]
check_detection(expected, test_file)
def test_copyright_o_brien_style_name(self):
test_file = self.get_test_loc('copyrights/copyright_o_brien_style_name.txt')
expected = [
u"Copyright (c) 2001-2003, Patrick K. O'Brien",
]
check_detection(expected, test_file)
def test_copyright_oberhummer_c_code(self):
test_file = self.get_test_loc('copyrights/copyright_oberhummer_c_code-c.c')
expected = [
'Copyright (c) 2005 Markus Franz Xaver Johannes Oberhumer',
'Copyright (c) 2004 Markus Franz Xaver Johannes Oberhumer',
'Copyright (c) 2003 Markus Franz Xaver Johannes Oberhumer',
'Copyright (c) 2002 Markus Franz Xaver Johannes Oberhumer',
'Copyright (c) 2001 Markus Franz Xaver Johannes Oberhumer',
'Copyright (c) 2000 Markus Franz Xaver Johannes Oberhumer',
'Copyright (c) 1999 Markus Franz Xaver Johannes Oberhumer',
'Copyright (c) 1998 Markus Franz Xaver Johannes Oberhumer',
'Copyright (c) 1997 Markus Franz Xaver Johannes Oberhumer',
'Copyright (c) 1996 Markus Franz Xaver Johannes Oberhumer',
]
check_detection(expected, test_file)
def test_copyright_oberhummer_text(self):
test_file = self.get_test_loc('copyrights/copyright_oberhummer_text.txt')
expected = [
'Copyright (c) 2005 Markus Franz Xaver Johannes Oberhumer',
'Copyright (c) 2004 Markus Franz Xaver Johannes Oberhumer',
'Copyright (c) 2003 Markus Franz Xaver Johannes Oberhumer',
'Copyright (c) 2002 Markus Franz Xaver Johannes Oberhumer',
'Copyright (c) 2001 Markus Franz Xaver Johannes Oberhumer',
'Copyright (c) 2000 Markus Franz Xaver Johannes Oberhumer',
'Copyright (c) 1999 Markus Franz Xaver Johannes Oberhumer',
'Copyright (c) 1998 Markus Franz Xaver Johannes Oberhumer',
'Copyright (c) 1997 Markus Franz Xaver Johannes Oberhumer',
'Copyright (c) 1996 Markus Franz Xaver Johannes Oberhumer',
]
check_detection(expected, test_file)
def test_copyright_objectivec(self):
test_file = self.get_test_loc('copyrights/copyright_objectivec-objectiveC_m.m')
expected = [
u'Copyright (c) 2009',
]
check_detection(expected, test_file)
def test_copyright_openhackware_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_openhackware_copyright_label-openhackware_copyright_label.label')
expected = [
u'Copyright (c) 2004-2005 Jocelyn Mayer <l_indien@magic.fr>',
u'Copyright (c) 2004-2005 Fabrice Bellard',
]
check_detection(expected, test_file)
def test_copyright_openoffice_org_report_builder_bin_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_openoffice_org_report_builder_bin_copyright-openoffice_org_report_builder_bin_copyright.copyright')
expected = [
u'Copyright (c) 2002-2009 Software in the Public Interest, Inc.',
u'Copyright (c) 2002-2009 ooo-build/Go-OO Team',
u'(c) Sun Microsystems.',
u'Copyright 2002-2009 Sun Microsystems, Inc.',
u'Copyright 2002-2009 Sun Microsystems, Inc.',
u'Copyright (c) 2002-2005 Maxim Shemanarev',
u'Copyright 2001-2004 The Apache Software Foundation.',
u'Copyright 2003-2007 The Apache Software Foundation',
u'Copyright 2001-2007 The Apache Software Foundation',
u'Copyright 1999-2007 The Apache Software Foundation',
u'Copyright (c) 2000 Pat Niemeyer',
u'Copyright (c) 2000',
u'Copyright (c) 2002 France Telecom',
u'Copyright (c) 1990-2003 Sleepycat Software',
u'Copyright (c) 1990, 1993, 1994, 1995 The Regents of the University of California',
u'Copyright (c) 2003 by Bitstream, Inc.',
u'Cppyright Copyright (c) 2006 by Tavmjong Bah',
u'Copyright (c) 2007 Red Hat, Inc',
u'Copyright (c) 2007 Red Hat, Inc.',
u'Copyright 2000-2003 Beman Dawes',
u'Copyright (c) 1998-2003 Joel de Guzman',
u'Copyright (c) 2001-2003 Daniel Nuffer',
u'Copyright (c) 2001-2003 Hartmut Kaiser',
u'Copyright (c) 2002-2003 Martin Wille',
u'Copyright (c) 2002 Juan Carlos Arevalo-Baeza',
u'Copyright (c) 2002 Raghavendra Satish',
u'Copyright (c) 2002 Jeff Westfahl',
u'Copyright (c) 2001 Bruce Florman',
u'Copyright 1999 Tom Tromey',
u'Copyright 2002, 2003 University of Southern California, Information Sciences Institute',
u'Copyright 2004 David Reveman',
u'Copyright 2000, 2002, 2004, 2005 Keith Packard',
u'Copyright 2004 Calum Robinson',
u'Copyright 2004 Richard D. Worth',
u'Copyright 2004, 2005 Red Hat, Inc.',
u'Copyright 2004 David Reveman',
u'(c) Copyright 2000, Baptiste Lepilleur',
u'Copyright (c) 1996 - 2004, Daniel Stenberg',
u'Copyright (c) 1992,1994 by Dennis Vadura',
u'Copyright (c) 1996 by WTI Corp.',
u'Copyright 1999-2003 by Easy Software Products',
u'Copyright (c) 1998, 1999 Thai Open Source Software Center Ltd',
u'Copyright (c) 1987, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.',
u'Copyright (c) 2000,2001,2002,2003 by George William',
u'Copyright (c) 2001-2008, The HSQL Development Group',
u'Copyright (c) 1995, 1997, 2000, 2001, 2002 Free Software Foundation, Inc.',
u'Copyright (c) Kevin Hendricks',
u'Copyright (c) 2002-2008 Laszlo Nemeth',
u'Copyright (c) 2000 Bjoern Jacke',
u'Copyright 2000 by Sun Microsystems, Inc.',
u'Copyright (c) 1998 Raph Levien',
u'Copyright (c) 2001 ALTLinux, Moscow',
u'Copyright (c) 2006, 2007, 2008 Laszlo Nemeth',
u'Copyright (c) 2003-2006 The International Color Consortiu',
u'Copyright (c) 1995-2008 International Business Machines Corporation',
u'Copyright 2000-2005, by Object Refinery Limited',
u'Copyright 2005-2007, by Pentaho Corporation',
u'Copyright 1994-2002 World Wide Web Consortium',
u'Copyright (c) 1991-1998, Thomas G. Lane',
u'Copyright 1994-2002 World Wide Web Consortium',
u'Copyright (c) 2002 Anders Carlsson <andersca@gnu.org>',
u'Copyright (c) 2003, WiseGuys Internet',
u'Copyright (c) 2003, WiseGuys Internet',
u'Copyright 1997-1999 World Wide Web Consortium',
u'Copyright (c) 2002-2003 Aleksey Sanin',
u'Copyright (c) 2003 America Online, Inc.',
u'Copyright (c) 2001-2002 Daniel Veillard',
u'Copyright (c) 1998-2001 by the University of Florida',
u'Copyright (c) 1991, 2007 Free Software Foundation, Inc',
u'Copyright 2004 The Apache Software Foundation',
u'Copyright 2005 The Apache Software Foundation',
u'Copyright 2007 The Apache Software Foundation',
u'Copyright (c) 1999-2007 Brian Paul',
u'Copyright (c) 2007 The Khronos Group Inc.',
u'Copyright (c) 2003 Stuart Caie <kyzer@4u.net>',
u'Copyright (c) 1999-2006 Joe Orton <joe@manyfish.co.uk>',
u'Copyright (c) 1999-2000 Tommi Komulainen <Tommi.Komulainen@iki.fi>',
u'Copyright (c) 1999-2000 Peter Boos <pedib@colorfullife.com>',
u'Copyright (c) 1991, 1995, 1996, 1997 Free Software Foundation, Inc.',
u'Copyright (c) 2004 Aleix Conchillo Flaque <aleix@member.fsf.org>',
u'Copyright (c) 2004 Jiang Lei <tristone@deluxe.ocn.ne.jp>',
u'Copyright (c) 2004-2005 Vladimir Berezniker',
u'Copyright (c) 1998 Netscape Communications Corporation',
u'Copyright (c) 1998-2007 The OpenSSL Project',
u'Copyright (c) 1998-2007 The OpenSSL Project',
u'Copyright (c) 1995-1998 Eric Young (eay@cryptsoft.com)',
u'Copyright (c) 2001, 2002, 2003, 2004 Python Software Foundation',
u'Copyright (c) 2000 BeOpen.com',
u'Copyright (c) 1995-2001 Corporation for National Research Initiatives',
u'Copyright (c) 1991-1995 Stichting Mathematisch Centrum',
u'Copyright (c) 2000-2007 David Beckett',
u'Copyright (c) 2000-2005 University of Bristol',
u'Copyright (c) 1993, 94, 95, 96, 97, 98, 99 Free Software Foundation, Inc',
u'Copyright (c) 1997-2000 Netscape Communications Corporation',
u'Copyright (c) 2000 see Beyond Communications Corporation',
u'Copyright (c) 1997 David Mosberger-Tang and Andreas Beck',
u'Copyright (c) 1998, 1999 James Clark',
u'Copyright ? 1999',
u'Copyright (c) 2002-2003 Aleksey Sanin',
u'Copyright (c) 2003 America Online, Inc.',
u'Copyright (c) 2001-2002 Daniel Veillard',
u'Copyright (c) 1998-2001 by the University of Florida',
u'Copyright (c) 1991, 2007 Free Software Foundation, Inc',
u'Copyright 2004 The Apache Software Foundation',
u'Copyright 2005 The Apache Software Foundation',
u'Copyright 2007 The Apache Software Foundation',
u'Copyright (c) 1999-2007 Brian Paul',
u'Copyright (c) 2007 The Khronos Group Inc.',
u'Copyright (c) 2003 Stuart Caie <kyzer@4u.net>',
u'Copyright (c) 1999-2006 Joe Orton <joe@manyfish.co.uk>',
u'Copyright (c) 1999-2000 Tommi Komulainen <Tommi.Komulainen@iki.fi>',
u'Copyright (c) 1999-2000 Peter Boos <pedib@colorfullife.com>',
u'Copyright (c) 1991, 1995, 1996, 1997 Free Software Foundation, Inc.',
u'Copyright (c) 2004 Aleix Conchillo Flaque <aleix@member.fsf.org>',
u'Copyright (c) 2004 Jiang Lei <tristone@deluxe.ocn.ne.jp>',
u'Copyright (c) 2004-2005 Vladimir Berezniker',
u'Copyright (c) 1998 Netscape Communications Corporation',
u'Copyright (c) 1998-2007 The OpenSSL Project',
u'Copyright (c) 1998-2007 The OpenSSL Project',
u'Copyright (c) 1995-1998 Eric Young (eay@cryptsoft.com)',
u'Copyright (c) 2001, 2002, 2003, 2004 Python Software Foundation',
u'Copyright (c) 2000 BeOpen.com',
u'Copyright (c) 1995-2001 Corporation for National Research Initiatives',
u'Copyright (c) 1991-1995 Stichting Mathematisch Centrum',
u'Copyright (c) 2000-2007 David Beckett',
u'Copyright (c) 2000-2005 University of Bristol',
u'Copyright (c) 1993, 94, 95, 96, 97, 98, 99 Free Software Foundation, Inc',
u'Copyright (c) 1997-2000 Netscape Communications Corporation',
u'Copyright (c) 2000 see Beyond Communications Corporation',
u'Copyright (c) 1997 David Mosberger-Tang and Andreas Beck',
u'Copyright (c) 1998, 1999 James Clark',
u'Copyright ? 1999',
u'Copyright (c) 1994 Hewlett-Packard Company',
u'Copyright (c) 1996-1999 Silicon Graphics Computer Systems, Inc.',
u'Copyright (c) 1997 Moscow Center for SPARC Technology',
u'Copyright (c) 1999, 2000, 2001 Boris Fomitchev',
u'Copyright 1999-2002,2004 The Apache Software Foundation',
u'Copyright (c) 1991, 1992 TWAIN Working Group',
u'Copyright (c) 1997 TWAIN Working Group',
u'Copyright (c) 1998 TWAIN Working Group',
u'Copyright (c) 2000 TWAIN Working Group',
u'Copyright 1998-2001 by Ullrich Koethe',
u'Copyright 2004 by Urban Widmark',
u'Copyright 2002-2007 by Henrik Just',
u'Copyright (c) 2000, Compaq Computer Corporation',
u'Copyright (c) 2002, Hewlett Packard, Inc',
u'Copyright (c) 2000 SuSE, Inc.',
u'Copyright 1996-2007 Glyph & Cog, LLC.',
u'Copyright (c) 1995-2002 Jean-loup Gailly and Mark Adler',
]
check_detection(expected, test_file)
def test_copyright_openoffice_org_report_builder_bin_copyright2(self):
test_file = self.get_test_loc('copyrights/copyright_openoffice_org_report_builder_bin_copyright2-openoffice_org_report_builder_bin_copyright.copyright2')
expected = [
u'Copyright (c) 1990, 1993, 1994, 1995 The Regents of the University of California',
u'Copyright (c) 1995, 1996 The President and Fellows of Harvard University',
]
check_detection(expected, test_file)
def test_copyright_openssl(self):
test_file = self.get_test_loc('copyrights/copyright_openssl-c.c')
expected = [
'Copyright (c) 1995-1997 Eric Young (eay@mincom.oz.au)',
]
check_detection(expected, test_file)
def test_copyright_partial_detection(self):
test_file = self.get_test_loc('copyrights/copyright_partial_detection.txt')
expected = [
u'Copyright 1991 by the Massachusetts Institute of Technology',
u'Copyright (c) 2001 AT&T',
u'Copyright (c) 2004-2006 by Henrique de Moraes Holschuh <hmh@debian.org>',
u'Copyright 2005-2007 Christopher Montgomery , Jean-Marc Valin , Timothy Terriberry',
u'Copyright (c) 2007 James Newton-King',
u'Copyright (c) 2006, SHIMODA Hiroshi <piro@p.club.ne.jp>',
u'Copyright (c) 2006, FUJITA Yuji <yuji@webmasters.gr.jp>',
u'Copyright (c) 2002-2009 ooo-build/Go-OO Team',
u'Copyright (c) 2002-2009 Software in the Public Interest, Inc.',
u'Copyright (c) 2004 by the Perl 5 Porters',
u'Copyright (c) 2006 Academy of Motion Picture Arts and Sciences',
u'Copyright (c) 1995-2000 Corporation for National Research Initiatives',
u'Copyright (c) 2001 EU DataGrid',
u'Copyright (c) 2000. OCLC Research',
u'Copyright (c) 1999 Trolltech AS',
]
check_detection(expected, test_file)
def test_copyright_partial_detection_mit(self):
test_file = self.get_test_loc('copyrights/copyright_partial_detection_mit.txt')
expected = [
u'Copyright 1991 by the Massachusetts Institute of Technology',
u'Copyright (c) 2001 AT&T',
u'Copyright (c) 2004-2006 by Henrique de Moraes Holschuh <hmh@debian.org>',
u'Copyright 2005-2007 Christopher Montgomery , Jean-Marc Valin , Timothy Terriberry',
u'Copyright (c) 2007 James Newton-King',
u'Copyright (c) 2006, SHIMODA Hiroshi <piro@p.club.ne.jp>',
u'Copyright (c) 2006, FUJITA Yuji <yuji@webmasters.gr.jp>',
u'Copyright (c) 2002-2009 ooo-build/Go-OO Team',
u'Copyright (c) 2002-2009 Software in the Public Interest, Inc.',
u'Copyright (c) 2004 by the Perl 5 Porters',
u'Copyright (c) 2006 Academy of Motion Picture Arts and Sciences',
u'Copyright (c) 1995-2000 Corporation for National Research Initiatives',
u'Copyright (c) 2001 EU DataGrid',
u'Copyright (c) 2000. OCLC Research',
u'Copyright (c) 1999 Trolltech AS',
]
check_detection(expected, test_file)
def test_copyright_perl_base_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_perl_base_copyright-perl_base_copyright.copyright')
expected = [
u'Copyright 1989-2001, Larry Wall',
u'Copyright (c) 1995-2005 Jean-loup Gailly and Mark Adler',
u'Copyright (c) 1991-2006 Unicode, Inc.',
u'Copyright (c) 1991-2008 Unicode, Inc.',
u'Copyright (c) 2004 by the Perl 5 Porters',
u'copyright (c) 1994 by the Regents of the University of California',
u'Copyright (c) 1994 The Regents of the University of California',
u'Copyright (c) 1989, 1993 The Regents of the University of California',
u'copyright (c) 1996-2007 Julian R Seward',
]
check_detection(expected, test_file)
def test_copyright_perl_module(self):
test_file = self.get_test_loc('copyrights/copyright_perl_module-pm.pm')
expected = [
u'Copyright (c) 1995-2000 Name Surname',
]
check_detection(expected, test_file)
def test_copyright_peter_c(self):
test_file = self.get_test_loc('copyrights/copyright_peter_c-c.c')
expected = [
u'(c) 2005 - Peter Nederlof',
]
check_detection(expected, test_file)
def test_copyright_php_lib(self):
test_file = self.get_test_loc('copyrights/copyright_php_lib-php_embed_lib.lib')
expected = [
u'Copyright nexB and others (c) 2012',
]
check_detection(expected, test_file)
def test_copyright_piersol(self):
test_file = self.get_test_loc('copyrights/copyright_piersol-TestMatrix_D_java.java')
expected = [
u'Copyright (c) 1998 Company PIERSOL Engineering Inc.',
u'Copyright (c) 1998 Company PIERSOL Engineering Inc.',
]
check_detection(expected, test_file)
def test_copyright_postgresql_8_3_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_postgresql_8_3_copyright_label-postgresql__copyright_label.label')
expected = [
u'Portions Copyright (c) 1996-2003, The PostgreSQL Global Development Group',
u'Portions Copyright (c) 1994, The Regents of the University of California',
u'Copyright (c) 1998, 1999 Henry Spencer',
]
check_detection(expected, test_file)
def test_copyright_prof_informatics(self):
test_file = self.get_test_loc('copyrights/copyright_prof_informatics.txt')
expected = [
u'Professional Informatics (c) 1994',
]
check_detection(expected, test_file)
def test_copyright_professional_txt(self):
test_file = self.get_test_loc('copyrights/copyright_professional_txt-copyright.txt')
expected = [
u'Professional Informatics (c) 1994',
]
check_detection(expected, test_file)
def test_copyright_properties(self):
test_file = self.get_test_loc('copyrights/copyright_properties-properties.properties')
expected = [
u'(c) 2004-2007 Restaurant.',
]
check_detection(expected, test_file)
def test_copyright_psf_in_python(self):
test_file = self.get_test_loc('copyrights/copyright_psf_in_python-BitVector_py.py')
expected = [
u'copyright (c) 2008 Avinash Kak. Python Software Foundation.',
]
check_detection(expected, test_file)
def test_copyright_python_dateutil_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_python_dateutil_copyright-python_dateutil_copyright.copyright')
expected = [
u'Copyright (c) 2001, 2002 Python Software Foundation',
u'Copyright (c) 1995-2001 Corporation for National Research Initiatives',
u'Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam',
]
check_detection(expected, test_file)
def test_copyright_python_psyco_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_python_psyco_copyright-python_psyco_copyright.copyright')
expected = [
u'Copyright (c) 2001-2003 Armin Rigo',
]
check_detection(expected, test_file)
def test_copyright_python_reportbug_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_python_reportbug_copyright_label-python_report_copyright_label.label')
expected = [
u'Copyright (c) 1999-2006 Chris Lawrence',
u'Copyright (c) 2008-2009 Sandro Tosi <morph@debian.org>',
u'Copyright (c) 1996-2000 Christoph Lameter <clameter@debian.org>',
u'(c) 1996-2000 Nicolas Lichtmaier <nick@debian.org>',
u'(c) 2000 Chris Lawrence <lawrencc@debian.org>',
u'Copyright (c) 2008 Ben Finney <ben+debian@benfinney.id.au>',
u'Copyright (c) 2008 Ben Finney <ben+debian@benfinney.id.au>',
u'Copyright (c) 2008 Sandro Tosi <morph@debian.org>',
u'Copyright (c) 2006 Philipp Kern <pkern@debian.org>',
u'Copyright (c) 2008-2009 Luca Bruno <lethalman88@gmail.com>',
]
check_detection(expected, test_file)
def test_copyright_python_software_properties_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_python_software_properties_copyright-python_software_properties_copyright.copyright')
expected = [
u'Copyright 2004-2007 Canonical Ltd. 2004-2005 Michiel Sikkes 2006',
]
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_red_hat_openoffice_org_report_builder_bin_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_red_hat_openoffice_org_report_builder_bin_copyright-openoffice_org_report_builder_bin_copyright.copyright')
expected = [
u'Copyright (c) 2007 Red Hat, Inc',
u'Copyright (c) 2007 Red Hat, Inc.',
]
check_detection(expected, test_file)
def test_copyright_regents_complex(self):
test_file = self.get_test_loc('copyrights/copyright_regents_complex-strtol_c.c')
expected = [
'Copyright (c) 1990 The Regents of the University of California.',
]
check_detection(expected, test_file)
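    # The expectedFailure below documents a known detection gap; presumably
    # the "copyrighted by ..." phrasing and the long year list are not yet
    # handled by the scanner.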
@expectedFailure
def test_copyright_regents_license(self):
test_file = self.get_test_loc('copyrights/copyright_regents_license-LICENSE')
expected = [
u'copyrighted by The Regents of the University of California.',
u'Copyright 1979, 1980, 1983, 1986, 1988, 1989, 1991, 1992, 1993, 1994 The Regents of the University of California.',
u'copyright C 1988 by the Institute of Electrical and Electronics Engineers, Inc.',
]
check_detection(expected, test_file)
def test_copyright_resig_js(self):
test_file = self.get_test_loc('copyrights/copyright_resig_js-js.js')
expected = [
u'Copyright (c) 2009 John Resig',
]
check_detection(expected, test_file)
def test_copyright_rusty(self):
test_file = self.get_test_loc('copyrights/copyright_rusty.txt')
expected = [
u'(c) Rusty Russell, IBM 2002',
]
check_detection(expected, test_file)
def test_copyright_rusty_c(self):
test_file = self.get_test_loc('copyrights/copyright_rusty_c-c.c')
expected = [
u'(c) Rusty Russell, IBM 2002',
]
check_detection(expected, test_file)
def test_copyright_s_fabsl_c(self):
test_file = self.get_test_loc('copyrights/copyright_s_fabsl_c-s_fabsl_c.c')
expected = [
u'Copyright (c) 2003 Dag-Erling Coidan Smrgrav',
]
check_detection(expected, test_file)
def test_copyright_sample_java(self):
test_file = self.get_test_loc('copyrights/copyright_sample_java-java.java')
expected = [
u'Copyright (c) 2000-2007, Sample ABC Inc.',
]
check_detection(expected, test_file)
def test_copyright_sample_no_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_sample_no_copyright-c.c')
expected = []
check_detection(expected, test_file)
def test_copyright_seahorse_plugins(self):
test_file = self.get_test_loc('copyrights/copyright_seahorse_plugins-seahorse_plugins_copyright.copyright')
expected = [
u'Copyright (c) 2004-2007 Stefan Walter',
u'Copyright (c) 2004-2006 Adam Schreiber',
u'Copyright (c) 2001-2003 Jose Carlos Garcia Sogo',
u'Copyright (c) 2002, 2003 Jacob Perkins',
u'Copyright (c) 2004, 2006 Nate Nielsen',
u'Copyright (c) 2000-2004 Marco Pesenti Gritti',
u'Copyright (c) 2003-2006 Christian Persch',
u'Copyright (c) 2004, 2006 Jean-Francois Rameau',
u'Copyright (c) 2000, 2001 Eazel, Inc.',
u'Copyright (c) 2007, 2008 Jorge Gonzalez',
u'Copyright (c) 2007, 2008 Daniel Nylander',
u'Copyright (c) 2004-2005 Shaun McCance',
u'Copyright (c) 2007 Milo Casagrande',
u'Copyright (c) 2007-2008 Claude Paroz',
u'Copyright (c) 2007 GNOME',
# Copyright © 2008 <s>Василий Фаронов</s>
u'i18n Project for Vietnamese Copyright (c) 2008',
u'Copyright (c) 1992-2008 Free Software Foundation, Inc.',
u'Copyright (c) 1999 Dave Camp',
u'Copyright (c) 2005 Tecsidel S.A.',
u'Copyright (c) 2004-2005 Adam Weinberger',
u'Copyright (c) 2007, 2008 The GNOME Project',
u'Copyright (c) 2007 Swecha Telugu Localisation Team',
u'Copyright (c) 1995-1997 Ulrich Drepper',
u'Copyright (c) 2004-2008 Rodney Dawes',
u'Copyright (c) 1999, 2000 Anthony Mulcahy',
u'Copyright (c) 2007 Ihar Hrachyshka',
u'Copyright (c) 2004, 2005 Miloslav Trmac',
u'Copyright (c) 2003 Peter Mato',
u'Copyright (c) 2004, 2005 Danijel Studen , Denis Lackovic , Ivan Jankovic',
u'Copyright (c) 1994 X Consortium',
u'Copyright (c) 2006 Alexander Larsson',
u'Copyright (c) 2000-2003 Ximian Inc.',
u'Copyright (c) 1995-1997 Peter Mattis , Spencer Kimball and Josh MacDonald',
u'Copyright (c) 1999, 2000 Robert Bihlmeyer',
u'Copyright (c) Crispin Flowerday',
u'Copyright (c) 2008 Frederic Peters',
u'Copyright (c) 2008 Lucas Lommer',
u'Copyright (c) 2008 Mario Blattermann',
u'Copyright (c) 2001-2004 Red Hat, Inc.',
u'Copyright (c) 2004 Scott James Remnant',
            # note this is not correct and severely truncated
u'Copyright (c) 1998-2006 by the following: Dave Ahlswede, Manuel Amador, Matt Amato, Daniel Atallah, Paul Aurich, Patrick Aussems, Anibal Avelar, Alex Badea, John Bailey, Chris Banal, Luca Barbato, Levi Bard, Kevin Barry, Derek Battams, Martin Bayard, Curtis Beattie, Dave Bell, Igor Belyi, Brian Bernas, Paul Betts, Jonas Birme, Eric Blade, Ethan Blanton, Joshua Blanton, Rainer Blessing, Herman Bloggs, David Blue, Jason Boerner, Graham Booker, Paolo Borelli, Julien Bossart, Craig Boston, Chris Boyle, Derrick J Brashear, Matt Brenneke, Jeremy Brooks, Philip Brown, Sean Burke, Thomas Butter, Andrea Canciani, Damien Carbery, Michael Carlson, Keegan Carruthers-Smith, Steve Cavilia, Julien Cegarra, Cerulean Studios, LLC',
u'Copyright (c) 2008 Sebastien Bacher , Andreas Moog , Emilio Pozuelo Monfort and Josselin Mouette',
]
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_simgear1_0_0_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_simgear1_0_0_copyright-simgear__copyright.copyright')
expected = [
u'Copyright (c) 1999-2000 Curtis L. Olson <curt@flightgear.org>',
u'Copyright (c) 2002-2004 Mark J. Harris',
]
check_detection(expected, test_file)
def test_copyright_snippet_no_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_snippet_no_copyright')
expected = []
check_detection(expected, test_file)
def test_copyright_snmptrapd_c(self):
test_file = self.get_test_loc('copyrights/copyright_snmptrapd_c-snmptrapd_c.c')
expected = [
u'Copyright 1989, 1991, 1992 by Carnegie Mellon University',
]
check_detection(expected, test_file)
def test_copyright_some_co(self):
test_file = self.get_test_loc('copyrights/copyright_some_co-9_h.h')
expected = [
u'Copyright Some Company, inc.',
]
check_detection(expected, test_file)
def test_copyright_somefile_cpp(self):
test_file = self.get_test_loc('copyrights/copyright_somefile_cpp-somefile_cpp.cpp')
expected = [
u'(c) 2005',
u'Copyright Private Company (PC) Property of Private Company',
u'Copyright (2003) Private Company',
]
check_detection(expected, test_file)
def test_copyright_source_auditor_projectinfo_java(self):
test_file = self.get_test_loc('copyrights/copyright_source_auditor_projectinfo_java-ProjectInfo_java.java')
expected = [
u'Copyright (c) 2009 Source Auditor Inc.',
]
check_detection(expected, test_file)
def test_copyright_stacktrace_cpp(self):
test_file = self.get_test_loc('copyrights/copyright_stacktrace_cpp-stacktrace_cpp.cpp')
expected = [
u'Copyright 2003, 2004 Rickard E. Faith (faith@dict.org)',
]
check_detection(expected, test_file)
def test_copyright_stmicro_in_h(self):
test_file = self.get_test_loc('copyrights/copyright_stmicro_in_h-h.h')
expected = [
u'COPYRIGHT (c) ST-Microelectronics 1998.',
]
check_detection(expected, test_file)
def test_copyright_stmicro_in_txt(self):
test_file = self.get_test_loc('copyrights/copyright_stmicro_in_txt.txt')
expected = [
u'COPYRIGHT (c) STMicroelectronics 2005.',
u'COPYRIGHT (c) ST-Microelectronics 1998.',
]
check_detection(expected, test_file)
def test_copyright_strchr_assembly(self):
test_file = self.get_test_loc('copyrights/copyright_strchr_assembly-9_9_strchr_S.S')
expected = [
u'Copyright (c) 2007 ARC International (UK) LTD',
]
check_detection(expected, test_file)
def test_copyright_super_tech_c(self):
test_file = self.get_test_loc('copyrights/copyright_super_tech_c-c.c')
expected = [
u'Copyright (c) $LastChangedDate$ Super Technologies Corporation, Cedar Rapids, Iowa',
u'Copyright (c) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), IBM Corp.',
]
check_detection(expected, test_file)
@expectedFailure
def test_copyright_tcl_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_tcl_copyright-tcl_copyright.copyright')
expected = [
u'copyrighted by the Regents of the University of California , Sun Microsystems, Inc. , Scriptics Corporation', # not found, rather complex
u'Copyright (c) 2007 Software in the Public Interest',
]
check_detection(expected, test_file)
def test_copyright_tech_sys(self):
test_file = self.get_test_loc('copyrights/copyright_tech_sys.txt')
expected = [
u'(c) Copyright 1985-1999 SOME TECHNOLOGY SYSTEMS',
]
check_detection(expected, test_file)
def test_copyright_texinfo_tex(self):
test_file = self.get_test_loc('copyrights/copyright_texinfo_tex-texinfo_tex.tex')
expected = [
u'Copyright (c) 1985, 1986, 1988, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.',
]
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_texlive_lang_greek_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_texlive_lang_greek_copyright-texlive_lang_greek_copyright.copyright')
expected = [
u'Copyright 1999 2002-2006 LaTeX3 Project',
u'Copyright 2005 M. Y. Name',
]
check_detection(expected, test_file)
def test_copyright_texlive_lang_spanish_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_texlive_lang_spanish_copyright-texlive_lang_spanish_copyright.copyright')
expected = [
u'Copyright 1999 2002-2006 LaTeX3 Project',
u'Copyright 2005 M. Y. Name',
]
check_detection(expected, test_file)
def test_copyright_texlive_lang_vietnamese_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_texlive_lang_vietnamese_copyright_label-texlive_lang_vietnamese_copyright_label.label')
expected = [
u'Copyright 1999 2002-2006 LaTeX3 Project',
u'Copyright 2005 M. Y. Name',
]
check_detection(expected, test_file)
def test_copyright_tfc_c(self):
test_file = self.get_test_loc('copyrights/copyright_tfc_c-c.c')
expected = [
u'Copyright 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001 Traditional Food Consortium, Inc.',
u'Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 Traditional Food Consortium, Inc.',
]
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_thirdpartyproject_prop(self):
test_file = self.get_test_loc('copyrights/copyright_thirdpartyproject_prop-ThirdPartyProject_prop.prop')
expected = [
u'Copyright 2010 Google Inc.',
]
check_detection(expected, test_file)
@expectedFailure
def test_copyright_trailing_For(self):
test_file = self.get_test_loc('copyrights/copyright_trailing_For-copyright_c.c')
expected = [
u'Copyright . 2008 Mycom Pany, inc.',
u'Copyright (c) 1995-2003 Jean-loup Gailly.',
]
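        # Inverted subset check (explanatory note): every string in
        # `expected` must be detected (expected_in_results=True), while
        # extra detections are tolerated (results_in_expected=False).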
check_detection(expected, test_file,
expected_in_results=True,
results_in_expected=False)
def test_copyright_trailing_name(self):
test_file = self.get_test_loc('copyrights/copyright_trailing_name-copyright.txt')
expected = [
u'Copyright (c) 1998, 1999, 2000 Thai Open Source Software Center Ltd and Clark Cooper',
]
check_detection(expected, test_file,
expected_in_results=False,
results_in_expected=True)
def test_copyright_trailing_redistribution(self):
test_file = self.get_test_loc('copyrights/copyright_trailing_redistribution-bspatch_c.c')
expected = [
u'Copyright (c) 2008 The Android Open Source Project',
u'Copyright 2003-2005 Colin Percival',
]
check_detection(expected, test_file)
def test_copyright_transcode_doc_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_transcode_doc_copyright-transcode_doc_copyright.copyright')
expected = [
u'Copyright (c) 2001 Thomas Ostreich',
]
check_detection(expected, test_file)
def test_copyright_transfig_copyright_with_parts(self):
test_file = self.get_test_loc('copyrights/copyright_transfig_copyright_with_parts-transfig_copyright.copyright')
expected = [
u'Copyright (c) 1985-1988 Supoj Sutantavibul',
u'Copyright (c) 1991-1999 Micah Beck',
u'Copyright (c) 1989-2002 by Brian V. Smith',
u'Copyright (c) 1991 by Paul King',
u'Copyright (c) 1995 C. Blanc and C. Schlick',
u'Copyright (c) 1993 Anthony Starks',
u'Copyright (c) 1992 Uri Blumenthal',
u'Copyright (c) 1992 by Brian Boyter',
u'Copyright (c) 1995 Dane Dwyer',
u'Copyright (c) 1999 by Philippe Bekaert',
u'Copyright (c) 1999 by T. Sato',
u'Copyright (c) 1998 by Mike Markowski',
u'Copyright (c) 1994-2002 by Thomas Merz',
u'Copyright (c) 2002-2006 by Martin Kroeker',
u'Copyright 1990, David Koblas',
u'Copyright, 1987, Massachusetts Institute of Technology',
u'Copyright (c) 2006 Michael Pfeiffer p3fff@web.de',
]
check_detection(expected, test_file)
def test_copyright_treetablemodeladapter_java(self):
test_file = self.get_test_loc('copyrights/copyright_treetablemodeladapter_java-TreeTableModelAdapter_java.java')
expected = [
u'Copyright 1997, 1998 by Sun Microsystems, Inc.',
]
check_detection(expected, test_file)
def test_copyright_truncated_dmv_c(self):
test_file = self.get_test_loc('copyrights/copyright_truncated_dmv_c-9_c.c')
expected = [
u'Copyright (c) 1995 DMV - DigiMedia Vision',
]
check_detection(expected, test_file)
def test_copyright_truncated_doe(self):
test_file = self.get_test_loc('copyrights/copyright_truncated_doe-c.c')
expected = [
u'Copyright (c) 2008 by John Doe',
]
check_detection(expected, test_file)
def test_copyright_truncated_inria(self):
test_file = self.get_test_loc('copyrights/copyright_truncated_inria.txt')
expected = [
u'(c) 1998-2000 (W3C) MIT, INRIA, Keio University',
]
check_detection(expected, test_file)
def test_copyright_truncated_rusty(self):
test_file = self.get_test_loc('copyrights/copyright_truncated_rusty-c.c')
expected = [
u'(c) 1999-2001 Paul Rusty Russell',
]
check_detection(expected, test_file)
def test_copyright_truncated_swfobject_js(self):
test_file = self.get_test_loc('copyrights/copyright_truncated_swfobject_js-swfobject_js.js')
expected = [
u'Copyright (c) 2007-2008 Geoff Stearns, Michael Williams, and Bobby van der Sluis',
]
check_detection(expected, test_file)
def test_copyright_ttf_malayalam_fonts_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_ttf_malayalam_fonts_copyright-ttf_malayalam_fonts_copyright.copyright')
expected = [
u'Copyright (c) Jeroen Hellingman <jehe@kabelfoon.nl> , N.V Shaji <nvshaji@yahoo.com>',
u'Copyright (c) 2004 Kevin',
u'Copyright (c) Suresh',
u'Copyright (c) 2007 Hiran Venugopalan',
u'Copyright (c) 2007 Hussain',
u'Copyright (c) 2005 Rachana Akshara Vedi',
u'Copyright (c) CDAC, Mumbai Font Design',
u'Copyright (c) 2003 Modular Infotech, Pune',
u'Copyright (c) 2006 Modular Infotech Pvt Ltd.',
u'Copyright (c) 2009 Red Hat, Inc.',
]
check_detection(expected, test_file)
def test_copyright_tunnel_h(self):
test_file = self.get_test_loc('copyrights/copyright_tunnel_h-tunnel_h.h')
expected = [
u'Copyright (c) 2000 Frank Strauss <strauss@ibr.cs.tu-bs.de>',
]
check_detection(expected, test_file)
def test_copyright_two_digits_years(self):
test_file = self.get_test_loc('copyrights/copyright_two_digits_years-digits_c.c')
expected = [
'Copyright (c) 1987,88,89,90,91,92,93,94,96,97 Free Software Foundation, Inc.',
]
check_detection(expected, test_file)
@expectedFailure
def test_copyright_url_in_html(self):
test_file = self.get_test_loc('copyrights/copyright_url_in_html-detail_9_html.html')
expected = [
'(c) 2004-2009 pudn.com',
]
check_detection(expected, test_file)
def test_copyright_utilities_js(self):
test_file = self.get_test_loc('copyrights/copyright_utilities_js-utilities_js.js')
expected = [
u'Copyright (c) 2009, Yahoo! Inc.',
u'Copyright 2001 Robert Penner',
]
check_detection(expected, test_file)
def test_copyright_var_route_c(self):
test_file = self.get_test_loc('copyrights/copyright_var_route_c-var_route_c.c')
expected = [
u'Copyright 1988, 1989 by Carnegie Mellon University',
u'Copyright 1989 TGV, Incorporated',
u'Erik Schoenfelder (schoenfr@ibr.cs.tu-bs.de) 1994/1995.',
u'Simon Leinen (simon@switch.ch) 1997',
]
check_detection(expected, test_file)
def test_copyright_view_layout2_xml(self):
test_file = self.get_test_loc('copyrights/copyright_view_layout2_xml-view_layout_xml.xml')
expected = [
u'Copyright (c) 2008 Esmertec AG.',
]
check_detection(expected, test_file)
def test_copyright_warning_parsing_empty_text(self):
test_file = self.get_test_loc('copyrights/copyright_warning_parsing_empty_text-controlpanel_anjuta.anjuta')
expected = []
check_detection(expected, test_file)
def test_copyright_web_app_dtd__b_sun(self):
test_file = self.get_test_loc('copyrights/copyright_web_app_dtd_b_sun-web_app__dtd.dtd')
expected = [
u'Copyright 2000-2007 Sun Microsystems, Inc.',
]
check_detection(expected, test_file)
def test_copyright_web_app_dtd_sun_twice(self):
test_file = self.get_test_loc('copyrights/copyright_web_app_dtd_sun_twice-web_app__b_dtd.dtd')
expected = [
u'Copyright (c) 2000 Sun Microsystems, Inc.',
u'Copyright (c) 2000 Sun Microsystems, Inc.',
]
check_detection(expected, test_file)
def test_copyright_wide_c(self):
test_file = self.get_test_loc('copyrights/copyright_wide_c-c.c')
expected = [
u'Copyright (c) 1995, 1996, 1997, and 1998 WIDE Project.',
]
check_detection(expected, test_file)
def test_copyright_wide_txt(self):
test_file = self.get_test_loc('copyrights/copyright_wide_txt.txt')
expected = [
u'Copyright (c) 1995, 1996, 1997, and 1998 WIDE Project.',
]
check_detection(expected, test_file)
def test_copyright_with_verbatim_lf(self):
test_file = self.get_test_loc('copyrights/copyright_with_verbatim_lf-verbatim_lf_c.c')
expected = [
u'Copyright 2003-2005 Colin Percival',
]
check_detection(expected, test_file)
def test_copyright_xconsortium_sh(self):
test_file = self.get_test_loc('copyrights/copyright_xconsortium_sh-9_sh.sh')
expected = [
u'Copyright (c) 1994 X Consortium',
]
check_detection(expected, test_file)
def test_copyright_xfonts_utils_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_xfonts_utils_copyright-xfonts_utils_copyright.copyright')
expected = [
u'Copyright 1991, 1993, 1998 The Open Group',
u'Copyright 2005 Red Hat, Inc.',
u'Copyright 2005 Red Hat, Inc',
u'Copyright (c) 1991-2003 Unicode, Inc.',
u'Copyright (c) 2003 The NetBSD Foundation, Inc.',
u'Copyright (c) 2006 Martin Husemann.',
u'Copyright (c) 2007 Joerg Sonnenberger.',
u'Copyright (c) 2002-2008 by Juliusz Chroboczek',
u'Copyright (c) 1987, 1993 The Regents of the University of California.',
u'Copyright 1993, 1994, 1998 The Open Group',
u'Copyright (c) 2002-2008 by Juliusz Chroboczek',
u'Copyright 1999, 2001, 2002, 2004 Branden Robinson',
u'Copyright 2006 Steve Langasek',
u'Copyright 1999, 2001, 2002, 2004 Branden Robinson',
u'Copyright 1999-2002, 2004 Branden Robinson',
u'Copyright 2006 Steve Langasek',
]
check_detection(expected, test_file)
def test_copyright_xresprobe_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_xresprobe_copyright_label-xresprobe_copyright_label.label')
expected = [
u'copyright (c) 2004 Canonical Software',
u'Copyright (c) 2002 Terra Soft Solutions, Inc.',
u'Copyright (c) 1998 by Josh Vanderhoof',
u'Copyright (c) 1996-1999 SciTech Software, Inc.',
u'copyright (c) David Mosberger-Tang',
u'Copyright (c) 1999 Egbert Eich',
]
check_detection(expected, test_file)
def test_copyright_xsane_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_xsane_copyright_label-xsane_copyright_label.label')
expected = [
u'Copyright (c) 1998-2005 Oliver Rauch',
]
check_detection(expected, test_file)
def test_copyright_does_not_return_junk_in_pdf(self):
# from https://github.com/ttgurney/yocto-spdx/blob/master/doc/Yocto-SPDX_Manual_Install_Walkthrough.pdf
test_file = self.get_test_loc('copyrights/copyright_Yocto-SPDX.pdf')
expected = [
]
check_detection(expected, test_file)
def test_copyright_name_and_co(self):
test_file = self.get_test_loc('copyrights/copyright_nnp_and_co.txt')
expected = [
u'Copyright (c) 2001, Sandra and Klaus Rennecke.',
]
check_detection(expected, test_file)
def test_copyright_with_ascii_art(self):
test_file = self.get_test_loc('copyrights/copyright_with_ascii_art.txt')
expected = [
u'Copyright (c) 1996. The Regents of the University of California.',
]
check_detection(expected, test_file)
@expectedFailure
def test_copyright_should_not_be_detected_in_pixel_data_stream(self):
test_file = self.get_test_loc('copyrights/copyright_pixelstream.rgb')
expected = []
check_detection(expected, test_file)
def test_copyright_should_not_contain_leading_or_trailing_colon(self):
test_file = self.get_test_loc('copyrights/copyright_with_colon')
expected = ['copyright (c) 2013 by Armin Ronacher.']
check_detection(expected, test_file)
@expectedFailure
def test_copyright_in_markup_should_not_be_truncated(self):
test_file = self.get_test_loc('copyrights/copyright_in_html.html')
expected = [u'(c) Copyright 2010 by the WTForms Team']
check_detection(expected, test_file)
| vinodpanicker/scancode-toolkit | tests/cluecode/test_copyrights.py | Python | apache-2.0 | 180,672 | ["Brian", "VisIt"] | 2fd6fcbe4cefc1e8ebb0f068357d3269606bf3487c6d1e3dc493767eea371ed0 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
GRO topology parser
===================
Read a list of atoms from a GROMOS/Gromacs GRO coordinate file to
build a basic topology.
Atom types and masses are guessed.
See Also
--------
:mod:`MDAnalysis.coordinates.GRO`
Classes
-------
.. autoclass:: GROParser
:members:
:inherited-members:
"""
import numpy as np
from ..lib.util import openany
from ..core.topologyattrs import (
Atomnames,
Atomtypes,
Atomids,
Masses,
Resids,
Resnames,
Resnums,
Segids,
)
from ..core.topology import Topology
from .base import TopologyReaderBase, change_squash
from . import guessers
class GROParser(TopologyReaderBase):
"""Reads a Gromacs GRO file
Reads the following attributes:
- resids
- resnames
- atomids
- atomnames
Guesses the following attributes
- atomtypes
- masses
"""
format = 'GRO'
def parse(self, **kwargs):
"""Return the *Topology* object for this file"""
# Gro has the following columns
# resid, resname, name, index, (x,y,z)
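        # Illustrative (added comment): a typical fixed-width line such as
        #   "    1WATER  OW1    1   0.126   1.624   1.679"
        # parses as resid=1, resname='WATER', name='OW1', index=1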
with openany(self.filename) as inf:
next(inf)
n_atoms = int(next(inf))
            # Allocate the per-atom arrays
resids = np.zeros(n_atoms, dtype=np.int32)
resnames = np.zeros(n_atoms, dtype=object)
names = np.zeros(n_atoms, dtype=object)
indices = np.zeros(n_atoms, dtype=np.int32)
for i, line in enumerate(inf):
if i == n_atoms:
break
try:
resids[i] = int(line[:5])
resnames[i] = line[5:10].strip()
names[i] = line[10:15].strip()
indices[i] = int(line[15:20])
except (ValueError, TypeError):
errmsg = (
f"Couldn't read the following line of the .gro file:\n"
f"{line}")
raise IOError(errmsg) from None
# Check all lines had names
if not np.all(names):
missing = np.where(names == '')
raise IOError("Missing atom name on line: {0}"
"".format(missing[0][0] + 3)) # 2 header, 1 based
# Fix wrapping of resids (if we ever saw a wrap)
if np.any(resids == 0):
# find places where resid hit zero again
wraps = np.where(resids == 0)[0]
# group these places together:
# find indices of first 0 in each block of zeroes
# 1) find large changes in index, (ie non sequential blocks)
diff = np.diff(wraps) != 1
# 2) make array of where 0-blocks start
starts = np.hstack([wraps[0], wraps[1:][diff]])
# remove 0 in starts, ie the first residue **can** be 0
if starts[0] == 0:
starts = starts[1:]
# for each resid after a wrap, add 100k (5 digit wrap)
for s in starts:
resids[s:] += 100000
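            # Worked example (comment only): for resids [1, 2, 99999, 0, 1],
            # wraps = [3] and starts = [3], so the tail is shifted to give
            # [1, 2, 99999, 100000, 100001]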
# Guess types and masses
atomtypes = guessers.guess_types(names)
masses = guessers.guess_masses(atomtypes)
residx, (new_resids, new_resnames) = change_squash(
(resids, resnames), (resids, resnames))
# new_resids is len(residues)
# so resindex 0 has resid new_resids[0]
attrs = [
Atomnames(names),
Atomids(indices),
Atomtypes(atomtypes, guessed=True),
Resids(new_resids),
Resnums(new_resids.copy()),
Resnames(new_resnames),
Masses(masses, guessed=True),
Segids(np.array(['SYSTEM'], dtype=object))
]
top = Topology(n_atoms=n_atoms, n_res=len(new_resids), n_seg=1,
attrs=attrs,
atom_resindex=residx,
residue_segindex=None)
return top
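# Usage sketch (assumes MDAnalysis is installed; "conf.gro" is a hypothetical
# coordinate file):
#
#   import MDAnalysis as mda
#   u = mda.Universe("conf.gro")
#   print(u.atoms.names[:5], u.residues.resids[:5])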
| MDAnalysis/mdanalysis | package/MDAnalysis/topology/GROParser.py | Python | gpl-2.0 | 4,984 | ["GROMOS", "Gromacs", "MDAnalysis"] | c0d8d8222907cdae0dd2c8396a5c79cdbbb454dce5260de1325ad4c3a9affe68 |
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
from PyQt4 import QtGui
from openlp.core.lib import SpellTextEdit, build_icon, translate
from openlp.core.lib.ui import UiStrings, create_button_box
from openlp.plugins.songs.lib import VerseType
class Ui_EditVerseDialog(object):
def setupUi(self, edit_verse_dialog):
edit_verse_dialog.setObjectName('edit_verse_dialog')
edit_verse_dialog.resize(400, 400)
edit_verse_dialog.setModal(True)
self.dialog_layout = QtGui.QVBoxLayout(edit_verse_dialog)
self.dialog_layout.setObjectName('dialog_layout')
self.verse_text_edit = SpellTextEdit(edit_verse_dialog)
self.verse_text_edit.setObjectName('verse_text_edit')
self.dialog_layout.addWidget(self.verse_text_edit)
self.verse_type_layout = QtGui.QHBoxLayout()
self.verse_type_layout.setObjectName('verse_type_layout')
self.split_button = QtGui.QPushButton(edit_verse_dialog)
self.split_button.setIcon(build_icon(':/general/general_add.png'))
self.split_button.setObjectName('split_button')
self.verse_type_layout.addWidget(self.split_button)
self.verse_type_label = QtGui.QLabel(edit_verse_dialog)
self.verse_type_label.setObjectName('verse_type_label')
self.verse_type_layout.addWidget(self.verse_type_label)
self.verse_type_combo_box = QtGui.QComboBox(edit_verse_dialog)
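        # Seven empty placeholder items, one per VerseType; their visible
        # text is set in retranslateUi() below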
self.verse_type_combo_box.addItems(['', '', '', '', '', '', ''])
self.verse_type_combo_box.setObjectName('verse_type_combo_box')
self.verse_type_label.setBuddy(self.verse_type_combo_box)
self.verse_type_layout.addWidget(self.verse_type_combo_box)
self.verse_number_box = QtGui.QSpinBox(edit_verse_dialog)
self.verse_number_box.setMinimum(1)
self.verse_number_box.setObjectName('verse_number_box')
self.verse_type_layout.addWidget(self.verse_number_box)
self.insert_button = QtGui.QPushButton(edit_verse_dialog)
self.insert_button.setIcon(build_icon(':/general/general_add.png'))
self.insert_button.setObjectName('insert_button')
self.verse_type_layout.addWidget(self.insert_button)
self.verse_type_layout.addStretch()
self.dialog_layout.addLayout(self.verse_type_layout)
self.button_box = create_button_box(edit_verse_dialog, 'button_box', ['cancel', 'ok'])
self.dialog_layout.addWidget(self.button_box)
self.retranslateUi(edit_verse_dialog)
def retranslateUi(self, edit_verse_dialog):
edit_verse_dialog.setWindowTitle(translate('SongsPlugin.EditVerseForm', 'Edit Verse'))
self.verse_type_label.setText(translate('SongsPlugin.EditVerseForm', '&Verse type:'))
self.verse_type_combo_box.setItemText(VerseType.Verse, VerseType.translated_names[VerseType.Verse])
self.verse_type_combo_box.setItemText(VerseType.Chorus, VerseType.translated_names[VerseType.Chorus])
self.verse_type_combo_box.setItemText(VerseType.Bridge, VerseType.translated_names[VerseType.Bridge])
self.verse_type_combo_box.setItemText(VerseType.PreChorus, VerseType.translated_names[VerseType.PreChorus])
self.verse_type_combo_box.setItemText(VerseType.Intro, VerseType.translated_names[VerseType.Intro])
self.verse_type_combo_box.setItemText(VerseType.Ending, VerseType.translated_names[VerseType.Ending])
self.verse_type_combo_box.setItemText(VerseType.Other, VerseType.translated_names[VerseType.Other])
self.split_button.setText(UiStrings().Split)
self.split_button.setToolTip(UiStrings().SplitToolTip)
self.insert_button.setText(translate('SongsPlugin.EditVerseForm', '&Insert'))
self.insert_button.setToolTip(translate('SongsPlugin.EditVerseForm',
'Split a slide into two by inserting a verse splitter.'))
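# Usage sketch (illustrative only; assumes a running PyQt4 QApplication and
# that OpenLP's translation machinery is initialised):
#
#   dialog = QtGui.QDialog()
#   ui = Ui_EditVerseDialog()
#   ui.setupUi(dialog)
#   dialog.exec_()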
| marmyshev/item_title | openlp/plugins/songs/forms/editversedialog.py | Python | gpl-2.0 | 5,909 | ["Brian"] | 0113fe3bfbb50fa5874851549d8514d066d56da892ce2d816b6690b5bb822681 |
#!/usr/bin/env python
import numpy as np
from horton import *
# Load the Gaussian output from file
fn_fchk = context.get_fn('test/water_sto3g_hf_g03.fchk')
# Replace the previous line with any other fchk file, e.g. fn_fchk = 'yourfile.fchk'.
mol = IOData.from_file(fn_fchk)
# Partition the density with the Becke scheme
grid = BeckeMolGrid(mol.coordinates, mol.numbers, mol.pseudo_numbers, mode='only')
moldens = mol.obasis.compute_grid_density_dm(mol.get_dm_full(), grid.points)
wpart = BeckeWPart(mol.coordinates, mol.numbers, mol.pseudo_numbers, grid, moldens, local=True)
wpart.do_charges()
# Write the result to a file
np.savetxt('charges.txt', wpart['charges'])
| eustislab/horton | data/examples/wpart/becke.py | Python | gpl-3.0 | 672 | ["Gaussian"] | eeeb897504fbcdb9e0227f6a25a09fd9f5d17c995ec1e137328eb975b483a0d5 |
import os
folder = "/home8/kanika/moose/mat_files/"
output = '/home8/kanika/petsc2/src/ksp/ksp/examples/tutorials/moose_features/tutorials/properties_script.sh'
target = open(output,'w')
target.write("clear \n")
feat_file = open('/home8/kanika/petsc2/src/ksp/ksp/examples/tutorials/moose_features/tutorials/properties_results.csv','w')
#write the header to the new decoupled file before writing the feature values
feat_file.write("MinNonzerosPerRow" + "," + "RowVariance" + "," + "ColumnVariance" + "," +
"DiagonalVariance" + ","+ "Nonzeros " + ","+ "Dimension" + ","+ "FrobeniusNorm " + ","+
"SymmetricFrobeniusNorm " + ","+ "AntiSymmetricFrobeniusNorm" + "," + "OneNorm" + "," +
"InfinityNorm" + ","+ " SymmetricInfinityNorm" + ","+ "AntiSymmetricInfinityNorm" + ","+
"MaxNonzerosPerRow " + ","+ "Trace " + ","+ " AbsoluteTrace" + ","+ "MinNonzerosPerRow" +
","+ "AvgNonzerosPerRow" + ","+ "DummyRows" + ","+ "DummyRowsKind" + ","+ "NumericValueSymmetryV1" +
","+ "NonZeroPatternSymmetryV1" + "," + "NumericValueSymmetryV2" + ","+ "NonZeroPatternSymmetryV2" +
","+ "RowDiagonalDominance " + ","+ "ColumnDiagonalDominance " + ","+ "DiagonalAverage" + ","+
"DiagonalSign " + ","+ "DiagonalNonZeros " + ","+ "lowerBandwidth " + ","+ "upperBandwidth " +
","+ "MatrixSymmetric " + "," +"matrix"+ "\n")
for mat_file in os.listdir(folder):
target.write("./properties_moose -f " + folder + mat_file + " >> properties_results.csv" + "\n")
target.write("exit 0")
| LighthouseHPC/lighthouse | sandbox/petsc/moose-petsc-features/compute_32_feat.py | Python | mit | 1,480 | ["MOOSE"] | 2981632c5a1ad025dcce0dc9ce68823e80c8efd7b21afb23952db2de895fcfb3 |
from typing import List, Dict
import numpy
from allennlp.common.util import JsonDict
from allennlp.data import DatasetReader, Instance
from allennlp.data.fields import FlagField, TextField, SequenceLabelField
from allennlp.data.tokenizers.spacy_tokenizer import SpacyTokenizer
from allennlp.models import Model
from allennlp.predictors.predictor import Predictor
@Predictor.register("sentence_tagger")
class SentenceTaggerPredictor(Predictor):
"""
Predictor for any model that takes in a sentence and returns
a single set of tags for it. In particular, it can be used with
the [`CrfTagger`](https://docs.allennlp.org/models/main/models/tagging/models/crf_tagger/)
model and also the [`SimpleTagger`](../models/simple_tagger.md) model.
Registered as a `Predictor` with name "sentence_tagger".
"""
def __init__(
self, model: Model, dataset_reader: DatasetReader, language: str = "en_core_web_sm"
) -> None:
super().__init__(model, dataset_reader)
self._tokenizer = SpacyTokenizer(language=language)
def predict(self, sentence: str) -> JsonDict:
return self.predict_json({"sentence": sentence})
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects JSON that looks like `{"sentence": "..."}`.
Runs the underlying model, and adds the `"words"` to the output.
"""
sentence = json_dict["sentence"]
tokens = self._tokenizer.tokenize(sentence)
return self._dataset_reader.text_to_instance(tokens)
def predictions_to_labeled_instances(
self, instance: Instance, outputs: Dict[str, numpy.ndarray]
) -> List[Instance]:
"""
This function currently only handles BIOUL tags.
Imagine an NER model predicts three named entities (each one with potentially
multiple tokens). For each individual entity, we create a new Instance that has
the label set to only that entity and the rest of the tokens are labeled as outside.
We then return a list of those Instances.
For example:
```text
Mary went to Seattle to visit Microsoft Research
U-Per O O U-Loc O O B-Org L-Org
```
We create three instances.
```text
Mary went to Seattle to visit Microsoft Research
U-Per O O O O O O O
Mary went to Seattle to visit Microsoft Research
O O O U-LOC O O O O
Mary went to Seattle to visit Microsoft Research
O O O O O O B-Org L-Org
```
We additionally add a flag to these instances to tell the model to only compute loss on
non-O tags, so that we get gradients that are specific to the particular span prediction
that each instance represents.
"""
predicted_tags = outputs["tags"]
predicted_spans = []
i = 0
while i < len(predicted_tags):
tag = predicted_tags[i]
            # if it's a U (a single-token span), add it to the list
if tag[0] == "U":
current_tags = [t if idx == i else "O" for idx, t in enumerate(predicted_tags)]
predicted_spans.append(current_tags)
            # if it's a B, keep going until you hit an L.
elif tag[0] == "B":
begin_idx = i
while tag[0] != "L":
i += 1
tag = predicted_tags[i]
end_idx = i
current_tags = [
t if begin_idx <= idx <= end_idx else "O"
for idx, t in enumerate(predicted_tags)
]
predicted_spans.append(current_tags)
i += 1
# Creates a new instance for each contiguous tag
instances = []
for labels in predicted_spans:
new_instance = instance.duplicate()
text_field: TextField = instance["tokens"] # type: ignore
new_instance.add_field(
"tags", SequenceLabelField(labels, text_field), self._model.vocab
)
new_instance.add_field("ignore_loss_on_o_tags", FlagField(True))
instances.append(new_instance)
return instances
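# Usage sketch (assumes a trained tagger archive; "model.tar.gz" is a
# hypothetical path):
#
#   from allennlp.predictors import Predictor
#   predictor = Predictor.from_path("model.tar.gz", predictor_name="sentence_tagger")
#   print(predictor.predict("Mary went to Seattle.")["tags"])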
| allenai/allennlp | allennlp/predictors/sentence_tagger.py | Python | apache-2.0 | 4,294 | ["VisIt"] | f22e151003fbb1dc3226653ab77b588ff59f93dc01e930feaef21c2440ecc391 |
"""Unit test for util.py"""
import pysal
from pysal.common import *
import pysal.weights
import numpy as np
from scipy import sparse, float32
from scipy.spatial import KDTree
import os
import gc
import unittest
class Testutil(unittest.TestCase):
def setUp(self):
self.w = pysal.rook_from_shapefile(
pysal.examples.get_path('10740.shp'))
def test_lat2W(self):
w9 = pysal.lat2W(3, 3)
self.assertEquals(w9.pct_nonzero, 29.62962962962963)
self.assertEquals(w9[0], {1: 1.0, 3: 1.0})
self.assertEquals(w9[3], {0: 1.0, 4: 1.0, 6: 1.0})
def test_lat2SW(self):
w9 = pysal.weights.lat2SW(3, 3)
rows, cols = w9.shape
n = rows * cols
pct_nonzero = w9.nnz / float(n)
self.assertEquals(pct_nonzero, 0.29629629629629628)
data = w9.todense().tolist()
self.assertEquals(data[0], [0, 1, 0, 1, 0, 0, 0, 0, 0])
self.assertEquals(data[1], [1, 0, 1, 0, 1, 0, 0, 0, 0])
self.assertEquals(data[2], [0, 1, 0, 0, 0, 1, 0, 0, 0])
self.assertEquals(data[3], [1, 0, 0, 0, 1, 0, 1, 0, 0])
self.assertEquals(data[4], [0, 1, 0, 1, 0, 1, 0, 1, 0])
self.assertEquals(data[5], [0, 0, 1, 0, 1, 0, 0, 0, 1])
self.assertEquals(data[6], [0, 0, 0, 1, 0, 0, 0, 1, 0])
self.assertEquals(data[7], [0, 0, 0, 0, 1, 0, 1, 0, 1])
self.assertEquals(data[8], [0, 0, 0, 0, 0, 1, 0, 1, 0])
def test_block_weights(self):
regimes = np.ones(25)
regimes[range(10, 20)] = 2
regimes[range(21, 25)] = 3
regimes = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 3., 3.,
3., 3.])
w = pysal.block_weights(regimes)
ww0 = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
self.assertEquals(w.weights[0], ww0)
wn0 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 20]
self.assertEquals(w.neighbors[0], wn0)
regimes = ['n', 'n', 's', 's', 'e', 'e', 'w', 'w', 'e']
n = len(regimes)
w = pysal.block_weights(regimes)
wn = {0: [1], 1: [0], 2: [3], 3: [2], 4: [5, 8], 5: [4, 8],
6: [7], 7: [6], 8: [4, 5]}
self.assertEquals(w.neighbors, wn)
def test_comb(self):
x = range(4)
l = []
for i in pysal.comb(x, 2):
l.append(i)
lo = [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]
self.assertEquals(l, lo)
def test_order(self):
w3 = pysal.order(self.w, kmax=3)
w3105 = [1, -1, 1, 2, 1]
self.assertEquals(w3105, w3[1][0:5])
def test_higher_order(self):
w10 = pysal.lat2W(10, 10)
w10_2 = pysal.higher_order(w10, 2)
w10_20 = {2: 1.0, 11: 1.0, 20: 1.0}
self.assertEquals(w10_20, w10_2[0])
w5 = pysal.lat2W()
w50 = {1: 1.0, 5: 1.0}
self.assertEquals(w50, w5[0])
w51 = {0: 1.0, 2: 1.0, 6: 1.0}
self.assertEquals(w51, w5[1])
w5_2 = pysal.higher_order(w5, 2)
w5_20 = {2: 1.0, 10: 1.0, 6: 1.0}
self.assertEquals(w5_20, w5_2[0])
def test_shimbel(self):
w5 = pysal.lat2W()
w5_shimbel = pysal.shimbel(w5)
w5_shimbel024 = 8
self.assertEquals(w5_shimbel024, w5_shimbel[0][24])
w5_shimbel004 = [-1, 1, 2, 3]
self.assertEquals(w5_shimbel004, w5_shimbel[0][0:4])
def test_full(self):
neighbors = {'first': ['second'], 'second': ['first',
'third'], 'third': ['second']}
weights = {'first': [1], 'second': [1, 1], 'third': [1]}
w = pysal.W(neighbors, weights)
wf, ids = pysal.full(w)
wfo = np.array([[0., 1., 0.], [1., 0., 1.], [0., 1., 0.]])
np.testing.assert_array_almost_equal(wfo, wf, decimal=8)
idso = ['first', 'second', 'third']
self.assertEquals(idso, ids)
def test_full2W(self):
a = np.zeros((4, 4))
for i in range(len(a)):
for j in range(len(a[i])):
if i != j:
a[i, j] = np.random.random(1)
w = pysal.weights.util.full2W(a)
np.testing.assert_array_equal(w.full()[0], a)
ids = ['myID0', 'myID1', 'myID2', 'myID3']
w = pysal.weights.util.full2W(a, ids=ids)
np.testing.assert_array_equal(w.full()[0], a)
w.full()[0] == a
def test_WSP2W(self):
sp = pysal.weights.lat2SW(2, 5)
wsp = pysal.weights.WSP(sp)
w = pysal.weights.WSP2W(wsp)
self.assertEquals(w.n, 10)
self.assertEquals(w[0], {1: 1, 5: 1})
w = pysal.open(pysal.examples.get_path('sids2.gal'), 'r').read()
wsp = pysal.weights.WSP(w.sparse, w.id_order)
w = pysal.weights.WSP2W(wsp)
self.assertEquals(w.n, 100)
self.assertEquals(w['37135'], {'37001': 1.0, '37033': 1.0,
'37037': 1.0, '37063': 1.0, '37145': 1.0})
def test_insert_diagonal(self):
w1 = pysal.weights.insert_diagonal(self.w)
r1 = {0: 1.0, 1: 1.0, 4: 1.0, 101: 1.0, 85: 1.0, 5: 1.0}
self.assertEquals(w1[0], r1)
w1 = pysal.weights.insert_diagonal(self.w, 20)
r1 = {0: 20, 1: 1.0, 4: 1.0, 101: 1.0, 85: 1.0, 5: 1.0}
self.assertEquals(w1[0], r1)
diag = np.arange(100, 100 + self.w.n)
w1 = pysal.weights.insert_diagonal(self.w, diag)
r1 = {0: 100, 1: 1.0, 4: 1.0, 101: 1.0, 85: 1.0, 5: 1.0}
self.assertEquals(w1[0], r1)
def test_remap_ids(self):
w = pysal.lat2W(3, 2)
wid_order = [0, 1, 2, 3, 4, 5]
self.assertEquals(wid_order, w.id_order)
wneighbors0 = [2, 1]
self.assertEquals(wneighbors0, w.neighbors[0])
old_to_new = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f'}
w_new = pysal.remap_ids(w, old_to_new)
w_newid_order = ['a', 'b', 'c', 'd', 'e', 'f']
self.assertEquals(w_newid_order, w_new.id_order)
w_newdneighborsa = ['c', 'b']
self.assertEquals(w_newdneighborsa, w_new.neighbors['a'])
def test_get_ids(self):
polyids = pysal.weights.util.get_ids(
pysal.examples.get_path('columbus.shp'), "POLYID")
polyids5 = [1, 2, 3, 4, 5]
self.assertEquals(polyids5, polyids[:5])
def test_get_points_array_from_shapefile(self):
xy = pysal.weights.util.get_points_array_from_shapefile(
pysal.examples.get_path('juvenile.shp'))
xy3 = np.array([[94., 93.], [80., 95.], [79., 90.]])
np.testing.assert_array_almost_equal(xy3, xy[:3], decimal=8)
xy = pysal.weights.util.get_points_array_from_shapefile(
pysal.examples.get_path('columbus.shp'))
xy3 = np.array([[8.82721847, 14.36907602], [8.33265837,
14.03162401], [9.01226541, 13.81971908]])
np.testing.assert_array_almost_equal(xy3, xy[:3], decimal=8)
def test_min_threshold_distance(self):
x, y = np.indices((5, 5))
x.shape = (25, 1)
y.shape = (25, 1)
data = np.hstack([x, y])
mint = 1.0
self.assertEquals(
mint, pysal.weights.util.min_threshold_distance(data))
suite = unittest.TestLoader().loadTestsFromTestCase(Testutil)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite)
| spreg-git/pysal | pysal/weights/tests/test_util.py | Python | bsd-3-clause | 7,381 | ["COLUMBUS"] | c841ba4b1532b16bde65e0488d0face386ab3e9df0af18ce2bc74a099601598f |
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from functools import wraps
import numpy as np
import sympy
from sympy import lambdify
import warnings
from hyperspy.component import Component
from hyperspy.docstrings.parameters import FUNCTION_ND_DOCSTRING
_CLASS_DOC = \
"""%s component (created with Expression).
.. math::
f(x) = %s
"""
def _fill_function_args(fn):
@wraps(fn)
def fn_wrapped(self, x):
return fn(x, *[p.value for p in self.parameters])
return fn_wrapped
def _fill_function_args_2d(fn):
@wraps(fn)
def fn_wrapped(self, x, y):
return fn(x, y, *[p.value for p in self.parameters])
return fn_wrapped
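# Both wrappers splice the component's current parameter values into the
# compiled sympy function, so it can be called as component.function(x)
# (or function(x, y) for 2D components) without passing parameters explicitly.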
def _parse_substitutions(string):
splits = map(str.strip, string.split(';'))
expr = sympy.sympify(next(splits))
    # We substitute one by one manually, as passing all substitutions at the
    # same time does not work as intended (substitutions inside other
    # substitutions are not resolved)
for sub in splits:
t = tuple(map(str.strip, sub.split('=')))
expr = expr.subs(t[0], sympy.sympify(t[1]))
return expr
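# Worked example (comment only): _parse_substitutions("a + b; b = 2*a")
# substitutes b into the first expression, yielding the sympy expression 3*a.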
class Expression(Component):
"""Create a component from a string expression.
"""
def __init__(self, expression, name, position=None, module="numpy",
autodoc=True, add_rotation=False, rotation_center=None,
rename_pars={}, compute_gradients=True, **kwargs):
"""Create a component from a string expression.
It automatically generates the partial derivatives and the
class docstring.
        Parameters
----------
expression : str
Component function in SymPy text expression format with
substitutions separated by `;`. See examples and the SymPy
documentation for details. The only additional constraint is that
the variable(s) must be `x` (for 1D components); or `x` and `y` for
2D components. Also, if `module` is "numexpr" the
functions are limited to those that numexpr support. See its
documentation for details.
name : str
Name of the component.
position : str, optional
The parameter name that defines the position of the component if
            applicable. It enables interactive adjustment of the position of the
component in the model. For 2D components, a tuple must be passed
with the name of the two parameters e.g. `("x0", "y0")`.
module : {"numpy", "numexpr"}, default "numpy"
Module used to evaluate the function. numexpr is often faster but
it supports fewer functions and requires installing numexpr.
add_rotation: bool, default False
This is only relevant for 2D components. If `True` it automatically
adds `rotation_angle` parameter.
rotation_center : {None, tuple}
If None, the rotation center is the center i.e. (0, 0) if `position`
is not defined, otherwise the center is the coordinates specified
by `position`. Alternatively a tuple with the (x, y) coordinates
of the center can be provided.
        rename_pars : dictionary
            The desired name of a parameter may sometimes coincide with e.g.
            the name of a scientific function, which prevents using it in the
            `expression`. `rename_pars` is a dictionary mapping the name of
            the parameter in the `expression` to the desired name of the
            parameter in the `Component`. For example: {"_gamma": "gamma"}.
compute_gradients : bool, optional
If `True`, compute the gradient automatically using sympy. If sympy
does not support the calculation of the partial derivatives, for
example in case of expression containing a "where" condition,
it can be disabled by using `compute_gradients=False`.
**kwargs
Keyword arguments can be used to initialise the value of the
parameters.
Methods
-------
        recompile : useful to recompile the function and gradient with
            a different module.
Note
----
        As of version 1.4, Sympy's ``lambdify`` function, which the
        ``Expression`` component uses internally, does not support the
        differentiation of some expressions, for example those containing a
        "where" condition. In such cases, the gradients can be set manually
        if required.
Examples
--------
The following creates a Gaussian component and set the initial value
of the parameters:
>>> hs.model.components1D.Expression(
... expression="height * exp(-(x - x0) ** 2 * 4 * log(2)/ fwhm ** 2)",
... name="Gaussian",
... height=1,
... fwhm=1,
... x0=0,
... position="x0",)
Substitutions for long or complicated expressions are separated by
        semicolons:
>>> expr = 'A*B/(A+B) ; A = sin(x)+one; B = cos(y) - two; y = tan(x)'
>>> comp = hs.model.components1D.Expression(
... expression=expr,
... name='my function')
>>> comp.parameters
(<Parameter one of my function component>,
<Parameter two of my function component>)
"""
self._add_rotation = add_rotation
self._str_expression = expression
self._rename_pars = rename_pars
self._compute_gradients = compute_gradients
if rotation_center is None:
self.compile_function(module=module, position=position)
else:
self.compile_function(module=module, position=rotation_center)
# Initialise component
Component.__init__(self, self._parameter_strings)
# When creating components using Expression (for example GaussianHF)
# we shouldn't add anything else to the _whitelist as the
        # component should be initialized with its own kwargs.
# An exception is "module"
self._whitelist['module'] = ('init', module)
if self.__class__ is Expression:
self._whitelist['expression'] = ('init', expression)
self._whitelist['name'] = ('init', name)
self._whitelist['position'] = ('init', position)
self._whitelist['rename_pars'] = ('init', rename_pars)
self._whitelist['compute_gradients'] = ('init', compute_gradients)
if self._is2D:
self._whitelist['add_rotation'] = ('init', self._add_rotation)
self._whitelist['rotation_center'] = ('init', rotation_center)
self.name = name
# Set the position parameter
if position:
if self._is2D:
self._position_x = getattr(self, position[0])
self._position_y = getattr(self, position[1])
else:
self._position = getattr(self, position)
# Set the initial value of the parameters
if kwargs:
for kwarg, value in kwargs.items():
setattr(getattr(self, kwarg), 'value', value)
if autodoc:
self.__doc__ = _CLASS_DOC % (
name, sympy.latex(_parse_substitutions(expression)))
def compile_function(self, module="numpy", position=False):
expr = _parse_substitutions(self._str_expression)
# Extract x
x, = [symbol for symbol in expr.free_symbols if symbol.name == "x"]
# Extract y
y = [symbol for symbol in expr.free_symbols if symbol.name == "y"]
self._is2D = True if y else False
if self._is2D:
y = y[0]
if self._is2D and self._add_rotation:
position = position or (0, 0)
rotx = sympy.sympify(
"{0} + (x - {0}) * cos(rotation_angle) - (y - {1}) *"
" sin(rotation_angle)"
.format(*position))
roty = sympy.sympify(
"{1} + (x - {0}) * sin(rotation_angle) + (y - {1}) *"
"cos(rotation_angle)"
.format(*position))
expr = expr.subs({"x": rotx, "y": roty}, simultaneous=False)
rvars = sympy.symbols([s.name for s in expr.free_symbols], real=True)
real_expr = expr.subs(
{orig: real_ for (orig, real_) in zip(expr.free_symbols, rvars)})
# just replace with the assumption that all our variables are real
expr = real_expr
eval_expr = expr.evalf()
# Extract parameters
variables = ("x", "y") if self._is2D else ("x", )
parameters = [
symbol for symbol in expr.free_symbols
if symbol.name not in variables]
parameters.sort(key=lambda x: x.name) # to have a reliable order
# Create compiled function
variables = [x, y] if self._is2D else [x]
self._f = lambdify(variables + parameters, eval_expr,
modules=module, dummify=False)
if self._is2D:
def f(x, y): return self._f(
x, y, *[p.value for p in self.parameters])
else:
def f(x): return self._f(x, *[p.value for p in self.parameters])
setattr(self, "function", f)
parnames = [symbol.name if symbol.name not in self._rename_pars else self._rename_pars[symbol.name]
for symbol in parameters]
self._parameter_strings = parnames
if self._compute_gradients:
try:
ffargs = (_fill_function_args_2d if
self._is2D else _fill_function_args)
for parameter in parameters:
grad_expr = sympy.diff(eval_expr, parameter)
name = parameter.name if parameter.name not in self._rename_pars else self._rename_pars[
parameter.name]
setattr(self,
"_f_grad_%s" % name,
lambdify(variables + parameters,
grad_expr.evalf(),
modules=module,
dummify=False)
)
setattr(self,
"grad_%s" % name,
ffargs(
getattr(
self,
"_f_grad_%s" %
name)).__get__(
self,
Expression)
)
except SyntaxError:
warnings.warn("The gradients can not be computed with sympy.",
UserWarning)
def function_nd(self, *args):
"""%s
"""
if self._is2D:
x, y = args[0], args[1]
# navigation dimension is 0, f_nd same as f
if not self._is_navigation_multidimensional:
return self.function(x, y)
else:
return self._f(x[np.newaxis, ...], y[np.newaxis, ...],
*[p.map['values'][..., np.newaxis, np.newaxis]
for p in self.parameters])
else:
x = args[0]
if not self._is_navigation_multidimensional:
return self.function(x)
else:
return self._f(x[np.newaxis, ...],
*[p.map['values'][..., np.newaxis]
for p in self.parameters])
function_nd.__doc__ %= FUNCTION_ND_DOCSTRING
| francisco-dlp/hyperspy | hyperspy/_components/expression.py | Python | gpl-3.0 | 12,316 | ["Gaussian"] | 61860bdd91b7c1e4539119ac210f1e477e74aa5b1dbf78827420d84a8bbcd0f8 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import math
import numpy as np
from .core import Kernel1D, Kernel2D, Kernel
from .utils import has_even_axis, raise_even_kernel_exception, KernelSizeError
from astropy.modeling import models
from astropy.modeling.core import Fittable1DModel, Fittable2DModel
from astropy.utils.decorators import deprecated
__all__ = ['Gaussian1DKernel', 'Gaussian2DKernel', 'CustomKernel',
'Box1DKernel', 'Box2DKernel', 'Tophat2DKernel',
'Trapezoid1DKernel', 'RickerWavelet1DKernel', 'RickerWavelet2DKernel',
'AiryDisk2DKernel', 'Moffat2DKernel', 'Model1DKernel',
'Model2DKernel', 'TrapezoidDisk2DKernel', 'Ring2DKernel']
def _round_up_to_odd_integer(value):
i = math.ceil(value)
if i % 2 == 0:
return i + 1
else:
return i
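# Examples (comment only): 7.2 -> 9 (ceil gives 8, which is even, so +1);
# 8.0 -> 9; 7.0 -> 7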
class Gaussian1DKernel(Kernel1D):
"""
1D Gaussian filter kernel.
The Gaussian filter is a filter with great smoothing properties. It is
isotropic and does not produce artifacts.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
stddev : number
Standard deviation of the Gaussian kernel.
x_size : int, optional
Size of the kernel array. Default = ⌊8*stddev+1⌋.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin. Very slow.
factor : number, optional
Factor of oversampling. Default factor = 10. If the factor
is too large, evaluation can be very slow.
See Also
--------
Box1DKernel, Trapezoid1DKernel, RickerWavelet1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Gaussian1DKernel
gauss_1D_kernel = Gaussian1DKernel(10)
plt.plot(gauss_1D_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_separable = True
_is_bool = False
def __init__(self, stddev, **kwargs):
self._model = models.Gaussian1D(1. / (np.sqrt(2 * np.pi) * stddev),
0, stddev)
self._default_size = _round_up_to_odd_integer(8 * stddev)
super().__init__(**kwargs)
self._truncation = np.abs(1. - self._array.sum())
class Gaussian2DKernel(Kernel2D):
"""
2D Gaussian filter kernel.
The Gaussian filter is a filter with great smoothing properties. It is
isotropic and does not produce artifacts.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
x_stddev : float
Standard deviation of the Gaussian in x before rotating by theta.
y_stddev : float
Standard deviation of the Gaussian in y before rotating by theta.
theta : float or `~astropy.units.Quantity` ['angle']
Rotation angle. If passed as a float, it is assumed to be in radians.
The rotation angle increases counterclockwise.
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*stddev + 1⌋.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*stddev + 1⌋.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Gaussian2DKernel
gaussian_2D_kernel = Gaussian2DKernel(10)
plt.imshow(gaussian_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_separable = True
_is_bool = False
def __init__(self, x_stddev, y_stddev=None, theta=0.0, **kwargs):
if y_stddev is None:
y_stddev = x_stddev
self._model = models.Gaussian2D(1. / (2 * np.pi * x_stddev * y_stddev),
0, 0, x_stddev=x_stddev,
y_stddev=y_stddev, theta=theta)
self._default_size = _round_up_to_odd_integer(
8 * np.max([x_stddev, y_stddev]))
super().__init__(**kwargs)
self._truncation = np.abs(1. - self._array.sum())
class Box1DKernel(Kernel1D):
"""
1D Box filter kernel.
The Box filter or running mean is a smoothing filter. It is not isotropic
and can produce artifacts when applied repeatedly to the same data.
The generated kernel is normalized so that it integrates to 1.
By default the Box kernel uses the ``linear_interp`` discretization mode,
which allows non-shifting, even-sized kernels. This is achieved by
    weighting the edge pixels with 1/2. E.g. a Box kernel with an effective
    smoothing of 4 pixels would have the following array: [0.5, 1, 1, 1, 0.5].
Parameters
----------
width : number
Width of the filter kernel.
mode : str, optional
One of the following discretization modes:
* 'center'
Discretize model by taking the value
at the center of the bin.
* 'linear_interp' (default)
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian1DKernel, Trapezoid1DKernel, RickerWavelet1DKernel
Examples
--------
Kernel response function:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Box1DKernel
box_1D_kernel = Box1DKernel(9)
plt.plot(box_1D_kernel, drawstyle='steps')
plt.xlim(-1, 9)
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_separable = True
_is_bool = True
def __init__(self, width, **kwargs):
self._model = models.Box1D(1. / width, 0, width)
self._default_size = _round_up_to_odd_integer(width)
kwargs['mode'] = 'linear_interp'
super().__init__(**kwargs)
self._truncation = 0
self.normalize()
class Box2DKernel(Kernel2D):
"""
2D Box filter kernel.
The Box filter or running mean is a smoothing filter. It is not isotropic
and can produce artifacts when applied repeatedly to the same data.
The generated kernel is normalized so that it integrates to 1.
By default the Box kernel uses the ``linear_interp`` discretization mode,
which allows non-shifting, even-sized kernels. This is achieved by
weighting the edge pixels with 1/2.
Parameters
----------
width : number
Width of the filter kernel.
mode : str, optional
One of the following discretization modes:
* 'center'
Discretize model by taking the value
at the center of the bin.
* 'linear_interp' (default)
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Box2DKernel
box_2D_kernel = Box2DKernel(9)
plt.imshow(box_2D_kernel, interpolation='none', origin='lower',
vmin=0.0, vmax=0.015)
plt.xlim(-1, 9)
plt.ylim(-1, 9)
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_separable = True
_is_bool = True
def __init__(self, width, **kwargs):
self._model = models.Box2D(1. / width ** 2, 0, 0, width, width)
self._default_size = _round_up_to_odd_integer(width)
kwargs['mode'] = 'linear_interp'
super().__init__(**kwargs)
self._truncation = 0
self.normalize()
class Tophat2DKernel(Kernel2D):
"""
2D Tophat filter kernel.
The Tophat filter is an isotropic smoothing filter. It can produce
artifacts when applied repeatedly on the same data.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
radius : int
Radius of the filter kernel.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, RickerWavelet2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Tophat2DKernel
tophat_2D_kernel = Tophat2DKernel(40)
plt.imshow(tophat_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
def __init__(self, radius, **kwargs):
self._model = models.Disk2D(1. / (np.pi * radius ** 2), 0, 0, radius)
self._default_size = _round_up_to_odd_integer(2 * radius)
super().__init__(**kwargs)
self._truncation = 0
class Ring2DKernel(Kernel2D):
"""
2D Ring filter kernel.
The Ring filter kernel is the difference between two Tophat kernels of
different width. This kernel is useful for, e.g., background estimation.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
radius_in : number
Inner radius of the ring kernel.
width : number
Width of the ring kernel.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Ring2DKernel
ring_2D_kernel = Ring2DKernel(9, 8)
plt.imshow(ring_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
def __init__(self, radius_in, width, **kwargs):
radius_out = radius_in + width
self._model = models.Ring2D(1. / (np.pi * (radius_out ** 2 - radius_in ** 2)),
0, 0, radius_in, width)
self._default_size = _round_up_to_odd_integer(2 * radius_out)
super().__init__(**kwargs)
self._truncation = 0
class Trapezoid1DKernel(Kernel1D):
"""
1D trapezoid kernel.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
width : number
Width of the filter kernel, defined as the width of the constant part,
before it begins to slope down.
slope : number
Slope of the filter kernel's tails
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box1DKernel, Gaussian1DKernel, RickerWavelet1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Trapezoid1DKernel
trapezoid_1D_kernel = Trapezoid1DKernel(17, slope=0.2)
plt.plot(trapezoid_1D_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('amplitude')
plt.xlim(-1, 28)
plt.show()
"""
_is_bool = False
def __init__(self, width, slope=1., **kwargs):
self._model = models.Trapezoid1D(1, 0, width, slope)
self._default_size = _round_up_to_odd_integer(width + 2. / slope)
super().__init__(**kwargs)
self._truncation = 0
self.normalize()
class TrapezoidDisk2DKernel(Kernel2D):
"""
2D trapezoid kernel.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
radius : number
Width of the filter kernel, defined as the width of the constant part,
before it begins to slope down.
slope : number
Slope of the filter kernel's tails
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel,
Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import TrapezoidDisk2DKernel
trapezoid_2D_kernel = TrapezoidDisk2DKernel(20, slope=0.2)
plt.imshow(trapezoid_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, radius, slope=1., **kwargs):
self._model = models.TrapezoidDisk2D(1, 0, 0, radius, slope)
self._default_size = _round_up_to_odd_integer(2 * radius + 2. / slope)
super().__init__(**kwargs)
self._truncation = 0
self.normalize()
class RickerWavelet1DKernel(Kernel1D):
"""
1D Ricker wavelet filter kernel (sometimes known as a "Mexican Hat"
kernel).
The Ricker wavelet, or inverted Gaussian-Laplace filter, is a
bandpass filter. It smooths the data and removes slowly varying
    or constant structures (e.g. background). It is useful for peak or
multi-scale detection.
This kernel is derived from a normalized Gaussian function, by
computing the second derivative. This results in an amplitude
    at the kernel's center of 1. / (sqrt(2 * pi) * width ** 3). The
normalization is the same as for `scipy.ndimage.gaussian_laplace`,
except for a minus sign.
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this kernel.
Parameters
----------
width : number
Width of the filter kernel, defined as the standard deviation
of the Gaussian function from which it is derived.
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width +1⌋.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box1DKernel, Gaussian1DKernel, Trapezoid1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import RickerWavelet1DKernel
ricker_1d_kernel = RickerWavelet1DKernel(10)
plt.plot(ricker_1d_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_is_bool = True
def __init__(self, width, **kwargs):
amplitude = 1.0 / (np.sqrt(2 * np.pi) * width ** 3)
self._model = models.RickerWavelet1D(amplitude, 0, width)
self._default_size = _round_up_to_odd_integer(8 * width)
super().__init__(**kwargs)
self._truncation = np.abs(self._array.sum() / self._array.size)
class RickerWavelet2DKernel(Kernel2D):
"""
2D Ricker wavelet filter kernel (sometimes known as a "Mexican Hat"
kernel).
The Ricker wavelet, or inverted Gaussian-Laplace filter, is a
bandpass filter. It smooths the data and removes slowly varying
    or constant structures (e.g. background). It is useful for peak or
multi-scale detection.
This kernel is derived from a normalized Gaussian function, by
computing the second derivative. This results in an amplitude
    at the kernel's center of 1. / (pi * width ** 4). The normalization
is the same as for `scipy.ndimage.gaussian_laplace`, except
for a minus sign.
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this kernel.
Parameters
----------
width : number
Width of the filter kernel, defined as the standard deviation
of the Gaussian function from which it is derived.
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width +1⌋.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*width +1⌋.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import RickerWavelet2DKernel
ricker_2d_kernel = RickerWavelet2DKernel(10)
plt.imshow(ricker_2d_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, width, **kwargs):
amplitude = 1.0 / (np.pi * width ** 4)
self._model = models.RickerWavelet2D(amplitude, 0, 0, width)
self._default_size = _round_up_to_odd_integer(8 * width)
super().__init__(**kwargs)
self._truncation = np.abs(self._array.sum() / self._array.size)
class AiryDisk2DKernel(Kernel2D):
"""
2D Airy disk kernel.
This kernel models the diffraction pattern of a circular aperture.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
radius : float
The radius of the Airy disk kernel (radius of the first zero).
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*radius + 1⌋.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*radius + 1⌋.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel,
Ring2DKernel, TrapezoidDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import AiryDisk2DKernel
airydisk_2D_kernel = AiryDisk2DKernel(10)
plt.imshow(airydisk_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, radius, **kwargs):
self._model = models.AiryDisk2D(1, 0, 0, radius)
self._default_size = _round_up_to_odd_integer(8 * radius)
super().__init__(**kwargs)
self.normalize()
self._truncation = None
class Moffat2DKernel(Kernel2D):
"""
2D Moffat kernel.
This kernel is a typical model for a seeing limited PSF.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*radius + 1⌋.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*radius + 1⌋.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel,
Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Moffat2DKernel
moffat_2D_kernel = Moffat2DKernel(3, 2)
plt.imshow(moffat_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, gamma, alpha, **kwargs):
# Compute amplitude, from
# https://en.wikipedia.org/wiki/Moffat_distribution
amplitude = (alpha - 1.0) / (np.pi * gamma * gamma)
self._model = models.Moffat2D(amplitude, 0, 0, gamma, alpha)
self._default_size = _round_up_to_odd_integer(4.0 * self._model.fwhm)
super().__init__(**kwargs)
self.normalize()
self._truncation = None
class Model1DKernel(Kernel1D):
"""
Create kernel from 1D model.
The model has to be centered on x = 0.
Parameters
----------
model : `~astropy.modeling.Fittable1DModel`
Kernel response function model
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width +1⌋.
Must be odd.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
Raises
------
TypeError
If model is not an instance of `~astropy.modeling.Fittable1DModel`
See also
--------
Model2DKernel : Create kernel from `~astropy.modeling.Fittable2DModel`
CustomKernel : Create kernel from list or array
Examples
--------
Define a Gaussian1D model:
>>> from astropy.modeling.models import Gaussian1D
>>> from astropy.convolution.kernels import Model1DKernel
>>> gauss = Gaussian1D(1, 0, 2)
And create a custom one dimensional kernel from it:
>>> gauss_kernel = Model1DKernel(gauss, x_size=9)
This kernel can now be used like a usual Astropy kernel.
"""
_separable = False
_is_bool = False
def __init__(self, model, **kwargs):
if isinstance(model, Fittable1DModel):
self._model = model
else:
raise TypeError("Must be Fittable1DModel")
super().__init__(**kwargs)
class Model2DKernel(Kernel2D):
"""
Create kernel from 2D model.
The model has to be centered on x = 0 and y = 0.
Parameters
----------
model : `~astropy.modeling.Fittable2DModel`
Kernel response function model
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width + 1⌋.
Must be odd.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*width + 1⌋.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
Raises
------
TypeError
If model is not an instance of `~astropy.modeling.Fittable2DModel`
See also
--------
Model1DKernel : Create kernel from `~astropy.modeling.Fittable1DModel`
CustomKernel : Create kernel from list or array
Examples
--------
Define a Gaussian2D model:
>>> from astropy.modeling.models import Gaussian2D
>>> from astropy.convolution.kernels import Model2DKernel
>>> gauss = Gaussian2D(1, 0, 0, 2, 2)
And create a custom two dimensional kernel from it:
>>> gauss_kernel = Model2DKernel(gauss, x_size=9)
This kernel can now be used like a usual astropy kernel.
"""
_is_bool = False
_separable = False
def __init__(self, model, **kwargs):
self._separable = False
if isinstance(model, Fittable2DModel):
self._model = model
else:
raise TypeError("Must be Fittable2DModel")
super().__init__(**kwargs)
class PSFKernel(Kernel2D):
"""
Initialize filter kernel from astropy PSF instance.
"""
_separable = False
def __init__(self):
raise NotImplementedError('Not yet implemented')
class CustomKernel(Kernel):
"""
Create filter kernel from list or array.
Parameters
----------
array : list or array
Filter kernel array. Size must be odd.
Raises
------
TypeError
If array is not a list or array.
`~astropy.convolution.KernelSizeError`
If array size is even.
See also
--------
Model2DKernel, Model1DKernel
Examples
--------
Define one dimensional array:
>>> from astropy.convolution.kernels import CustomKernel
>>> import numpy as np
>>> array = np.array([1, 2, 3, 2, 1])
>>> kernel = CustomKernel(array)
>>> kernel.dimension
1
Define two dimensional array:
>>> array = np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]])
>>> kernel = CustomKernel(array)
>>> kernel.dimension
2
"""
def __init__(self, array):
self.array = array
super().__init__(self._array)
@property
def array(self):
"""
Filter kernel array.
"""
return self._array
@array.setter
def array(self, array):
"""
Filter kernel array setter
"""
if isinstance(array, np.ndarray):
self._array = array.astype(np.float64)
elif isinstance(array, list):
self._array = np.array(array, dtype=np.float64)
else:
raise TypeError("Must be list or array.")
# Check if array is odd in all axes
if has_even_axis(self):
raise_even_kernel_exception()
# Check if array is bool
ones = self._array == 1.
zeros = self._array == 0
self._is_bool = bool(np.all(np.logical_or(ones, zeros)))
self._truncation = 0.0
@deprecated('4.0', alternative='RickerWavelet1DKernel')
class MexicanHat1DKernel(RickerWavelet1DKernel):
pass
@deprecated('4.0', alternative='RickerWavelet2DKernel')
class MexicanHat2DKernel(RickerWavelet2DKernel):
pass
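# Usage sketch (illustrative addition, not part of the original module):
# a CustomKernel built from an odd-sized array can be used with
# astropy.convolution.convolve like any built-in kernel.
#
#     from astropy.convolution import convolve
#     kernel = CustomKernel([1., 2., 3., 2., 1.])
#     smoothed = convolve([0., 0., 1., 0., 0.], kernel)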
| lpsinger/astropy | astropy/convolution/kernels.py | Python | bsd-3-clause | 33,934 | ["Gaussian"] | 28b1175fa902264d61a0a5b67cfb5ae4b6a529e3d93eae9f80ad949e61df1ebf |
# Copyright (C) 2011-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import pathlib
import sys
import math
import numpy as np
try:
import vtk
from vtk.util import numpy_support as VN
skipIfMissingPythonPackage = utx.no_skip
except ImportError:
skipIfMissingPythonPackage = ut.skip(
"Python module vtk not available, skipping test!")
import espressomd
import espressomd.electrokinetics
import espressomd.shapes
import ek_common
##########################################################################
# Set up the System #
##########################################################################
# Set the slit pore geometry. The width is the non-periodic part of the
# geometry. The padding is used to ensure that there is no field outside
# the slit.
params_base = dict([
('dt', 1.0 / 7),
('integration_length', 2300),
('agrid', 1. / 3),
('density_water', 26.15),
('friction', 1.9),
('width', 20.0),
('thickness', 3.0),
('sigma', -0.04),
('padding', 6.0),
('force', 0.07),
('temperature', 1.1),
('viscosity_kinematic', 1.7),
('bjerrum_length', 0.8),
('valency', 1.0),
])
params_base['density_counterions'] = -2.0 * \
params_base['sigma'] / params_base['width']
axis = "@TEST_SUFFIX@"
params = {
"x": dict([
('box_x', params_base['thickness']),
('box_y', params_base['thickness']),
('box_z', params_base['width'] + 2 * params_base['padding']),
('ext_force_density', [params_base['force'], 0.0, 0.0]),
('wall_normal_1', [0, 0, 1]),
('wall_normal_2', [0, 0, -1]),
('periodic_dirs', (0, 1)),
('non_periodic_dir', 2),
('n_roll_index', 0),
('calculated_pressure_xy', 0.0),
('calculated_pressure_yz', 0.0)
]),
"y": dict([
('box_x', params_base['width'] + 2 * params_base['padding']),
('box_y', params_base['thickness']),
('box_z', params_base['thickness']),
('ext_force_density', [0.0, params_base['force'], 0.0]),
('wall_normal_1', [1, 0, 0]),
('wall_normal_2', [-1, 0, 0]),
('periodic_dirs', (1, 2)),
('non_periodic_dir', 0),
('n_roll_index', 1),
('calculated_pressure_xz', 0.0),
('calculated_pressure_yz', 0.0)
]),
"z": dict([
('box_x', params_base['thickness']),
('box_y', params_base['width'] + 2 * params_base['padding']),
('box_z', params_base['thickness']),
('ext_force_density', [0.0, 0.0, params_base['force']]),
('wall_normal_1', [0, 1, 0]),
('wall_normal_2', [0, -1, 0]),
('periodic_dirs', (0, 2)),
('non_periodic_dir', 1),
('n_roll_index', 2),
('calculated_pressure_xy', 0.0),
('calculated_pressure_xz', 0.0)
])
}[axis]
def bisection():
# initial parameters for bisection scheme
size = math.pi / (2.0 * params_base['width'])
pnt0 = 0.0
pntm = pnt0 + size
pnt1 = pnt0 + 1.9 * size
# the bisection scheme
tol = 1.0e-08
while size > tol:
val0 = ek_common.solve(
pnt0,
params_base['width'],
params_base['bjerrum_length'],
params_base['sigma'],
params_base['valency'])
val1 = ek_common.solve(
pnt1,
params_base['width'],
params_base['bjerrum_length'],
params_base['sigma'],
params_base['valency'])
valm = ek_common.solve(
pntm,
params_base['width'],
params_base['bjerrum_length'],
params_base['sigma'],
params_base['valency'])
if (val0 < 0.0 and val1 > 0.0):
if valm < 0.0:
pnt0 = pntm
size = size / 2.0
pntm = pnt0 + size
else:
pnt1 = pntm
size = size / 2.0
pntm = pnt1 - size
elif (val0 > 0.0 and val1 < 0.0):
if valm < 0.0:
pnt1 = pntm
size = size / 2.0
pntm = pnt1 - size
else:
pnt0 = pntm
size = size / 2.0
pntm = pnt0 + size
else:
sys.exit("Bisection method fails:\n"
"Tuning of regular boundaries may be required.")
return pntm
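# The root pntm returned above (stored as xi on the test class) parametrizes
# the analytic electro-osmotic-flow profiles checked below; ek_common.solve
# is assumed to evaluate the corresponding transcendental charge-neutrality
# condition for the slit geometry.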
@utx.skipIfMissingGPU()
@utx.skipIfMissingFeatures(["ELECTROKINETICS", "EK_BOUNDARIES"])
class ek_eof_one_species(ut.TestCase):
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
xi = bisection()
def parse_vtk(self, filepath, name, shape):
reader = vtk.vtkStructuredPointsReader()
reader.SetFileName(filepath)
reader.ReadAllVectorsOn()
reader.ReadAllScalarsOn()
reader.Update()
data = reader.GetOutput()
points = data.GetPointData()
return VN.vtk_to_numpy(points.GetArray(name)).reshape(shape, order='F')
@classmethod
def setUpClass(cls):
system = cls.system
system.box_l = [params['box_x'], params['box_y'], params['box_z']]
system.time_step = params_base['dt']
system.cell_system.skin = 0.1
system.thermostat.turn_off()
# Set up the (LB) electrokinetics fluid
ek = cls.ek = espressomd.electrokinetics.Electrokinetics(
agrid=params_base['agrid'],
lb_density=params_base['density_water'],
viscosity=params_base['viscosity_kinematic'],
friction=params_base['friction'],
T=params_base['temperature'],
prefactor=params_base['bjerrum_length'] *
params_base['temperature'],
stencil="linkcentered")
counterions = cls.counterions = espressomd.electrokinetics.Species(
density=params_base['density_counterions'],
D=0.3,
valency=params_base['valency'],
ext_force_density=params['ext_force_density'])
ek.add_species(counterions)
# Set up the walls confining the fluid and carrying charge
ek_wall1 = espressomd.ekboundaries.EKBoundary(
charge_density=params_base['sigma'] /
params_base['padding'],
shape=espressomd.shapes.Wall(
normal=params['wall_normal_1'],
dist=params_base['padding']))
ek_wall2 = espressomd.ekboundaries.EKBoundary(
charge_density=params_base['sigma'] /
params_base['padding'],
shape=espressomd.shapes.Wall(
normal=params['wall_normal_2'],
dist=-(params_base['padding'] + params_base['width'])))
system.ekboundaries.add(ek_wall1)
system.ekboundaries.add(ek_wall2)
system.actors.add(ek)
# Integrate the system
system.integrator.run(params_base['integration_length'])
def test(self):
# compare the various quantities to the analytic results
total_velocity_difference = 0.0
total_density_difference = 0.0
total_pressure_difference_xx = 0.0
total_pressure_difference_yy = 0.0
total_pressure_difference_zz = 0.0
total_pressure_difference_xy = 0.0
total_pressure_difference_yz = 0.0
total_pressure_difference_xz = 0.0
system = self.system
ek = self.ek
counterions = self.counterions
for i in range(
int(system.box_l[params['non_periodic_dir']] / params_base['agrid'])):
if (i *
params_base['agrid'] >= params_base['padding'] and i *
params_base['agrid'] < system.box_l[params['non_periodic_dir']] -
params_base['padding']):
position = i * params_base['agrid'] - params_base['padding'] - \
params_base['width'] / 2.0 + params_base['agrid'] / 2.0
# density
index = np.array([int(system.box_l[params['periodic_dirs'][0]] /
(2 * params_base['agrid'])),
int(system.box_l[params['periodic_dirs'][1]] /
(2 * params_base['agrid'])), i])
index = np.roll(index, params['n_roll_index'])
measured_density = counterions[index].density
calculated_density = ek_common.density(
position, self.xi, params_base['bjerrum_length'])
density_difference = abs(measured_density - calculated_density)
total_density_difference += density_difference
# velocity
measured_velocity = ek[index].velocity[int(
np.nonzero(params['ext_force_density'])[0])]
calculated_velocity = ek_common.velocity(
position,
self.xi,
params_base['width'],
params_base['bjerrum_length'],
params_base['force'],
params_base['viscosity_kinematic'],
params_base['density_water'])
velocity_difference = abs(
measured_velocity - calculated_velocity)
total_velocity_difference = total_velocity_difference + \
velocity_difference
# diagonal pressure tensor
measured_pressure_xx = ek[index].pressure_tensor[(0, 0)]
calculated_pressure_xx = ek_common.hydrostatic_pressure(
ek,
(0, 0),
system.box_l[params['periodic_dirs'][0]],
system.box_l[params['periodic_dirs'][1]],
params['box_z'],
params_base['agrid'])
measured_pressure_yy = ek[index].pressure_tensor[(1, 1)]
calculated_pressure_yy = ek_common.hydrostatic_pressure(
ek,
(1, 1),
system.box_l[params['periodic_dirs'][0]],
system.box_l[params['periodic_dirs'][1]],
params['box_z'],
params_base['agrid'])
measured_pressure_zz = ek[index].pressure_tensor[(2, 2)]
calculated_pressure_zz = ek_common.hydrostatic_pressure(
ek,
(2, 2),
system.box_l[params['periodic_dirs'][0]],
system.box_l[params['periodic_dirs'][1]],
params['box_z'],
params_base['agrid'])
pressure_difference_xx = abs(
measured_pressure_xx - calculated_pressure_xx)
pressure_difference_yy = abs(
measured_pressure_yy - calculated_pressure_yy)
pressure_difference_zz = abs(
measured_pressure_zz - calculated_pressure_zz)
total_pressure_difference_xx = total_pressure_difference_xx + \
pressure_difference_xx
total_pressure_difference_yy = total_pressure_difference_yy + \
pressure_difference_yy
total_pressure_difference_zz = total_pressure_difference_zz + \
pressure_difference_zz
calculated_pressure_offdiagonal = ek_common.pressure_tensor_offdiagonal(
position, self.xi, params_base['bjerrum_length'], params_base['force'])
# xy component pressure tensor
measured_pressure_xy = ek[index].pressure_tensor[(0, 1)]
calculated_pressure_xy = 0.0
if 'calculated_pressure_xy' not in params:
calculated_pressure_xy = calculated_pressure_offdiagonal
pressure_difference_xy = abs(
measured_pressure_xy - calculated_pressure_xy)
total_pressure_difference_xy = total_pressure_difference_xy + \
pressure_difference_xy
# yz component pressure tensor
measured_pressure_yz = ek[index].pressure_tensor[(1, 2)]
calculated_pressure_yz = 0.0
if 'calculated_pressure_yz' not in params:
calculated_pressure_yz = calculated_pressure_offdiagonal
pressure_difference_yz = abs(
measured_pressure_yz - calculated_pressure_yz)
total_pressure_difference_yz = total_pressure_difference_yz + \
pressure_difference_yz
# xz component pressure tensor
measured_pressure_xz = ek[index].pressure_tensor[(0, 2)]
calculated_pressure_xz = 0.0
if 'calculated_pressure_xz' not in params:
calculated_pressure_xz = calculated_pressure_offdiagonal
pressure_difference_xz = abs(
measured_pressure_xz - calculated_pressure_xz)
total_pressure_difference_xz = total_pressure_difference_xz + \
pressure_difference_xz
scale_factor = params_base['agrid'] / params_base['width']
total_density_difference *= scale_factor
total_velocity_difference *= scale_factor
total_pressure_difference_xx *= scale_factor
total_pressure_difference_yy *= scale_factor
total_pressure_difference_zz *= scale_factor
total_pressure_difference_xy *= scale_factor
total_pressure_difference_yz *= scale_factor
total_pressure_difference_xz *= scale_factor
self.assertLess(total_density_difference, 1.0e-04,
"Density accuracy not achieved")
self.assertLess(total_velocity_difference, 1.0e-04,
"Velocity accuracy not achieved")
self.assertLess(total_pressure_difference_xx, 1.0e-04,
"Pressure accuracy xx component not achieved")
self.assertLess(total_pressure_difference_yy, 1.0e-04,
"Pressure accuracy yy component not achieved")
self.assertLess(total_pressure_difference_zz, 1.0e-04,
"Pressure accuracy zz component not achieved")
self.assertLess(total_pressure_difference_xy, 1.0e-04,
"Pressure accuracy xy component not achieved")
self.assertLess(total_pressure_difference_yz, 1.0e-04,
"Pressure accuracy yz component not achieved")
self.assertLess(total_pressure_difference_xz, 1.0e-04,
"Pressure accuracy xz component not achieved")
@skipIfMissingPythonPackage
def test_vtk(self):
ek = self.ek
counterions = self.counterions
grid_dims = list(
map(int, np.round(self.system.box_l / params_base['agrid'])))
# write VTK files
vtk_root = f"vtk_out/ek_eof_{axis}"
pathlib.Path(vtk_root).mkdir(parents=True, exist_ok=True)
path_vtk_boundary = f"{vtk_root}/boundary.vtk"
path_vtk_velocity = f"{vtk_root}/velocity.vtk"
path_vtk_potential = f"{vtk_root}/potential.vtk"
path_vtk_lbdensity = f"{vtk_root}/density.vtk"
path_vtk_lbforce = f"{vtk_root}/lbforce.vtk"
path_vtk_density = f"{vtk_root}/lbdensity.vtk"
path_vtk_flux = f"{vtk_root}/flux.vtk"
path_vtk_flux_link = f"{vtk_root}/flux_link.vtk"
if espressomd.has_features('EK_DEBUG'):
path_vtk_flux_fluc = f"{vtk_root}/flux_fluc.vtk"
ek.write_vtk_boundary(path_vtk_boundary)
ek.write_vtk_velocity(path_vtk_velocity)
ek.write_vtk_potential(path_vtk_potential)
ek.write_vtk_density(path_vtk_lbdensity)
ek.write_vtk_lbforce(path_vtk_lbforce)
counterions.write_vtk_density(path_vtk_density)
counterions.write_vtk_flux(path_vtk_flux)
if espressomd.has_features('EK_DEBUG'):
counterions.write_vtk_flux_fluc(path_vtk_flux_fluc)
counterions.write_vtk_flux_link(path_vtk_flux_link)
# load VTK files to check they are correctly formatted
get_vtk = self.parse_vtk
vtk_boundary = get_vtk(path_vtk_boundary, "boundary", grid_dims)
vtk_velocity = get_vtk(path_vtk_velocity, "velocity", grid_dims + [3])
vtk_potential = get_vtk(path_vtk_potential, "potential", grid_dims)
vtk_lbdensity = get_vtk(path_vtk_lbdensity, "density_lb", grid_dims)
get_vtk(path_vtk_lbforce, "lbforce", grid_dims + [3])
vtk_density = get_vtk(path_vtk_density, "density_1", grid_dims)
vtk_flux = get_vtk(path_vtk_flux, "flux_1", grid_dims + [3])
if espressomd.has_features('EK_DEBUG'):
get_vtk(path_vtk_flux_fluc, "flux_fluc_1", grid_dims + [4])
get_vtk(path_vtk_flux_link, "flux_link_1", grid_dims + [13])
# check VTK files against the EK grid
species_density = np.zeros(grid_dims)
species_flux = np.zeros(grid_dims + [3])
ek_potential = np.zeros(grid_dims)
ek_velocity = np.zeros(grid_dims + [3])
for i in range(grid_dims[0]):
for j in range(grid_dims[1]):
for k in range(grid_dims[2]):
index = np.array([i, j, k])
species_density[i, j, k] = counterions[index].density
species_flux[i, j, k] = counterions[index].flux
ek_potential[i, j, k] = ek[index].potential
ek_velocity[i, j, k] = ek[index].velocity
np.testing.assert_allclose(vtk_velocity, ek_velocity, atol=1e-6)
np.testing.assert_allclose(vtk_potential, ek_potential, atol=1e-6)
np.testing.assert_allclose(vtk_density, species_density, atol=1e-6)
np.testing.assert_allclose(vtk_flux, species_flux, atol=1e-6)
# check VTK files against the EK parameters
dens = params_base['density_water']
left_dist = int(params_base['padding'] / params_base['agrid'])
right_dist = int(-params_base['padding'] / params_base['agrid'])
thickness = int(params_base['thickness'] / params_base['agrid'])
i = np.roll([0, 0, right_dist], params['n_roll_index'])
j = np.roll([thickness, thickness, left_dist], params['n_roll_index'])
mask_left = np.zeros(grid_dims, dtype=bool)
mask_left[:j[0], :j[1], :j[2]] = True
mask_right = np.zeros(grid_dims, dtype=bool)
mask_right[i[0]:, i[1]:, i[2]:] = True
mask_outside = np.logical_or(mask_left, mask_right)
mask_inside = np.logical_not(mask_outside)
np.testing.assert_allclose(vtk_lbdensity[mask_inside], dens, atol=1e-4)
np.testing.assert_allclose(vtk_lbdensity[mask_outside], 0, atol=1e-6)
np.testing.assert_allclose(vtk_boundary[mask_left], 1, atol=1e-6)
np.testing.assert_allclose(vtk_boundary[mask_right], 2, atol=1e-6)
np.testing.assert_allclose(vtk_boundary[mask_inside], 0, atol=1e-6)
if __name__ == "__main__":
ut.main()
| pkreissl/espresso | testsuite/python/ek_eof_one_species.py | Python | gpl-3.0 | 19,793 | ["ESPResSo", "VTK"] | 703b80211f00478658fc13094cb203f3f5f0666daa57e7b50ae96db8a387081f |
"""LAMMPS calculator for preparing and parsing single-point LAMMPS \
calculations."""
import subprocess
import numpy as np
# TODO: split LAMMPS input and data files into separate classes
def run_lammps(lammps_executable, input_file, output_file):
"""Runs a single point LAMMPS calculation.
:param lammps_executable: LAMMPS executable file.
:type lammps_executable: str
:param input_file: LAMMPS input file.
:type input_file: str
:param output_file: Desired LAMMPS output file.
:type output_file: str
"""
# run lammps
lammps_command = f"{lammps_executable} -in {input_file} "
print("run command:", lammps_command)
with open("tmp2False.out", "w+") as fout:
subprocess.call(lammps_command.split(), stdout=fout)
def lammps_parser(dump_file, std=False):
"""Parses LAMMPS dump file. Assumes the forces are the final quantities \
to get dumped.
:param dump_file: Dump file to be parsed.
:type dump_file: str
:param std: If True, also parse per-atom uncertainties from the last column.
:type std: bool
:return: Numpy arrays of atomic forces and (if std is True) their uncertainties.
:rtype: (np.ndarray, np.ndarray)
"""
forces = []
stds = []
with open(dump_file, "r") as outf:
lines = outf.readlines()
for count, line in enumerate(lines):
if line.startswith("ITEM: ATOMS"):
force_start = count
for line in lines[force_start + 1 :]:
fline = line.split()
if std:
forces.append([float(fline[-4]), float(fline[-3]), float(fline[-2])])
stds.append(float(fline[-1]))
else:
forces.append([float(fline[-3]), float(fline[-2]), float(fline[-1])])
return np.array(forces), np.array(stds)
# -----------------------------------------------------------------------------
# data functions
# -----------------------------------------------------------------------------
def lammps_dat(structure, atom_types, atom_masses, species):
"""Create LAMMPS data file for an uncharged material.
:param structure: Structure object containing coordinates and cell.
:type structure: struc.Structure
:param atom_types: Atom types ranging from 1 to N.
:type atom_types: List[int]
:param atom_masses: Atomic masses of the atom types.
:type atom_masses: List[int]
:param species: Type of each atom.
:type species: List[int]
"""
dat_text = f"""Header of the LAMMPS data file
{structure.nat} atoms
{len(atom_types)} atom types
"""
dat_text += lammps_cell_text(structure)
dat_text += """
Masses
"""
mass_text = ""
for atom_type, atom_mass in zip(atom_types, atom_masses):
mass_text += f"{atom_type} {atom_mass}\n"
dat_text += mass_text
dat_text += """
Atoms
"""
dat_text += lammps_pos_text(structure, species)
return dat_text
def lammps_dat_charged(structure, atom_types, atom_charges, atom_masses, species):
"""Create LAMMPS data file for a charged material.
:param structure: Structure object containing coordinates and cell.
:type structure: struc.Structure
:param atom_types: List of atom types.
:type atom_types: List[int]
:param atom_charges: Charge of each atom.
:type atom_charges: List[float]
:param atom_masses: Mass of each atom type.
:type atom_masses: List[float]
:param species: Type of each atom.
:type species: List[int]
"""
dat_text = f"""Header of the LAMMPS data file
{structure.nat} atoms
{len(atom_types)} atom types
"""
dat_text += lammps_cell_text(structure)
dat_text += """
Masses
"""
mass_text = ""
for atom_type, atom_mass in zip(atom_types, atom_masses):
mass_text += f"{atom_type} {atom_mass}\n"
dat_text += mass_text
dat_text += """
Atoms
"""
dat_text += lammps_pos_text_charged(structure, atom_charges, species)
return dat_text
def lammps_cell_text(structure):
"""Write cell from structure object."""
cell_text = f"""
0.0 {structure.cell[0, 0]} xlo xhi
0.0 {structure.cell[1, 1]} ylo yhi
0.0 {structure.cell[2, 2]} zlo zhi
{structure.cell[1, 0]} {structure.cell[2, 0]} {structure.cell[2, 1]} xy xz yz
"""
return cell_text
def lammps_pos_text(structure, species):
"""Create LAMMPS position text for a system of uncharged particles."""
pos_text = "\n"
for count, (pos, spec) in enumerate(zip(structure.positions, species)):
pos_text += f"{count+1} {spec} {pos[0]} {pos[1]} {pos[2]}\n"
return pos_text
def lammps_pos_text_charged(structure, charges, species):
"""Create LAMMPS position text for a system of charged particles."""
pos_text = "\n"
for count, (pos, chrg, spec) in enumerate(
zip(structure.positions, charges, species)
):
pos_text += f"{count+1} {spec} {chrg} {pos[0]} {pos[1]} {pos[2]}\n"
return pos_text
def write_text(file, text):
"""Write text to file."""
with open(file, "w") as fout:
fout.write(text)
# -----------------------------------------------------------------------------
# input functions
# -----------------------------------------------------------------------------
def generic_lammps_input(
dat_file,
style_string,
coeff_string,
dump_file,
newton=False,
std_string="",
std_style=None,
):
"""Create text for generic LAMMPS input file."""
if newton:
ntn = "on"
else:
ntn = "off"
if std_string != "" and std_style is not None:
if std_style == "flare":
compute_cmd = f"compute std all uncertainty/atom {std_string}"
elif std_style == "flare_pp":
compute_cmd = f"compute std all flare/std/atom {std_string}"
else:
raise NotImplementedError
c_std = "c_std"
else:
compute_cmd = ""
c_std = ""
input_text = f"""# generic lammps input file
units metal
atom_style atomic
dimension 3
boundary p p p
newton {ntn}
read_data {dat_file}
pair_style {style_string}
pair_coeff {coeff_string}
thermo_style one
{compute_cmd}
dump 1 all custom 1 {dump_file} id type x y z fx fy fz {c_std}
dump_modify 1 sort id
run 0
"""
return input_text
def ewald_input(dat_file, short_cut, kspace_accuracy, dump_file, newton=True):
"""Create text for Ewald input file."""
if newton is True:
ntn = "on"
else:
ntn = "off"
input_text = f"""# Ewald input file
newton {ntn}
units metal
atom_style charge
dimension 3
boundary p p p
read_data {dat_file}
pair_style coul/long {short_cut}
pair_coeff * *
kspace_style ewald {kspace_accuracy}
thermo_style one
dump 1 all custom 1 {dump_file} id type x y z fx fy fz
dump_modify 1 sort id
run 0
"""
return input_text
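# Usage sketch (illustrative addition; the file names and the "eam/alloy"
# potential are hypothetical, and `structure` is assumed to expose .nat,
# .cell and .positions as used above):
#
#     dat_text = lammps_dat(structure, [1], [26.98], [1] * structure.nat)
#     write_text("tmp.data", dat_text)
#     input_text = generic_lammps_input(
#         "tmp.data", "eam/alloy", "* * Al.eam.alloy Al", "tmp.dump")
#     write_text("tmp.in", input_text)
#     run_lammps("lmp", "tmp.in", "tmp.out")
#     forces, _ = lammps_parser("tmp.dump")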
| mir-group/flare | flare/lammps/lammps_calculator.py | Python | mit | 6,652 | ["LAMMPS"] | 5ff906e2cf757ca02c28963a1ea54dbb8c48ad9308e56f0a9444d1dcf856bcab |
""" The Bdii2CSAgent performs checking BDII for availability of CE
resources for a given or any configured VO. It detects resources not yet
present in the CS and notifies the administrators.
For the CEs already present in the CS, the agent is updating
if necessary settings which were changed in the BDII recently
The following options can be set for the Bdii2CSAgent.
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN Bdii2CSAgent
:end-before: ##END
:dedent: 2
:caption: Bdii2CSAgent options
"""
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOs, getVOOption
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getQueues, getCESiteMapping
from DIRAC.ConfigurationSystem.Client.Utilities import getGridCEs, getSiteUpdates
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.Glue2 import getGlue2CEInfo
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
class Bdii2CSAgent(AgentModule):
def __init__(self, *args, **kwargs):
"""Defines default parameters"""
super(Bdii2CSAgent, self).__init__(*args, **kwargs)
self.addressTo = ""
self.addressFrom = ""
self.voName = []
self.subject = self.am_getModuleParam("fullName")
self.alternativeBDIIs = []
self.voBdiiCEDict = {}
self.voBdiiSEDict = {}
self.host = "cclcgtopbdii01.in2p3.fr:2170"
self.injectSingleCoreQueues = False
self.csAPI = None
# What to get
self.processCEs = True
self.selectedSites = []
# Update the CS or not?
self.dryRun = False
def initialize(self):
"""Gets run paramaters from the configuration"""
self.addressTo = self.am_getOption("MailTo", self.addressTo)
self.addressFrom = self.am_getOption("MailFrom", self.addressFrom)
# Create a list of alternative bdii urls
self.alternativeBDIIs = self.am_getOption("AlternativeBDIIs", self.alternativeBDIIs)
self.host = self.am_getOption("Host", self.host)
self.injectSingleCoreQueues = self.am_getOption("InjectSingleCoreQueues", self.injectSingleCoreQueues)
# Check if the bdii url is appended by a port number, if not append the default 2170
for index, url in enumerate(self.alternativeBDIIs):
if not url.split(":")[-1].isdigit():
self.alternativeBDIIs[index] += ":2170"
if self.addressTo and self.addressFrom:
self.log.info("MailTo", self.addressTo)
self.log.info("MailFrom", self.addressFrom)
if self.alternativeBDIIs:
self.log.info("AlternativeBDII URLs:", self.alternativeBDIIs)
self.processCEs = self.am_getOption("ProcessCEs", self.processCEs)
self.selectedSites = self.am_getOption("SelectedSites", [])
self.dryRun = self.am_getOption("DryRun", self.dryRun)
self.voName = self.am_getOption("VirtualOrganization", self.voName)
if not self.voName:
self.voName = self.am_getOption("VO", [])
if not self.voName or (len(self.voName) == 1 and self.voName[0].lower() == "all"):
# Get all VOs defined in the configuration
self.voName = []
result = getVOs()
if result["OK"]:
vos = result["Value"]
for vo in vos:
vomsVO = getVOOption(vo, "VOMSName")
if vomsVO:
self.voName.append(vomsVO)
if self.voName:
self.log.info("Agent will manage VO(s) %s" % self.voName)
else:
self.log.fatal("VirtualOrganization option not defined for agent")
return S_ERROR()
self.csAPI = CSAPI()
return self.csAPI.initialize()
def execute(self):
"""General agent execution method"""
self.voBdiiCEDict = {}
# Get a "fresh" copy of the CS data
result = self.csAPI.downloadCSData()
if not result["OK"]:
self.log.warn("Could not download a fresh copy of the CS data", result["Message"])
# Refresh the configuration from the master server
gConfig.forceRefresh(fromMaster=True)
if self.processCEs:
self.__lookForNewCEs()
self.__updateCEs()
return S_OK()
def __lookForNewCEs(self):
"""Look up BDII for CEs not yet present in the DIRAC CS"""
bannedCEs = self.am_getOption("BannedCEs", [])
for vo in self.voName:
# get the known CEs for a given VO, so we can know the unknowns, or no longer supported,
# for a VO
res = getQueues(community=vo)
if not res["OK"]:
return res
knownCEs = set()
for _site, ces in res["Value"].items():
knownCEs.update(ces)
knownCEs.update(bannedCEs)
result = self.__getGlue2CEInfo(vo)
if not result["OK"]:
continue
bdiiInfo = result["Value"]
result = getGridCEs(vo, bdiiInfo=bdiiInfo, ceBlackList=knownCEs)
if not result["OK"]:
self.log.error("Failed to get unused CEs", result["Message"])
continue # next VO
siteDict = result["Value"]
unknownCEs = set(result["UnknownCEs"]) - set(bannedCEs)
body = ""
for site in siteDict:
newCEs = set(siteDict[site]) # pylint: disable=no-member
if not newCEs:
continue
ceString = ""
for ce in newCEs:
queueString = ""
ceInfo = bdiiInfo[site]["CEs"][ce]
newCEString = "CE: %s, GOCDB Site Name: %s" % (ce, site)
systemTuple = siteDict[site][ce]["System"]
osString = "%s_%s_%s" % (systemTuple)
newCEString = "\n%s\n%s\n" % (newCEString, osString)
for queue in ceInfo["Queues"]:
queueStatus = ceInfo["Queues"][queue].get("GlueCEStateStatus", "UnknownStatus")
if "production" in queueStatus.lower():
ceType = ceInfo["Queues"][queue].get("GlueCEImplementationName", "")
queueString += " %s %s %s\n" % (queue, queueStatus, ceType)
if queueString:
ceString += newCEString
ceString += "Queues:\n"
ceString += queueString
if ceString:
body += ceString
if siteDict:
body = "\nWe are glad to inform You about new CE(s) possibly suitable for %s:\n" % vo + body
body += "\n\nTo suppress information about CE add its name to BannedCEs list.\n"
body += "Add new Sites/CEs for vo %s with the command:\n" % vo
body += "dirac-admin-add-resources --vo %s --ce\n" % vo
if unknownCEs:
body += "\n\n"
body += "There is no (longer) information about the following CEs for the %s VO.\n" % vo
body += "\n".join(sorted(unknownCEs))
body += "\n\n"
if body:
self.log.info(body)
if self.addressTo and self.addressFrom:
notification = NotificationClient()
result = notification.sendMail(
self.addressTo, self.subject, body, self.addressFrom, localAttempt=False
)
if not result["OK"]:
self.log.error("Can not send new site notification mail", result["Message"])
return S_OK()
def __getGlue2CEInfo(self, vo):
if vo in self.voBdiiCEDict:
return S_OK(self.voBdiiCEDict[vo])
self.log.info("Check for available CEs for VO", vo)
totalResult = S_OK({})
message = ""
mainResult = getGlue2CEInfo(vo, host=self.host)
if not mainResult["OK"]:
self.log.error("Failed getting information from default bdii", mainResult["Message"])
message = mainResult["Message"]
for bdii in reversed(self.alternativeBDIIs):
resultAlt = getGlue2CEInfo(vo, host=bdii)
if resultAlt["OK"]:
totalResult["Value"].update(resultAlt["Value"])
else:
self.log.error("Failed getting information from %s " % bdii, resultAlt["Message"])
message = (message + "\n" + resultAlt["Message"]).strip()
if mainResult["OK"]:
totalResult["Value"].update(mainResult["Value"])
if not totalResult["Value"] and message: # Dict is empty and we have an error message
self.log.error("Error during BDII request", message)
totalResult = S_ERROR(message)
else:
self.voBdiiCEDict[vo] = totalResult["Value"]
self.__purgeSites(totalResult["Value"])
return totalResult
def __updateCEs(self):
"""Update the Site/CE/queue settings in the CS if they were changed in the BDII"""
bdiiChangeSet = set()
bannedCEs = self.am_getOption("BannedCEs", [])
for vo in self.voName:
result = self.__getGlue2CEInfo(vo)
if not result["OK"]:
continue
ceBdiiDict = result["Value"]
for _siteName, ceDict in ceBdiiDict.items():
for bannedCE in bannedCEs:
ceDict["CEs"].pop(bannedCE, None)
result = getSiteUpdates(vo, bdiiInfo=ceBdiiDict, log=self.log, onecore=self.injectSingleCoreQueues)
if not result["OK"]:
continue
bdiiChangeSet = bdiiChangeSet.union(result["Value"])
# We have collected all the changes, consolidate VO settings
result = self.__updateCS(bdiiChangeSet)
return result
def __purgeSites(self, ceBdiiDict):
"""Remove all sites that are not in self.selectedSites.
Modifies the ceBdiiDict!
"""
if not self.selectedSites:
return
for site in list(ceBdiiDict):
ces = list(ceBdiiDict[site]["CEs"])
if not ces:
self.log.error("No CE information for site:", site)
continue
siteInCS = "Not_In_CS"
for ce in ces:
res = getCESiteMapping(ce)
if not res["OK"]:
self.log.error("Failed to get DIRAC site name for ce", "%s: %s" % (ce, res["Message"]))
continue
# if the ce is not in the CS the returned value will be empty
if ce in res["Value"]:
siteInCS = res["Value"][ce]
break
self.log.debug("Checking site %s (%s), aka %s" % (site, ces, siteInCS))
if siteInCS in self.selectedSites:
continue
self.log.info("Dropping site %s, aka %s" % (site, siteInCS))
ceBdiiDict.pop(site)
return
def __updateCS(self, bdiiChangeSet):
queueVODict = {}
changeSet = set()
for entry in bdiiChangeSet:
section, option, _value, new_value = entry
if option == "VO":
queueVODict.setdefault(section, set())
queueVODict[section] = queueVODict[section].union(set(new_value.split(",")))
else:
changeSet.add(entry)
for section, VOs in queueVODict.items(): # can be an iterator
changeSet.add((section, "VO", "", ",".join(VOs)))
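# Each "VO" entry may contribute different VOs for the same queue section,
# so the per-section sets collected above are flattened back into a single
# comma-separated CS value here.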
if changeSet:
changeList = sorted(changeSet)
body = "\n".join(["%s/%s %s -> %s" % entry for entry in changeList])
if body and self.addressTo and self.addressFrom:
notification = NotificationClient()
result = notification.sendMail(self.addressTo, self.subject, body, self.addressFrom, localAttempt=False)
if body:
self.log.info("The following configuration changes were detected:")
self.log.info(body)
for section, option, value, new_value in changeSet:
if value == "Unknown" or not value:
self.csAPI.setOption(cfgPath(section, option), new_value)
else:
self.csAPI.modifyValue(cfgPath(section, option), new_value)
if self.dryRun:
self.log.info("Dry Run: CS won't be updated")
self.csAPI.showDiff()
else:
result = self.csAPI.commit()
if not result["OK"]:
self.log.error("Error while committing to CS", result["Message"])
else:
self.log.info("Successfully committed %d changes to CS" % len(changeList))
return result
else:
self.log.info("No changes found")
return S_OK()
| DIRACGrid/DIRAC | src/DIRAC/ConfigurationSystem/Agent/Bdii2CSAgent.py | Python | gpl-3.0 | 13,250 | ["DIRAC"] | bca330d0d8a32ba6011c8fd7220f9244fd05044980a6672deeeea5d45f920c7c |
import autograd.numpy as npa
import numpy as np
import matplotlib.pylab as plt
from autograd.extend import defjvp, defvjp
from scipy.linalg import dft
import sys
sys.path.append('../ceviche')
from ceviche import fdtd, jacobian
from ceviche.utils import my_fft
""" Autograd through spectrum computation """
Nx = 50
Ny = 50
Nz = 1
npml = 10
omega = 2*np.pi*200e12
dL = 5e-8
pml = [npml, npml, 0]
# source parameters
sigma = 10e-15
total_time = 0.5e-12
t0 = sigma * 10
source_amp = 1
source_pos = np.zeros((Nx, Ny, Nz))
source_pos[npml+10, Ny//2, Nz//2] = source_amp
# starting relative permittivity (random for debugging)
eps_r = np.random.random((Nx, Ny, Nz)) + 1
F = fdtd(eps_r, dL=dL, npml=pml)
dt = F.dt
steps = int(total_time / dt)
print('{} time steps'.format(steps))
gaussian = lambda t: source_amp * np.exp(-(t - t0 / dt)**2 / 2 / (sigma / dt)**2)
source = lambda t: source_pos * gaussian(t) * np.cos(omega * t * dt)
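# Note: t is measured in time steps, so t0 and sigma (given in seconds) are
# converted via dt; the cosine factor modulates the Gaussian envelope at the
# target angular frequency omega.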
plt.plot(dt * np.arange(steps), np.sum(source(np.arange(steps)), axis=(0,1)))
plt.xlabel('time (sec)')
plt.ylabel('source amplitude')
plt.show()
measure_pos = np.zeros((Nx, Ny, Nz))
measure_pos[-npml-10, Ny//2, Nz//2] = 1
def objective(eps_space):
F.eps_r *= eps_space
measured = []
for t_index in range(steps):
fields = F.forward(Jz=source(t_index))
measured.append(npa.sum(fields['Ez'] * measure_pos))
measured_f = my_fft(npa.array(measured))
spectral_power = npa.square(npa.abs(measured_f))
return spectral_power
eps_space = 1.0
spectral_power = objective(eps_space)
jac_power = jacobian(objective, mode='forward')(eps_space)
jac_power_num = jacobian(objective, mode='numerical')(eps_space)
n_disp = 140
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
delta_f = 1 / steps / dt
freq_x = np.arange(n_disp) * delta_f
ax1.plot(freq_x, spectral_power[:n_disp], 'k-')
ax2.plot(freq_x, jac_power[:n_disp,0], 'g-', label='FMD')
ax2.plot(freq_x, jac_power_num[:n_disp,0], 'bo', label='numerical')
ax1.set_ylabel('spectral power', color='k')
ax2.set_ylabel('dP/depsilon', color='g')
ax2.spines['right'].set_color('g')
ax2.legend()
ax2.tick_params(axis='y', colors='g')
plt.show()
| fancompute/ceviche | examples/autograd_fft.py | Python | mit | 2,151 | ["Gaussian"] | 72d2d0f150d914b755fb0ec3a434c8ae48475d709c716263ec3598363b688c90 |
# encoding: utf-8
import copy
import collections
from lkbutils import nodemodel, yamllib
class RedundantRelation(ValueError):
def __init__(self, src, dest, link=None, encoding=u'utf-8'):
self.src = src
self.dest = dest
self.link = link
msg = self.mkmsg(src, dest, link)
super(RedundantRelation, self).__init__(msg.encode(encoding))
def mkmsg(self, src, dest, link):
msg = u'link exists: {} -> {}'.format(src, dest)
if link is not None:
msg += u' on {}'.format(link)
return msg
class InterLink(ValueError):
pass
class Cyclic(ValueError):
def __init__(self, path, relation=u'?', encoding='utf-8'):
self.path = path
self.relation = relation
self.custom_msg = self.mkmsg(path, relation)
super(Cyclic, self).__init__(self.custom_msg.encode(encoding))
def mkmsg(self, path, relation):
return u'cyclic path found on "{}": {}'.format(
relation,
u' -> '.join(path),
)
class RelationChecker(object):
"""
Help create a graph of a single relation under a set of rules.
"""
def __init__(self, relation=None,
dry=False, nointerlinks=False, acyclic=False):
"""
Help create a graph of a single relation under a set of rules.
Options:
* relation: used for RelationChecker.relation property.
* dry: do not duplicate same links.
* nointerlinks: do not create interlinks.
* acyclic: do not create cycle.
"""
self._relation = relation
self._links = collections.defaultdict(list)
self._dry = dry
self._nointerlinks = nointerlinks
self._acyclic = acyclic
@property
def relation(self):
"""Topic relation."""
return self._relation
def add(self, src, dest):
"""Add a link from src to dest."""
if self._dry:
self._check_dry(src, dest)
if self._nointerlinks:
self._check_nointerlinks(src, dest)
if self._acyclic:
self._check_acyclic(src, dest)
self._links[src].append(dest)
return (src, dest)
def iterpairs(self):
"""Iterate over pairs of links."""
links = self._links
for src in links:
for dest in links[src]:
yield src, dest
def _check_dry(self, src, dest):
if dest in self._links[src]:
raise RedundantRelation(src, dest)
def _check_nointerlinks(self, src, dest):
if src in self._links[dest]:
raise InterLink(
u'inverse link found against {} -> {}'.format(src, dest)
)
def _check_acyclic(self, src, dest):
links = copy.deepcopy(self._links)
links[src].append(dest)
def visit(node):
ancestors.append(node)
for linked_node in links[node]:
if linked_node in ancestors:
raise Cyclic(
ancestors + [linked_node],
relation=self.relation,
)
if linked_node not in visited:
visit(linked_node)
ancestors.remove(node)
visited.add(node)
ancestors = []
visited = set()
for node in links.keys():
if node not in visited:
visit(node)
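# Example sketch (illustrative, not in the original module): with
# acyclic=True, adding a back-edge raises Cyclic.
#
#     checker = RelationChecker(relation=u'narrower', acyclic=True)
#     checker.add('a', 'b')
#     checker.add('b', 'c')
#     checker.add('c', 'a')   # raises Cyclic: a -> b -> c -> a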
class RelationProvider(object):
"""
Manages a consistent relation graph.
"""
def __init__(self, relation=None,
dry=False, nointerlinks=False, acyclic=False):
"""
Manages a consistent relation graph.
Options:
relation: object used for linking nodes in the
underlying graph model.
dry, nointerlinks, acyclic:
options for connection rules / see RelationChecker.
"""
self._relation = relation
self._graph = self.create_graph()
self._relation_checker = RelationChecker(
relation=relation,
dry=dry, nointerlinks=nointerlinks, acyclic=acyclic,
)
self._options = dict(
dry=dry, nointerlinks=nointerlinks, acyclic=acyclic,
)
@property
def graph(self):
"""Entire nodes graph."""
return self._graph
@property
def relationchecker(self):
"""Proxy to self._relation_checker."""
return self._relation_checker
def create_graph(self):
"""Create an empty node graph."""
return self.depending_library.create_graph()
def add(self, src, dest, src_id=None, dest_id=None):
"""
Add a link from src to dest.
Options:
src_id/dest_id: Used for identifying src/dest nodes,
internally for RelationProvider._relation_checker.
For efficiency & error trace message.
"""
self._check_link(src, dest, src_id=src_id, dest_id=dest_id)
self.link(src, dest)
return (src, dest)
def _check_link(self, src, dest, src_id=None, dest_id=None):
"""Check link validity against the rules."""
if src_id is not None:
src = src_id
if dest_id is not None:
dest = dest_id
self._relation_checker.add(src, dest)
def link(self, src, dest):
"""Create a link with RelationProvider.relation."""
self.depending_library.link(self.graph, src, self._relation, dest)
def serialize(self, nodeprovider=None):
"""Serialize relations information as YAML."""
rel_map = {}
rel_map[u'options'] = self._options
identifier_getter = self._node_identifier_getter(nodeprovider)
rel_map[u'pairs'] = sorted(
[
u'{} {}'.format(
identifier_getter(src), identifier_getter(dest)
)
for src, dest in self.relationchecker.iterpairs()
]
)
return yamllib.fancydump(rel_map)
def _node_identifier_getter(self, nodeprovider):
if nodeprovider is None:
return lambda node: node
else:
def getter(node):
return nodeprovider.get_identifier_from(node)
return getter
class RDFLibRelationProvider(RelationProvider):
"""RelationProvider subclass using rdflib models."""
depending_library = nodemodel.RDFLib()
def noconflict_providers(providers, nodeprovider=None):
"""Check no conflicts exists among relation providers' pair sets."""
checkers = [(p._relation, p._relation_checker) for p in providers]
all_pairs = set()
for rel, checker in checkers:
pairs = set(checker.iterpairs())
redundants = all_pairs.intersection(pairs)
if redundants:
src, dest = redundants.pop()
if nodeprovider is None:
raise RedundantRelation(src, dest, link=rel)
src = nodeprovider.get_origin_name_from(src)
dest = nodeprovider.get_origin_name_from(dest)
link = nodeprovider.get_origin_name_from(rel)
raise RedundantRelation(src, dest, link=link)
else:
all_pairs.update(pairs)
return all_pairs
| drowse314-dev-ymat/lexical-knowledge-base-for-japanese-civil-law | lkbutils/relationprovider/__init__.py | Python | mit | 7,314 | ["VisIt"] | c34f5bfcbe843b7059d39b4cfcbfcc77e35f621879011cc3463f29aab8cce746 |
import gzip
import sys
# the location of the taxonomy files
defaultdir = '/home2/db/taxonomy/current/'
'''
From nodes.dmp
tax_id -- node id in GenBank taxonomy database
parent tax_id -- parent node id in GenBank taxonomy database
rank -- rank of this node (superkingdom, kingdom, ...)
embl code -- locus-name prefix; not unique
division id -- see division.dmp file
inherited div flag (1 or 0) -- 1 if node inherits division from parent
genetic code id -- see gencode.dmp file
inherited GC flag (1 or 0) -- 1 if node inherits genetic code from parent
mitochondrial genetic code id -- see gencode.dmp file
inherited MGC flag (1 or 0) -- 1 if node inherits mitochondrial gencode from parent
GenBank hidden flag (1 or 0) -- 1 if name is suppressed in GenBank entry lineage
hidden subtree root flag (1 or 0) -- 1 if this subtree has no sequence data yet
comments -- free-text comments and citations
'''
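# Illustrative example of a raw nodes.dmp line (fields are '\t|\t'-separated
# and each line ends with '\t|'), which readNodes() below splits into the
# taxonNode constructor arguments:
#   2\t|\t131567\t|\tsuperkingdom\t|\t\t|\t0\t|\t0\t|\t11\t|\t0\t|\t0\t|\t0\t|\t0\t|\t0\t|\t\t|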
class taxonNode:
def __init__(self, t = None, p = None, r = None, e = None, d = None, i = None, gc = None, igc = False, mgc = None, imgc = False, gh = False, hs = False, c=None, *others):
self.parent = p
self.taxid = t
self.rank = r
self.embl = e
self.division = d
self.inherited = i
self.geneticCode = gc
self.inheritedGC = igc
self.mitochondrialGeneticCode = mgc
self.inheritedMitochondrialGeneticCode = imgc
self.GenBankHidden = gh
self.hiddenSubtree = hs
self.comments = c
if len(others) > 0:
print "WARNING: ", p, " :: ", others
'''
Taxonomy names file (names.dmp):
tax_id -- the id of node associated with this name
name_txt -- name itself
unique name -- the unique variant of this name if name not unique
name class -- (synonym, common name, ...)
'''
class taxonName:
def __init__(self, t = None, n = None, u = None, nc = None):
self.taxid = t
self.name = n
self.unique = u
self.nameClass = nc
'''
Divisions file (division.dmp):
division id -- taxonomy database division id
division cde -- GenBank division code (three characters)
division name -- e.g. BCT, PLN, VRT, MAM, PRI...
comments
'''
class taxonDivision:
def __init__(self, i = None, c = None, n = None, co = None):
self.divid = i
self.name = n
self.code = c
self.comments = co
def readTaxa():
'''Read the taxonomy tree. An alias for readNodes()'''
return readNodes()
def readNodes():
'''Read the node information from the default location'''
taxa = {}
fin = open(defaultdir+'nodes.dmp', 'r')
for line in fin:
line = line.rstrip('\t|\n')
cols = line.split('\t|\t')
t=taxonNode(*cols)
taxa[cols[0]]=t
fin.close()
return taxa
def extendedNames():
'''
Extended names returns "genbank synonym" and "synonym" as well as
"scientific name" and "blast name". Because we are reading more
names it is slower and consumes more memory
'''
names = {}
blastname={}
genbankname={}
synonym={}
fin = open(defaultdir+'names.dmp', 'r')
for line in fin:
line = line.rstrip('\t|\n')
cols = line.split('\t|\t')
t=taxonName(*cols)
if "scientific name" in cols[3]:
names[cols[0]]=t
elif "blast name" in cols[3]:
blastname[cols[0]]=t
elif "genbank synonym" in cols[3]:
genbankname[cols[0]]=t
elif "synonym" in cols[3]:
synonym[cols[0]]=t
fin.close()
return names, blastname, genbankname, synonym
def readNames():
'''Read the name information from the default location'''
names = {}
blastname={}
fin = open(defaultdir+'names.dmp', 'r')
for line in fin:
line = line.rstrip('\t|\n')
cols = line.split('\t|\t')
t=taxonName(*cols)
if "scientific name" in cols[3]:
names[cols[0]]=t
if "blast name" in cols[3]:
blastname[cols[0]]=t
fin.close()
return names, blastname
def readDivisions():
'''Read the divisions.dmp file'''
divs = {}
fin = open(defaultdir+'division.dmp', 'r')
for line in fin:
line = line.rstrip('\t|\n')
cols = line.split('\t|\t')
t=taxonDivision(*cols)
divs[cols[0]]=t
fin.close()
return divs
def readGiTaxId(dtype='nucl', gz=True):
'''
Read gi_taxid.dmp. You can specify the type of database that you
want to parse, default is nucl (nucleotide), can also accept prot
(protein).
Returns a hash of gi and taxid
'''
if dtype != 'nucl' and dtype != 'prot':
sys.stderr.write("Type must be either nucl or prot, not " + dtype + "\n")
sys.exit(-1)
if gz:
fileIn = defaultdir + "/gi_taxid_" + dtype + ".dmp.gz"
fin = gzip.open(fileIn, 'r')
else:
fileIn = defaultdir + "/gi_taxid_" + dtype + ".dmp"
fin = open(fileIn, 'r')
taxid={}
for line in fin:
line = line.strip()
parts=line.split("\t")
taxid[parts[0]]=parts[1]
fin.close()
return taxid
def readTaxIdGi(dtype='nucl'):
'''
Read gi_taxid.dmp. You can specify the type of database that you
want to parse, default is nucl (nucleotide), can also accept prot
(protein).
NOTE: This method returns taxid -> gi not the other way around. This
may be a one -> many mapping (as a single taxid maps to more than
one gi), and so we return a list of gi's for each taxid.
Returns a hash of taxid and gi
'''
if dtype != 'nucl' and dtype != 'prot':
sys.stderr.write("Type must be either nucl or prot, not " + dtype + "\n")
sys.exit(-1)
fileIn = defaultdir + "/gi_taxid_" + dtype + ".dmp.gz"
taxid={}
fin = gzip.open(fileIn, 'r')
for line in fin:
line = line.strip()
parts=line.split("\t")
if parts[1] not in taxid:
taxid[parts[1]]=[]
taxid[parts[1]].append(parts[0])
fin.close()
return taxid
| dacuevas/bioinformatics | ncbi_taxonomy/taxon.py | Python | mit | 6,722 | ["BLAST"] | 5468878954cd8636a68a482329917f8acd62c92850dc12d5c7f522f0ebd7a381 |
import sys
import numpy as np
import gzip
import pysam
import operator
import util
NUCLEOTIDES = set(['A', 'C', 'T', 'G'])
SNP_UNDEF = -1
# codes for CIGAR string
BAM_CMATCH = 0 # M - match/mismatch to ref
BAM_CINS = 1 # I - insertion in read relative to ref
BAM_CDEL = 2 # D - deletion in read relative to ref
BAM_CREF_SKIP = 3 # N - skipped region from reference (e.g. intron)
BAM_CSOFT_CLIP = 4 # S - soft clipping (clipped sequence present in seq)
BAM_CHARD_CLIP = 5 # H - hard clipping (clipped sequence NOT present in seq)
BAM_CPAD = 6 # P - padding (silent deletion from padded reference)
BAM_CEQUAL = 7 # = - sequence match
BAM_CDIFF = 8 # X - sequence mismatch
class SNPTable(object):
def __init__(self):
self.clear()
def clear(self):
# snp_index is an array of length max(snp_pos) that provides a lookup
# into snp_pos, snp_allele1, etc. by chromosome position.
# For example, if the first and second SNPs on the chromosome are
# at positions 1234, 1455 then elements 1233 and 1454 of the
# snp_index array will be 0 and 1 (and can be used to look up
# info for the SNP in the snp_pos, snp_allele1, snp_allele2 arrays)
self.snp_index = np.array([], dtype=np.int32)
self.snp_pos = np.array([], dtype=np.int32)
self.snp_allele1 = np.array([], dtype="|S10")
self.snp_allele2 = np.array([], dtype="|S10")
self.haplotypes = None
self.n_snp = 0
self.samples = []
def read_h5(self, snp_tab_h5, snp_index_h5, hap_h5, chrom_name,
samples=None):
"""read in SNPs and indels from HDF5 input files"""
node_name = "/%s" % chrom_name
if node_name not in snp_tab_h5:
sys.stderr.write("WARNING: chromosome %s is not "
"in snp_tab.h5 file, assuming no SNPs "
"for this chromosome\n" % chrom_name)
self.clear()
return
else:
# get numpy array of SNP idices
node = snp_index_h5.getNode(node_name)
self.snp_index = node[:]
# get numpy array of SNP positions
node = snp_tab_h5.getNode(node_name)
self.snp_pos = node[:]['pos']
self.snp_allele1 = node[:]['allele1']
self.snp_allele2 = node[:]['allele2']
self.n_snp = self.snp_pos.shape[0]
self.samples = self.get_h5_samples(hap_h5, chrom_name)
self.haplotypes = hap_h5.getNode(node_name)
if samples:
# reduce set of SNPs and indels to ones that are
# polymorphic in provided list of samples
samp_idx_dict, samp_idx = self.get_h5_sample_indices(hap_h5, chrom_name, samples)
hap_idx = np.empty(samp_idx.shape[0]*2, dtype=np.int)
hap_idx[0::2] = samp_idx*2
hap_idx[1::2] = samp_idx*2 + 1
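# Each sample owns two adjacent haplotype columns (2*i and 2*i + 1), so the
# sample indices are interleaved here to select both columns per sample.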
haps = self.haplotypes[:,hap_idx]
# count number of ref and non-ref alleles,
# ignoring undefined (-1s)
nonref_count = np.apply_along_axis(np.sum, 1, haps == 1)
ref_count = np.apply_along_axis(np.sum, 1, haps == 0)
total_count = nonref_count + ref_count
is_polymorphic = (ref_count > 0) & (ref_count < total_count)
# reduce to set of polymorphic positions
sys.stderr.write("reducing %d SNPs on chromosome "
"%s to %d positions that are polymorphic in "
"sample of %d individuals\n" %
(haps.shape[0], chrom_name,
np.sum(is_polymorphic), len(samples)))
# make filtered and ordered samples for this chromosome
# that corresponds to order of haplotypes
sorted_samps = sorted(samp_idx_dict.items(),
key=operator.itemgetter(1))
self.samples = [x[0] for x in sorted_samps]
self.haplotypes = haps[is_polymorphic,]
self.snp_pos = self.snp_pos[is_polymorphic]
self.snp_allele1 = self.snp_allele1[is_polymorphic]
self.snp_allele2 = self.snp_allele2[is_polymorphic]
self.n_snp = self.snp_pos.shape[0]
# regenerate index to point to reduced set of polymorphic SNPs
self.snp_index[:] = -1
self.snp_index[self.snp_pos-1] = np.arange(self.n_snp,
dtype=np.int32)
def get_h5_samples(self, h5f, chrom_name):
"""Reads list of samples that are present in 'samples' table
from haplotype HDF5 file"""
samples = None
node_name = "/samples_%s" % chrom_name
if node_name in h5f:
node = h5f.getNode(node_name)
samples = [row["name"] for row in node]
else:
raise ValueError("Cannot retrieve haplotypes for "
"specified samples, because haplotype "
"file %s does not contain '%s' table. "
"May need to regenerate haplotype HDF5 file "
"using snp2h5" % (h5f.filename, node_name))
return samples
def get_h5_sample_indices(self, hap_h5, chrom_name, samples):
"""returns the indices of the the specified samples in the
HDF5 haplotype file. Indices are returned in a dictionary
keyed on sample and as an array. Samples that are not
found in the haplotype HDF5 file for the specified chromosome
are not included in the dict or the array."""
hap_samples = self.get_h5_samples(hap_h5, chrom_name)
not_seen_samples = set(samples)
seen_samples = set([])
samp_idx = []
samp_idx_dict = {}
# get haplotype table indices of samples
for i in range(len(hap_samples)):
if hap_samples[i] in seen_samples:
sys.stderr.write("WARNING: sample %s is present multiple "
"times in haplotype table\n" % hap_samples[i])
elif hap_samples[i] in not_seen_samples:
# record index of this sample, add to set of samples
# we have already observed
samp_idx.append(i)
samp_idx_dict[hap_samples[i]] = i
not_seen_samples.remove(hap_samples[i])
seen_samples.add(hap_samples[i])
else:
# this haplotype sample not in requested list
pass
if len(not_seen_samples) > 0:
sys.stderr.write("WARNING: the following samples are not "
"present in haplotype table for chromosome "
"%s: %s" %
(chrom_name, ",".join(not_seen_samples)))
return samp_idx_dict, np.array(samp_idx, dtype=np.int)
def is_snp(self, allele1, allele2):
"""returns True if alleles appear to be
single-nucleotide polymorphism, returns false
if appears to be an indel"""
if (len(allele1) == 1) and (len(allele2) == 1):
if allele1 in NUCLEOTIDES and allele2 in NUCLEOTIDES:
# this is a SNP
return True
else:
if ("-" in allele1) or ("-" in allele2):
# 1bp indel
return False
else:
sys.stderr.write("WARNING: unexpected character "
"in SNP alleles:\n%s/%s\n" %
(allele1, allele2))
return False
return False
def read_file(self, filename):
"""read in SNPs and indels from text input file"""
try:
if util.is_gzipped(filename):
f = gzip.open(filename)
else:
f = open(filename, "r")
except IOError:
sys.stderr.write("WARNING: unable to read from file '%s', "
"assuming no SNPs for this chromosome\n" %
filename)
self.clear()
return
snp_pos_list = []
snp_allele1_list = []
snp_allele2_list = []
max_pos = 0
for line in f:
words = line.split()
if(len(words) < 3):
raise ValueError("expected at least 3 values per SNP "
"file line but got %d:\n"
"%s\n" % (len(words), line))
pos = int(words[0])
a1 = words[1].upper().replace("-", "")
a2 = words[2].upper().replace("-", "")
if pos <= 0:
raise ValueError("expected SNP position to be >= 1:\n%s\n" %
line)
if pos > max_pos:
max_pos = pos
snp_pos_list.append(pos)
snp_allele1_list.append(a1)
snp_allele2_list.append(a2)
f.close()
# convert lists to numpy arrays, which allow for faster
# lookups and use less memory
self.snp_pos = np.array(snp_pos_list, dtype=np.int32)
del snp_pos_list
self.snp_allele1 = np.array(snp_allele1_list, dtype="|S10")
del snp_allele1_list
self.snp_allele2 = np.array(snp_allele2_list, dtype="|S10")
del snp_allele2_list
# make another array that makes it easy to lookup SNPs by their position
# on the chromosome
self.snp_index = np.empty(max_pos, dtype=np.int32)
self.snp_index[:] = SNP_UNDEF
self.snp_index[self.snp_pos-1] = np.arange(self.snp_pos.shape[0])
self.n_snp = self.snp_pos.shape[0]
# currently haplotypes can only be read from HDF5 file
self.haplotypes = None
def get_overlapping_snps(self, read):
"""Returns several lists:
[1] indices of SNPs that this read overlaps,
[2] positions in read sequence that overlap SNPs,
[3] indices for indels that read overlaps,
[4] positions in read sequence that overlap indels.
First base of read is position 1."""
# read.cigar is a list of tuples. Each tuple has two entries. The first
# entry specifies the character in the cigar and the second entry
# specifies the length of that character. The values are
# M BAM_CMATCH 0
# I BAM_CINS 1
# D BAM_CDEL 2
# N BAM_CREF_SKIP 3
# S BAM_CSOFT_CLIP 4
# H BAM_CHARD_CLIP 5
# P BAM_CPAD 6
# = BAM_CEQUAL 7
# X BAM_CDIFF 8
# E.g. (0, 5) means 5 matches, and (4, 2) means a soft clip of 2bp
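# For example, a hypothetical cigar of [(0, 50), (2, 3), (0, 48)] describes
# 50 aligned bases, a 3 bp deletion, then 48 more aligned bases: the read
# consumes 50 + 48 = 98 bases while the reference consumes 50 + 3 + 48 = 101.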
read_start = 0
read_end = 0
genome_start = read.pos
genome_end = read.pos
# index into combined SNP/indel table for overlapping SNPs
snp_idx = []
# positions in read of overlapping SNPs
snp_read_pos = []
# index into combined SNP/indel table for overlapping indels
indel_idx = []
# positions in read of overlapping indels
indel_read_pos = []
for cigar in read.cigar:
op = cigar[0] # CIGAR 'operation'
op_len = cigar[1] # length of operation
if (op == BAM_CMATCH) or (op == BAM_CEQUAL) or (op == BAM_CDIFF):
# match or mismatch to reference
read_start = read_end + 1
read_end = read_start + op_len - 1
genome_start = genome_end + 1
genome_end = genome_start + op_len - 1
# check for SNP in this genome segment
s = genome_start - 1
e = min(genome_end, self.snp_index.shape[0])
s_idx = self.snp_index[s:e]
offsets = np.where(s_idx != SNP_UNDEF)[0]
if offsets.shape[0] > 0:
# there are overlapping SNPs and/or indels
for offset in offsets:
read_pos = offset + read_start
allele1 = self.snp_allele1[s_idx[offset]]
allele2 = self.snp_allele2[s_idx[offset]]
if self.is_snp(allele1, allele2):
snp_idx.append(s_idx[offset])
snp_read_pos.append(read_pos)
else:
indel_idx.append(s_idx[offset])
indel_read_pos.append(read_pos)
elif op == BAM_CINS:
# insert in read relative to reference
read_start = read_end + 1
read_end = read_start + op_len - 1
# Genome sequence does not advance, no possibility
# for read to overlap SNP, since these bases do
# not exist in reference.
# INDELs here should be picked up
# by one of flanking match segments
elif op == BAM_CDEL:
# deletion in read relative to reference
genome_start = genome_end + 1
genome_end = genome_start + op_len - 1
# Read sequence does not advance, no possibility
# for read to overlap SNP, since these bases do
# not exist in read
# in most cases deletion should be picked up
# by flanking match segment, but there could be
# nested indels
s = genome_start - 1
e = min(genome_end, self.snp_index.shape[0])
# check for INDEL in this genome segment
s_idx = self.snp_index[s:e]
offsets = np.where(s_idx != SNP_UNDEF)[0]
if offsets.shape[0] > 0:
# there are overlapping SNPs and/or indels
for offset in offsets:
read_pos = offset + read_start
allele1 = self.snp_allele1[s_idx[offset]]
allele2 = self.snp_allele2[s_idx[offset]]
if self.is_snp(allele1, allele2):
# ignore SNP
pass
else:
indel_idx.append(s_idx[offset])
# position in read is where we last left off
# in read sequence
indel_read_pos.append(read_end)
elif op == BAM_CREF_SKIP:
# section of skipped reference, such as intron
genome_end = genome_end + op_len
genome_start = genome_end
# do nothing with SNPs/indels in this region
# since they are skipped
elif op == BAM_CSOFT_CLIP:
# this part of read skipped
read_start = read_end + 1
read_end = read_start + op_len - 1
# This is like insert, but at end of the read.
# Sequence was not considered in alignment.
# Usually this is because bases at end of read
# were low quality. One option would be to
# pretend soft-clipped part of read was aligned
# like match/mismatch and to consider SNPs in this
# region. We have decided to not consider SNPs
# because this part of read is not actually aligned.
elif op == BAM_CHARD_CLIP:
# these bases not included in read or genome
pass
elif op == BAM_CPAD:
# like an insert, likely only used in multiple-sequence
# alignment where inserts may be of different lengths
# in different seqs
read_start = read_end + 1
read_end = read_start + op_len - 1
else:
raise ValueError("unknown CIGAR code %d" % op)
if read_end != len(read.seq):
raise ValueError("length of read segments in CIGAR %d "
"does not add up to query length (%d)" %
(read_end, len(read.seq)))
return snp_idx, snp_read_pos, indel_idx, indel_read_pos
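# A minimal, self-contained sketch of the snp_index trick used by
# read_file() above: a dense array maps a 1-based chromosome position to a
# row of the SNP table, with empty slots holding an "undefined" sentinel.
# All names below are demo-local assumptions, not part of this module.
if __name__ == "__main__":
import numpy as np
DEMO_UNDEF = -1 # stands in for SNP_UNDEF
demo_pos = np.array([3, 7, 10], dtype=np.int32) # 1-based SNP positions
demo_index = np.full(demo_pos.max(), DEMO_UNDEF, dtype=np.int32)
demo_index[demo_pos - 1] = np.arange(demo_pos.shape[0])
assert demo_index[7 - 1] == 1 # position 7 -> table row 1
assert demo_index[5 - 1] == DEMO_UNDEF # no SNP at position 5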
|
smozaffari/WASP
|
mapping/snptable.py
|
Python
|
apache-2.0
| 16,983
|
[
"pysam"
] |
2716d9802150925f41c2389e9a31afe6d4712b59a55fbb4d91ca84c886d20fb1
|
#!/usr/bin/env python
# @package adjust_timeline
# \author Andy Aschwanden, University of Alaska Fairbanks, USA
# \brief Script adjusts a time axis of a file.
# \details Script adjusts the time axis of a file.
# Say you have monthly climate forcing from 1980-1-1 through 2001-1-1 in
# the forcing file foo_1980-1999.nc to be used with, e.g. -surface_given_file,
# but you want the model to run from 1991-1-1 through 2001-1-1.
#
# Usage:
#
# \verbatim $ adjust_timeline.py --start_date '1991-1-1'
# time_1991-2000.nc \endverbatim
import os
import sys
from argparse import ArgumentParser
from dateutil import rrule
from dateutil.parser import parse
import time
import numpy as np
try:
import netCDF4 as netCDF
except ImportError:
print("netCDF4 is not installed!")
sys.exit(1)
NC = netCDF.Dataset
from netcdftime import utime, datetime
# Set up the option parser
parser = ArgumentParser()
parser.description = '''Script adjusts the time axis of a file, writing time
and time bounds variables that can be used to force PISM via the command-line
option -time_file, or to adjust the time axis for postprocessing.'''
parser.add_argument("FILE", nargs='*')
parser.add_argument("-p", "--periodicity", dest="periodicity",
help='''periodicity, e.g. monthly, daily, etc. Default=monthly''',
default="monthly")
parser.add_argument("-a", "--start_date", dest="start_date",
help='''Start date in ISO format. Default=1989-1-1''',
default='1989-1-1')
parser.add_argument("-c", "--calendar", dest="calendar",
choices=['standard', 'gregorian', 'no_leap', '365_day', '360_day', 'julian'],
help='''Sets the calendar. Default="standard".''',
default='standard')
parser.add_argument("-i", "--interval_type", dest="interval_type",
choices=['start', 'mid', 'end'],
help='''Defines whether the time values t_k are the start points, the mid points 1/2*(tb_(k-1) + tb_k), or the end points of the time bounds tb_k. Default="mid".''',
default='mid')
parser.add_argument("-u", "--ref_unit", dest="ref_unit",
help='''Reference unit. Default=days. Use of months or
years is NOT recommended.''', default='days')
parser.add_argument("-d", "--ref_date", dest="ref_date",
help='''Reference date. Default=1960-1-1''',
default='1960-1-1')
options = parser.parse_args()
interval_type = options.interval_type
periodicity = options.periodicity.upper()
start_date = parse(options.start_date)
ref_unit = options.ref_unit
ref_date = options.ref_date
args = options.FILE
infile = args[0]
time1 = time.time()
nc = NC(infile, 'a')
nt = len(nc.variables['time'])
time_units = ("%s since %s" % (ref_unit, ref_date))
time_calendar = options.calendar
cdftime = utime(time_units, time_calendar)
# create a dictionary so that we can supply the periodicity as a
# command-line argument.
pdict = {}
pdict['SECONDLY'] = rrule.SECONDLY
pdict['MINUTELY'] = rrule.MINUTELY
pdict['HOURLY'] = rrule.HOURLY
pdict['DAILY'] = rrule.DAILY
pdict['WEEKLY'] = rrule.WEEKLY
pdict['MONTHLY'] = rrule.MONTHLY
pdict['YEARLY'] = rrule.YEARLY
prule = pdict[periodicity]
# reference date from command-line argument
r = time_units.split(' ')[2].split('-')
refdate = datetime(int(r[0]), int(r[1]), int(r[2]))
# create a list of nt+1 bounds dates starting at start_date,
# with periodicity prule
bnds_datelist = list(rrule.rrule(prule, dtstart=start_date, count=nt+1))
# calculate the days since refdate for the bounds dates
bnds_interval_since_refdate = cdftime.date2num(bnds_datelist)
if interval_type == 'mid':
# mid-point value:
# time[n] = (bnds[n] + bnds[n+1]) / 2
time_interval_since_refdate = (bnds_interval_since_refdate[0:-1] +
np.diff(bnds_interval_since_refdate) / 2)
elif interval_type == 'start':
time_interval_since_refdate = bnds_interval_since_refdate[:-1]
else:
time_interval_since_refdate = bnds_interval_since_refdate[1:]
# create the time dimension only if it does not yet exist
time_dim = "time"
if time_dim not in nc.dimensions.keys():
nc.createDimension(time_dim)
# create a new dimension for bounds only if it does not yet exist
bnds_dim = "nb2"
if bnds_dim not in nc.dimensions.keys():
nc.createDimension(bnds_dim, 2)
# variable names consistent with PISM
time_var_name = "time"
bnds_var_name = "time_bnds"
# create time variable
if time_var_name not in nc.variables:
time_var = nc.createVariable(time_var_name, 'd', dimensions=(time_dim,))
else:
time_var = nc.variables[time_var_name]
time_var[:] = time_interval_since_refdate
time_var.bounds = bnds_var_name
time_var.units = time_units
time_var.calendar = time_calendar
time_var.standard_name = time_var_name
time_var.axis = "T"
# create time bounds variable
if bnds_var_name not in nc.variables:
time_bnds_var = nc.createVariable(bnds_var_name, 'd', dimensions=(time_dim, bnds_dim))
else:
time_bnds_var = nc.variables[bnds_var_name]
time_bnds_var[:, 0] = bnds_interval_since_refdate[:-1]
time_bnds_var[:, 1] = bnds_interval_since_refdate[1:]
# writing global attributes
script_command = ' '.join([time.ctime(), ':', __file__.split('/')[-1],
' '.join([str(x) for x in args])])
nc.history = script_command
nc.Conventions = "CF 1.5"
nc.close()
time2 = time.time()
print('adjust_timeline.py took {}s'.format(time2 - time1))
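# A minimal sketch of the interval_type arithmetic above, with made-up
# bounds values in days since the reference date:
# bnds = [0.0, 31.0, 59.0, 90.0]
# 'mid' -> bnds[:-1] + diff(bnds)/2 = [15.5, 45.0, 74.5]
# 'start' -> bnds[:-1] = [0.0, 31.0, 59.0]
# 'end' -> bnds[1:] = [31.0, 59.0, 90.0]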
|
talbrecht/pism_pik
|
util/adjust_timeline.py
|
Python
|
gpl-3.0
| 5,497
|
[
"NetCDF"
] |
babff84294a0bf39f8a9fbfd2e3a69612ff995257c2ac719d7c2c89b2e43b2e3
|
# -*- coding: utf-8 -*-
"""Test grounding."""
import unittest
from unittest import mock
import bioregistry
import pyobo
from pyobo.mocks import get_mock_id_name_mapping
from pybel.constants import (
ANNOTATIONS,
CONCEPT,
GMOD,
IDENTIFIER,
KIND,
MEMBERS,
NAME,
NAMESPACE,
PMOD,
VARIANTS,
)
from pybel.grounding import (
_NAME_REMAPPING,
_process_annotations,
_process_concept,
_process_node,
)
from pybel.language import Entity
def _failer(*_, **__):
"""Fail for all calls to this function."""
raise ValueError("Called a PyOBO function that should be mocked")
pyobo.getters.get = _failer
pyobo.api.names.cached_mapping = _failer
pyobo.api.names.cached_multidict = _failer
mock_id_name_data = {
"mesh": {
"D009474": "Neurons",
"D010300": "Parkinson Disease",
"D013378": "Substantia Nigra",
},
"doid": {
"14330": "Parkinson's disease",
},
"go": {
"0006468": "protein phosphorylation",
},
"complexportal": {
"CPX-1829": "Checkpoint clamp complex",
},
"ncbitaxon": {
"9606": "Homo sapiens",
},
"cl": {
"0000030": "glioblast",
},
"fplx": {
"TAP": "TAP",
"Gamma_secretase": "Gamma_secretase",
},
}
mock_id_name_mapping = get_mock_id_name_mapping(mock_id_name_data)
_mock_mnemonic_data = {
"O60921": "HUS1_HUMAN",
"Q99638": "RAD9A_HUMAN",
"O60671": "RAD1_HUMAN",
}
_mock_reverse_mnemonic_data = {v: k for k, v in _mock_mnemonic_data.items()}
def _mock_get_mnemonic(identifier, *_, **__):
return _mock_mnemonic_data[identifier]
mock_get_mnemonic = mock.patch("pybel.grounding.get_mnemonic", side_effect=_mock_get_mnemonic)
mock_get_id_from_mnemonic = mock.patch(
"pybel.grounding.get_id_from_mnemonic",
side_effect=_mock_reverse_mnemonic_data.get,
)
@mock_id_name_mapping
@mock_get_mnemonic
@mock_get_id_from_mnemonic
class TestProcessConcept(unittest.TestCase):
"""Test the :func:`_process_concept` function."""
def _help(self, expected, original, msg=None):
expected = {CONCEPT: expected}
d = {CONCEPT: original}
self.assertIsNotNone(
bioregistry.normalize_prefix(expected[CONCEPT][NAMESPACE]),
msg="Unrecognized namespace",
)
_process_concept(concept=d[CONCEPT], node=d)
self.assertEqual(expected[CONCEPT], d[CONCEPT], msg=msg)
def test_normalize_prefix_case(self, *_):
"""Test normalizing the prefix to the correct case."""
self._help(
{NAMESPACE: "mesh", NAME: "Neurons", IDENTIFIER: "D009474"},
{NAMESPACE: "MESH", NAME: "Neurons", IDENTIFIER: "D009474"},
)
def test_normalize_prefix_synonym(self, *_):
"""Test normalizing the prefix based on the synonym dictionary."""
self._help(
{NAMESPACE: "mesh", NAME: "Neurons", IDENTIFIER: "D009474"},
{NAMESPACE: "MESHA", NAME: "Neurons", IDENTIFIER: "D009474"},
)
def test_lookup_identifier(self, *_):
"""Test look up of the identifier when given the name."""
self._help(
{NAMESPACE: "mesh", NAME: "Neurons", IDENTIFIER: "D009474"},
{NAMESPACE: "MESH", NAME: "Neurons"},
)
def test_lookup_name_as_identifier(self, *_):
"""Test look up of the name when given as the identifier."""
self._help(
{NAMESPACE: "mesh", NAME: "Neurons", IDENTIFIER: "D009474"},
{NAMESPACE: "MESH", IDENTIFIER: "Neurons"},
)
def test_lookup_uniprot_identifier(self, *_):
"""Test looking up a uniprot identifier."""
self._help(
{NAMESPACE: "uniprot", NAME: "HUS1_HUMAN", IDENTIFIER: "O60921"},
{NAMESPACE: "UniProt", NAME: "HUS1_HUMAN"},
)
def test_fix_uniprot_identifier_as_name(self, *_):
"""Test lookup of the UniProt identifier when given a UniProt identifier as the name."""
self._help(
{NAMESPACE: "uniprot", NAME: "HUS1_HUMAN", IDENTIFIER: "O60921"},
{NAMESPACE: "UniProt", NAME: "O60921"},
)
def test_fix_wrong_name(self, *_):
"""Test overwriting a wrong name (not UniProt)."""
self._help(
{NAMESPACE: "mesh", NAME: "Neurons", IDENTIFIER: "D009474"},
{NAMESPACE: "MESH", NAME: "Nonsense name!", IDENTIFIER: "D009474"},
)
def test_fix_wrong_uniprot_name(self, *_):
"""Test overwriting a wrong name (UniProt)."""
self._help(
{NAMESPACE: "uniprot", NAME: "HUS1_HUMAN", IDENTIFIER: "O60921"},
{NAMESPACE: "UniProt", NAME: "WRONG!!!!", IDENTIFIER: "O60921"},
)
def test_remap_sfam(self, *_):
"""Test remapping SFAM to FPLX."""
self.assertIn(("bel", "TAP Family"), _NAME_REMAPPING)
self._help(
{NAMESPACE: "fplx", NAME: "TAP", IDENTIFIER: "TAP"},
{NAMESPACE: "SFAM", NAME: "TAP Family"},
)
def test_remap_scomp(self, *_):
"""Test remapping SFAM to FPLX."""
self.assertIsNotNone(bioregistry.normalize_prefix("BEL"))
self.assertIn(
("bel", "gamma Secretase Complex"),
_NAME_REMAPPING,
msg="name remapping is not populated properly",
)
self._help(
{NAMESPACE: "fplx", NAME: "Gamma_secretase", IDENTIFIER: "Gamma_secretase"},
{NAMESPACE: "SCOMP", NAME: "gamma Secretase Complex"},
)
@mock_id_name_mapping
@mock_get_mnemonic
@mock_get_id_from_mnemonic
class TestGround(unittest.TestCase):
"""Test grounding."""
def _help(self, expected, result):
_process_node(result)
self.assertEqual(expected, result)
def test_lookup_identifier_member(self, *_):
"""Test looking up the identifier of a member by name."""
self._help(
{
MEMBERS: [
{
CONCEPT: {
NAMESPACE: "mesh",
NAME: "Neurons",
IDENTIFIER: "D009474",
}
}
]
},
{MEMBERS: [{CONCEPT: {NAMESPACE: "MESH", NAME: "Neurons"}}]},
)
def test_lookup_identifier_complex(self, *_):
"""Test looking up the identifier of a named complex and its members at the same time."""
self._help(
{
CONCEPT: {
NAMESPACE: "complexportal",
NAME: "Checkpoint clamp complex",
IDENTIFIER: "CPX-1829",
},
MEMBERS: [
{
CONCEPT: {
NAMESPACE: "uniprot",
NAME: "HUS1_HUMAN",
IDENTIFIER: "O60921",
}
},
{
CONCEPT: {
NAMESPACE: "uniprot",
NAME: "RAD9A_HUMAN",
IDENTIFIER: "Q99638",
}
},
{
CONCEPT: {
NAMESPACE: "uniprot",
NAME: "RAD1_HUMAN",
IDENTIFIER: "O60671",
}
},
],
},
{
CONCEPT: {NAMESPACE: "complexportal", NAME: "Checkpoint clamp complex"},
MEMBERS: [
{CONCEPT: {NAMESPACE: "uniprot", NAME: "HUS1_HUMAN"}},
{CONCEPT: {NAMESPACE: "uniprot", NAME: "RAD9A_HUMAN"}},
{CONCEPT: {NAMESPACE: "uniprot", NAME: "RAD1_HUMAN"}},
],
},
)
def test_lookup_identifier_protein(self, *_):
"""Test looking up the identifier based on a protein's name."""
self._help(
{CONCEPT: {NAMESPACE: "uniprot", NAME: "HUS1_HUMAN", IDENTIFIER: "O60921"}},
{CONCEPT: {NAMESPACE: "uniprot", NAME: "HUS1_HUMAN"}},
)
def test_lookup_name_protein(self, *_):
"""Test looking up the name based on a protein's identifier."""
self._help(
{CONCEPT: {NAMESPACE: "uniprot", NAME: "HUS1_HUMAN", IDENTIFIER: "O60921"}},
{CONCEPT: {NAMESPACE: "uniprot", IDENTIFIER: "O60921"}},
)
def test_fix_name_protein(self, *_):
"""Test fixing a wrong name by overwriting by identifier-based lookup."""
self._help(
{CONCEPT: {NAMESPACE: "uniprot", NAME: "HUS1_HUMAN", IDENTIFIER: "O60921"}},
{CONCEPT: {NAMESPACE: "uniprot", IDENTIFIER: "O60921", NAME: "wrong!!!"}},
)
def test_lookup_identifier_pmod(self, *_):
"""Test looking up a protein modification's identifier by name."""
self._help(
{
CONCEPT: {
NAMESPACE: "uniprot",
NAME: "HUS1_HUMAN",
IDENTIFIER: "O60921",
},
VARIANTS: [
{
KIND: PMOD,
CONCEPT: {
NAMESPACE: "go",
IDENTIFIER: "0006468",
NAME: "protein phosphorylation",
},
},
],
},
{
CONCEPT: {NAMESPACE: "uniprot", NAME: "HUS1_HUMAN"},
VARIANTS: [
{
KIND: PMOD,
CONCEPT: {NAMESPACE: "GO", NAME: "protein phosphorylation"},
},
],
},
)
def test_lookup_name_pmod(self, *_):
"""Test looking up a protein modification's name by identifier."""
self._help(
{
CONCEPT: {
NAMESPACE: "uniprot",
NAME: "HUS1_HUMAN",
IDENTIFIER: "O60921",
},
VARIANTS: [
{
KIND: PMOD,
CONCEPT: {
NAMESPACE: "go",
IDENTIFIER: "0006468",
NAME: "protein phosphorylation",
},
},
],
},
{
CONCEPT: {NAMESPACE: "uniprot", IDENTIFIER: "O60921"},
VARIANTS: [
{
KIND: PMOD,
CONCEPT: {NAMESPACE: "GO", IDENTIFIER: "0006468"},
},
],
},
)
def test_fix_pmod_name(self, *_):
"""Test fixing a wrong name in a pmod."""
self._help(
{
CONCEPT: {
NAMESPACE: "uniprot",
NAME: "HUS1_HUMAN",
IDENTIFIER: "O60921",
},
VARIANTS: [
{
KIND: PMOD,
CONCEPT: {
NAMESPACE: "go",
IDENTIFIER: "0006468",
NAME: "protein phosphorylation",
},
},
],
},
{
CONCEPT: {NAMESPACE: "uniprot", NAME: "HUS1_HUMAN"},
VARIANTS: [
{
KIND: PMOD,
CONCEPT: {
NAMESPACE: "GO",
IDENTIFIER: "0006468",
NAME: "WRONG!",
},
},
],
},
)
def test_normalize_pmod_default(self, *_):
"""Test normalizing a pmod using the default bel namespace."""
self._help(
{
CONCEPT: {
NAMESPACE: "uniprot",
NAME: "HUS1_HUMAN",
IDENTIFIER: "O60921",
},
VARIANTS: [
{
KIND: PMOD,
CONCEPT: {
NAMESPACE: "go",
IDENTIFIER: "0006468",
NAME: "protein phosphorylation",
},
},
],
},
{
CONCEPT: {NAMESPACE: "uniprot", NAME: "HUS1_HUMAN"},
VARIANTS: [
{
KIND: PMOD,
CONCEPT: {NAMESPACE: "bel", NAME: "Ph"},
},
],
},
)
def test_normalize_pmod_default_methylation(self, *_):
"""Test normalizing the default namespace's Me entry because of conflict with gmods."""
self._help(
{
CONCEPT: {
NAMESPACE: "uniprot",
NAME: "HUS1_HUMAN",
IDENTIFIER: "O60921",
},
VARIANTS: [
{
KIND: PMOD,
CONCEPT: {
NAMESPACE: "go",
IDENTIFIER: "0006479",
NAME: "protein methylation",
},
},
],
},
{
CONCEPT: {NAMESPACE: "uniprot", NAME: "HUS1_HUMAN"},
VARIANTS: [
{KIND: PMOD, CONCEPT: {NAMESPACE: "bel", NAME: "Me"}},
],
},
)
def test_normalize_gmod_default_methylation(self, *_):
"""Test normalizing the default namespace's Me entry because of conflict with pmods."""
self._help(
{
CONCEPT: {NAMESPACE: "hgnc", NAME: "MAPT", IDENTIFIER: "6893"},
VARIANTS: [
{
CONCEPT: {
NAMESPACE: "go",
IDENTIFIER: "0006306",
NAME: "DNA methylation",
},
KIND: GMOD,
},
],
},
{
CONCEPT: {NAMESPACE: "HGNC", NAME: "MAPT"},
VARIANTS: [
{CONCEPT: {NAMESPACE: "bel", NAME: "Me"}, KIND: GMOD},
],
},
)
@mock_id_name_mapping
class TestAnnotations(unittest.TestCase):
"""Test processing annotations."""
def _help(self, expected_data, data):
expected_data = {ANNOTATIONS: expected_data}
data = {ANNOTATIONS: data}
_process_annotations(data)
self.assertEqual(expected_data, data)
def test_lookup_by_identifier(self, *_):
"""Test lookup by identifier."""
self._help(
{"Disease": [Entity(namespace="mesh", identifier="D010300", name="Parkinson Disease")]},
{"Disease": [Entity(namespace="mesh", identifier="D010300")]},
)
def test_lookup_by_name(self, *_):
"""Test lookup by name."""
self._help(
{"Disease": [Entity(namespace="mesh", identifier="D010300", name="Parkinson Disease")]},
{"Disease": [Entity(namespace="mesh", name="Parkinson Disease")]},
)
def test_lookup_by_name_as_identifier(self, *_):
"""Test lookup by name if it's accidentally in the identifier slot."""
self._help(
{"Disease": [Entity(namespace="mesh", identifier="D010300", name="Parkinson Disease")]},
{"Disease": [Entity(namespace="mesh", identifier="Parkinson Disease")]},
)
def test_upgrade_category(self, *_):
"""Test upgrading the category."""
self._help(
{"Disease": [Entity(namespace="mesh", identifier="D010300", name="Parkinson Disease")]},
{"MeSHDisease": [Entity(namespace="mesh", identifier="D010300", name="Parkinson Disease")]},
)
def test_upgrade_category_and_namespace(self, *_):
"""Test upgrading the category and the namespace simultaneously."""
self._help(
{"Disease": [Entity(namespace="mesh", identifier="D010300", name="Parkinson Disease")]},
{
"MeSHDisease": [
Entity(
namespace="MeSHDisease",
identifier="D010300",
name="Parkinson Disease",
)
]
},
)
def test_upgrade_with_name_as_identifier(self, *_):
"""Test upgrading MeSH disease, MeSH anatomy, and Species tags and lookup by name, in the identifiers space."""
self._help(
{ # Expected
"Disease": [Entity(namespace="mesh", identifier="D010300", name="Parkinson Disease")],
"Anatomy": [Entity(namespace="mesh", identifier="D013378", name="Substantia Nigra")],
"Species": [Entity(namespace="ncbitaxon", identifier="9606", name="Homo sapiens")],
},
{ # Original
"MeSHDisease": [Entity(namespace="MeSHDisease", identifier="Parkinson Disease")],
"MeSHAnatomy": [Entity(namespace="MeSHAnatomy", identifier="Substantia Nigra")],
"Species": [Entity(namespace="Species", identifier="Homo sapiens")],
},
)
def test_upgrade_by_identifier(self, *_):
"""Test upgrading and lookup by identifier."""
self._help(
{ # Expected
"Disease": [Entity(namespace="mesh", identifier="D010300", name="Parkinson Disease")],
"Anatomy": [Entity(namespace="mesh", identifier="D013378", name="Substantia Nigra")],
"Species": [Entity(namespace="ncbitaxon", identifier="9606", name="Homo sapiens")],
},
{ # Original
"MeSHDisease": [Entity(namespace="MeSHDisease", identifier="D010300")],
"MeSHAnatomy": [Entity(namespace="MeSHAnatomy", identifier="D013378")],
"Species": [Entity(namespace="Species", identifier="9606")],
},
)
def test_upgrade_by_name(self, *_):
"""Test upgrading and lookup by name."""
self._help(
{ # Expected
"Disease": [Entity(namespace="mesh", identifier="D010300", name="Parkinson Disease")],
"Anatomy": [Entity(namespace="mesh", identifier="D013378", name="Substantia Nigra")],
"Species": [Entity(namespace="ncbitaxon", identifier="9606", name="Homo sapiens")],
},
{ # Original
"MeSHDisease": [Entity(namespace="MeSHDisease", name="Parkinson Disease")],
"MeSHAnatomy": [Entity(namespace="MeSHAnatomy", name="Substantia Nigra")],
"Species": [Entity(namespace="Species", name="Homo sapiens")],
},
)
def test_unmappable_category(self, *_):
"""Test when the category can't be mapped."""
self._help(
{ # Expected
"Custom Annotation": [Entity(namespace="Custom Annotation", identifier="Custom Value")],
},
{
"Custom Annotation": [Entity(namespace="Custom Annotation", identifier="Custom Value")],
},
)
def test_unmappable_identifier(self, *_):
"""Test when the identifier can not be resolved."""
self._help(
{ # Expected
"Disease": [Entity(namespace="doid", identifier="Failure")],
},
{
"Disease": [Entity(namespace="Disease", identifier="Failure")],
},
)
def test_unmappable_name(self, *_):
"""Test when the identifier can not be looked up by name."""
self._help(
{ # Expected
"Disease": [Entity(namespace="doid", name="Failure")],
},
{
"Disease": [Entity(namespace="Disease", name="Failure")],
},
)
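# A minimal sketch of the patching pattern used throughout this module:
# mock.patch(target, side_effect=some_dict.get) swaps a remote PyOBO lookup
# for a local dictionary, so the tests never touch the network. The values
# "P00000" and "DEMO_HUMAN" below are made up for illustration.
_demo_patch = mock.patch(
"pybel.grounding.get_mnemonic",
side_effect={"P00000": "DEMO_HUMAN"}.get,
)
if __name__ == "__main__":
unittest.main()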
|
pybel/pybel
|
tests/test_grounding.py
|
Python
|
mit
| 20,364
|
[
"Pybel"
] |
eb6e14ce4f26aa0701399ba676a228488df767fa6a85b43edd1ca3eeef21f05c
|
from __future__ import unicode_literals
from ..person import Provider as PersonProvider
class Provider(PersonProvider):
formats = (
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{prefix}} {{first_name}} {{last_name}}',
'{{first_name}} {{last_name}} {{suffix}}',
'{{prefix}} {{first_name}} {{last_name}} {{suffix}}'
)
first_names = (
'Aaliyah', 'Aaron', 'Abagail', 'Abbey', 'Abbie', 'Abbigail', 'Abby', 'Abdiel', 'Abdul', 'Abdullah', 'Abe',
'Abel', 'Abelardo', 'Abigail', 'Abigale', 'Abigayle', 'Abner', 'Abraham', 'Ada', 'Adah', 'Adalberto', 'Adaline',
'Adam', 'Adan', 'Addie', 'Addison', 'Adela', 'Adelbert', 'Adele', 'Adelia', 'Adeline', 'Adell', 'Adella',
'Adelle', 'Aditya', 'Adolf', 'Adolfo', 'Adolph', 'Adolphus', 'Adonis', 'Adrain', 'Adrian', 'Adriana',
'Adrianna', 'Adriel', 'Adrien', 'Adrienne', 'Afton', 'Aglae', 'Agnes', 'Agustin', 'Agustina', 'Ahmad', 'Ahmed',
'Aida', 'Aidan', 'Aiden', 'Aileen', 'Aimee', 'Aisha', 'Aiyana', 'Akeem', 'Al', 'Alaina', 'Alan', 'Alana',
'Alanis', 'Alanna', 'Alayna', 'Alba', 'Albert', 'Alberta', 'Albertha', 'Alberto', 'Albin', 'Albina', 'Alda',
'Alden', 'Alec', 'Aleen', 'Alejandra', 'Alejandrin', 'Alek', 'Alena', 'Alene', 'Alessandra', 'Alessandro',
'Alessia', 'Aletha', 'Alex', 'Alexa', 'Alexander', 'Alexandra', 'Alexandre', 'Alexandrea', 'Alexandria',
'Alexandrine', 'Alexandro', 'Alexane', 'Alexanne', 'Alexie', 'Alexis', 'Alexys', 'Alexzander', 'Alf', 'Alfonso',
'Alfonzo', 'Alford', 'Alfred', 'Alfreda', 'Alfredo', 'Ali', 'Alia', 'Alice', 'Alicia', 'Alisa', 'Alisha',
'Alison', 'Alivia', 'Aliya', 'Aliyah', 'Aliza', 'Alize', 'Allan', 'Allen', 'Allene', 'Allie', 'Allison', 'Ally',
'Alphonso', 'Alta', 'Althea', 'Alva', 'Alvah', 'Alvena', 'Alvera', 'Alverta', 'Alvina', 'Alvis', 'Alyce',
'Alycia', 'Alysa', 'Alysha', 'Alyson', 'Alysson', 'Amalia', 'Amanda', 'Amani', 'Amara', 'Amari', 'Amaya',
'Amber', 'Ambrose', 'Amelia', 'Amelie', 'Amely', 'America', 'Americo', 'Amie', 'Amina', 'Amir', 'Amira',
'Amiya', 'Amos', 'Amparo', 'Amy', 'Amya', 'Ana', 'Anabel', 'Anabelle', 'Anahi', 'Anais', 'Anastacio',
'Anastasia', 'Anderson', 'Andre', 'Andreane', 'Andreanne', 'Andres', 'Andrew', 'Andy', 'Angel', 'Angela',
'Angelica', 'Angelina', 'Angeline', 'Angelita', 'Angelo', 'Angie', 'Angus', 'Anibal', 'Anika', 'Anissa',
'Anita', 'Aniya', 'Aniyah', 'Anjali', 'Anna', 'Annabel', 'Annabell', 'Annabelle', 'Annalise', 'Annamae',
'Annamarie', 'Anne', 'Annetta', 'Annette', 'Annie', 'Ansel', 'Ansley', 'Anthony', 'Antoinette', 'Antone',
'Antonetta', 'Antonette', 'Antonia', 'Antonietta', 'Antonina', 'Antonio', 'Antwan', 'Antwon', 'Anya', 'April',
'Ara', 'Araceli', 'Aracely', 'Arch', 'Archibald', 'Ardella', 'Arden', 'Ardith', 'Arely', 'Ari', 'Ariane',
'Arianna', 'Aric', 'Ariel', 'Arielle', 'Arjun', 'Arlene', 'Arlie', 'Arlo', 'Armand', 'Armando', 'Armani',
'Arnaldo', 'Arne', 'Arno', 'Arnold', 'Arnoldo', 'Arnulfo', 'Aron', 'Art', 'Arthur', 'Arturo', 'Arvel', 'Arvid',
'Arvilla', 'Aryanna', 'Asa', 'Asha', 'Ashlee', 'Ashleigh', 'Ashley', 'Ashly', 'Ashlynn', 'Ashton', 'Ashtyn',
'Asia', 'Assunta', 'Astrid', 'Athena', 'Aubree', 'Aubrey', 'Audie', 'Audra', 'Audreanne', 'Audrey', 'August',
'Augusta', 'Augustine', 'Augustus', 'Aurelia', 'Aurelie', 'Aurelio', 'Aurore', 'Austen', 'Austin', 'Austyn',
'Autumn', 'Ava', 'Avery', 'Avis', 'Axel', 'Ayana', 'Ayden', 'Ayla', 'Aylin',
'Baby', 'Bailee', 'Bailey', 'Barbara', 'Barney', 'Baron', 'Barrett', 'Barry', 'Bart', 'Bartholome', 'Barton',
'Baylee', 'Beatrice', 'Beau', 'Beaulah', 'Bell', 'Bella', 'Belle', 'Ben', 'Benedict', 'Benjamin', 'Bennett',
'Bennie', 'Benny', 'Benton', 'Berenice', 'Bernadette', 'Bernadine', 'Bernard', 'Bernardo', 'Berneice',
'Bernhard', 'Bernice', 'Bernie', 'Berniece', 'Bernita', 'Berry', 'Bert', 'Berta', 'Bertha', 'Bertram',
'Bertrand', 'Beryl', 'Bessie', 'Beth', 'Bethany', 'Bethel', 'Betsy', 'Bette', 'Bettie', 'Betty', 'Bettye',
'Beulah', 'Beverly', 'Bianka', 'Bill', 'Billie', 'Billy', 'Birdie', 'Blair', 'Blaise', 'Blake', 'Blanca',
'Blanche', 'Blaze', 'Bo', 'Bobbie', 'Bobby', 'Bonita', 'Bonnie', 'Boris', 'Boyd', 'Brad', 'Braden', 'Bradford',
'Bradley', 'Bradly', 'Brady', 'Braeden', 'Brain', 'Brandi', 'Brando', 'Brandon', 'Brandt', 'Brandy', 'Brandyn',
'Brannon', 'Branson', 'Brant', 'Braulio', 'Braxton', 'Brayan', 'Breana', 'Breanna', 'Breanne', 'Brenda',
'Brendan', 'Brenden', 'Brendon', 'Brenna', 'Brennan', 'Brennon', 'Brent', 'Bret', 'Brett', 'Bria', 'Brian',
'Briana', 'Brianne', 'Brice', 'Bridget', 'Bridgette', 'Bridie', 'Brielle', 'Brigitte', 'Brionna', 'Brisa',
'Britney', 'Brittany', 'Brock', 'Broderick', 'Brody', 'Brook', 'Brooke', 'Brooklyn', 'Brooks', 'Brown', 'Bruce',
'Bryana', 'Bryce', 'Brycen', 'Bryon', 'Buck', 'Bud', 'Buddy', 'Buford', 'Bulah', 'Burdette', 'Burley',
'Burnice', 'Buster',
'Cade', 'Caden', 'Caesar', 'Caitlyn', 'Cale', 'Caleb', 'Caleigh', 'Cali', 'Calista', 'Callie', 'Camden',
'Cameron', 'Camila', 'Camilla', 'Camille', 'Camren', 'Camron', 'Camryn', 'Camylle', 'Candace', 'Candelario',
'Candice', 'Candida', 'Candido', 'Cara', 'Carey', 'Carissa', 'Carlee', 'Carleton', 'Carley', 'Carli', 'Carlie',
'Carlo', 'Carlos', 'Carlotta', 'Carmel', 'Carmela', 'Carmella', 'Carmelo', 'Carmen', 'Carmine', 'Carol',
'Carolanne', 'Carole', 'Carolina', 'Caroline', 'Carolyn', 'Carolyne', 'Carrie', 'Carroll', 'Carson', 'Carter',
'Cary', 'Casandra', 'Casey', 'Casimer', 'Casimir', 'Casper', 'Cassandra', 'Cassandre', 'Cassidy', 'Cassie',
'Catalina', 'Caterina', 'Catharine', 'Catherine', 'Cathrine', 'Cathryn', 'Cathy', 'Cayla', 'Ceasar', 'Cecelia',
'Cecil', 'Cecile', 'Cecilia', 'Cedrick', 'Celestine', 'Celestino', 'Celia', 'Celine', 'Cesar', 'Chad', 'Chadd',
'Chadrick', 'Chaim', 'Chance', 'Chandler', 'Chanel', 'Chanelle', 'Charity', 'Charlene', 'Charles', 'Charley',
'Charlie', 'Charlotte', 'Chase', 'Chasity', 'Chauncey', 'Chaya', 'Chaz', 'Chelsea', 'Chelsey', 'Chelsie',
'Chesley', 'Chester', 'Chet', 'Cheyanne', 'Cheyenne', 'Chloe', 'Chris', 'Christ', 'Christa', 'Christelle',
'Christian', 'Christiana', 'Christina', 'Christine', 'Christop', 'Christophe', 'Christopher', 'Christy',
'Chyna', 'Ciara', 'Cicero', 'Cielo', 'Cierra', 'Cindy', 'Citlalli', 'Clair', 'Claire', 'Clara', 'Clarabelle',
'Clare', 'Clarissa', 'Clark', 'Claud', 'Claude', 'Claudia', 'Claudie', 'Claudine', 'Clay', 'Clemens', 'Clement',
'Clementina', 'Clementine', 'Clemmie', 'Cleo', 'Cleora', 'Cleta', 'Cletus', 'Cleve', 'Cleveland', 'Clifford',
'Clifton', 'Clint', 'Clinton', 'Clotilde', 'Clovis', 'Cloyd', 'Clyde', 'Coby', 'Cody', 'Colby', 'Cole',
'Coleman', 'Colin', 'Colleen', 'Collin', 'Colt', 'Colten', 'Colton', 'Columbus', 'Concepcion', 'Conner',
'Connie', 'Connor', 'Conor', 'Conrad', 'Constance', 'Constantin', 'Consuelo', 'Cooper', 'Cora', 'Coralie',
'Corbin', 'Cordelia', 'Cordell', 'Cordia', 'Cordie', 'Corene', 'Corine', 'Cornelius', 'Cornell', 'Corrine',
'Cortez', 'Cortney', 'Cory', 'Coty', 'Courtney', 'Coy', 'Craig', 'Crawford', 'Creola', 'Cristal', 'Cristian',
'Cristina', 'Cristobal', 'Cristopher', 'Cruz', 'Crystal', 'Crystel', 'Cullen', 'Curt', 'Curtis', 'Cydney',
'Cynthia', 'Cyril', 'Cyrus',
'Dagmar', 'Dahlia', 'Daija', 'Daisha', 'Daisy', 'Dakota', 'Dale', 'Dallas', 'Dallin', 'Dalton', 'Damaris',
'Dameon', 'Damian', 'Damien', 'Damion', 'Damon', 'Dan', 'Dana', 'Dandre', 'Dane', 'D\'angelo', 'Dangelo',
'Danial', 'Daniela', 'Daniella', 'Danielle', 'Danika', 'Dannie', 'Danny', 'Dante', 'Danyka', 'Daphne',
'Daphnee', 'Daphney', 'Darby', 'Daren', 'Darian', 'Dariana', 'Darien', 'Dario', 'Darion', 'Darius', 'Darlene',
'Daron', 'Darrel', 'Darrell', 'Darren', 'Darrick', 'Darrin', 'Darrion', 'Darron', 'Darryl', 'Darwin', 'Daryl',
'Dashawn', 'Dasia', 'Dave', 'David', 'Davin', 'Davion', 'Davon', 'Davonte', 'Dawn', 'Dawson', 'Dax', 'Dayana',
'Dayna', 'Dayne', 'Dayton', 'Dean', 'Deangelo', 'Deanna', 'Deborah', 'Declan', 'Dedric', 'Dedrick', 'Dee',
'Deion', 'Deja', 'Dejah', 'Dejon', 'Dejuan', 'Delaney', 'Delbert', 'Delfina', 'Delia', 'Delilah', 'Dell',
'Della', 'Delmer', 'Delores', 'Delpha', 'Delphia', 'Delphine', 'Delta', 'Demarco', 'Demarcus', 'Demario',
'Demetris', 'Demetrius', 'Demond', 'Dena', 'Denis', 'Dennis', 'Deon', 'Deondre', 'Deontae', 'Deonte', 'Dereck',
'Derek', 'Derick', 'Deron', 'Derrick', 'Deshaun', 'Deshawn', 'Desiree', 'Desmond', 'Dessie', 'Destany',
'Destin', 'Destinee', 'Destiney', 'Destini', 'Destiny', 'Devan', 'Devante', 'Deven', 'Devin', 'Devon',
'Devonte', 'Devyn', 'Dewayne', 'Dewitt', 'Dexter', 'Diamond', 'Diana', 'Dianna', 'Diego', 'Dillan', 'Dillon',
'Dimitri', 'Dina', 'Dino', 'Dion', 'Dixie', 'Dock', 'Dolly', 'Dolores', 'Domenic', 'Domenica', 'Domenick',
'Domenico', 'Domingo', 'Dominic', 'Dominique', 'Don', 'Donald', 'Donato', 'Donavon', 'Donna', 'Donnell',
'Donnie', 'Donny', 'Dora', 'Dorcas', 'Dorian', 'Doris', 'Dorothea', 'Dorothy', 'Dorris', 'Dortha', 'Dorthy',
'Doug', 'Douglas', 'Dovie', 'Doyle', 'Drake', 'Drew', 'Duane', 'Dudley', 'Dulce', 'Duncan', 'Durward', 'Dustin',
'Dusty', 'Dwight', 'Dylan',
'Earl', 'Earlene', 'Earline', 'Earnest', 'Earnestine', 'Easter', 'Easton', 'Ebba', 'Ebony', 'Ed', 'Eda', 'Edd',
'Eddie', 'Eden', 'Edgar', 'Edgardo', 'Edison', 'Edmond', 'Edmund', 'Edna', 'Eduardo', 'Edward', 'Edwardo',
'Edwin', 'Edwina', 'Edyth', 'Edythe', 'Effie', 'Efrain', 'Efren', 'Eileen', 'Einar', 'Eino', 'Eladio', 'Elaina',
'Elbert', 'Elda', 'Eldon', 'Eldora', 'Eldred', 'Eldridge', 'Eleanora', 'Eleanore', 'Eleazar', 'Electa', 'Elena',
'Elenor', 'Elenora', 'Eleonore', 'Elfrieda', 'Eli', 'Elian', 'Eliane', 'Elias', 'Eliezer', 'Elijah', 'Elinor',
'Elinore', 'Elisa', 'Elisabeth', 'Elise', 'Eliseo', 'Elisha', 'Elissa', 'Eliza', 'Elizabeth', 'Ella', 'Ellen',
'Ellie', 'Elliot', 'Elliott', 'Ellis', 'Ellsworth', 'Elmer', 'Elmira', 'Elmo', 'Elmore', 'Elna', 'Elnora',
'Elody', 'Eloisa', 'Eloise', 'Elouise', 'Eloy', 'Elroy', 'Elsa', 'Else', 'Elsie', 'Elta', 'Elton', 'Elva',
'Elvera', 'Elvie', 'Elvis', 'Elwin', 'Elwyn', 'Elyse', 'Elyssa', 'Elza', 'Emanuel', 'Emelia', 'Emelie', 'Emely',
'Emerald', 'Emerson', 'Emery', 'Emie', 'Emil', 'Emile', 'Emilia', 'Emiliano', 'Emilie', 'Emilio', 'Emily',
'Emma', 'Emmalee', 'Emmanuel', 'Emmanuelle', 'Emmet', 'Emmett', 'Emmie', 'Emmitt', 'Emmy', 'Emory', 'Ena',
'Enid', 'Enoch', 'Enola', 'Enos', 'Enrico', 'Enrique', 'Ephraim', 'Era', 'Eriberto', 'Eric', 'Erica', 'Erich',
'Erick', 'Ericka', 'Erik', 'Erika', 'Erin', 'Erling', 'Erna', 'Ernest', 'Ernestina', 'Ernestine', 'Ernesto',
'Ernie', 'Ervin', 'Erwin', 'Eryn', 'Esmeralda', 'Esperanza', 'Esta', 'Esteban', 'Estefania', 'Estel', 'Estell',
'Estella', 'Estelle', 'Estevan', 'Esther', 'Estrella', 'Etha', 'Ethan', 'Ethel', 'Ethelyn', 'Ethyl', 'Ettie',
'Eudora', 'Eugene', 'Eugenia', 'Eula', 'Eulah', 'Eulalia', 'Euna', 'Eunice', 'Eusebio', 'Eva', 'Evalyn', 'Evan',
'Evangeline', 'Evans', 'Eve', 'Eveline', 'Evelyn', 'Everardo', 'Everett', 'Everette', 'Evert', 'Evie', 'Ewald',
'Ewell', 'Ezekiel', 'Ezequiel', 'Ezra',
'Fabian', 'Fabiola', 'Fae', 'Fannie', 'Fanny', 'Fatima', 'Faustino', 'Fausto', 'Favian', 'Fay', 'Faye',
'Federico', 'Felicia', 'Felicita', 'Felicity', 'Felipa', 'Felipe', 'Felix', 'Felton', 'Fermin', 'Fern',
'Fernando', 'Ferne', 'Fidel', 'Filiberto', 'Filomena', 'Finn', 'Fiona', 'Flavie', 'Flavio', 'Fleta', 'Fletcher',
'Flo', 'Florence', 'Florencio', 'Florian', 'Florida', 'Florine', 'Flossie', 'Floy', 'Floyd', 'Ford', 'Forest',
'Forrest', 'Foster', 'Frances', 'Francesca', 'Francesco', 'Francis', 'Francisca', 'Francisco', 'Franco',
'Frank', 'Frankie', 'Franz', 'Fred', 'Freda', 'Freddie', 'Freddy', 'Frederic', 'Frederick', 'Frederik',
'Frederique', 'Fredrick', 'Fredy', 'Freeda', 'Freeman', 'Freida', 'Frida', 'Frieda', 'Friedrich', 'Fritz',
'Furman',
'Gabe', 'Gabriel', 'Gabriella', 'Gabrielle', 'Gaetano', 'Gage', 'Gail', 'Gardner', 'Garett', 'Garfield',
'Garland', 'Garnet', 'Garnett', 'Garret', 'Garrett', 'Garrick', 'Garrison', 'Garry', 'Garth', 'Gaston', 'Gavin',
'Gay', 'Gayle', 'Gaylord', 'Gene', 'General', 'Genesis', 'Genevieve', 'Gennaro', 'Genoveva', 'Geo', 'Geoffrey',
'George', 'Georgette', 'Georgiana', 'Georgianna', 'Geovanni', 'Geovanny', 'Geovany', 'Gerald', 'Geraldine',
'Gerard', 'Gerardo', 'Gerda', 'Gerhard', 'Germaine', 'German', 'Gerry', 'Gerson', 'Gertrude', 'Gia', 'Gianni',
'Gideon', 'Gilbert', 'Gilberto', 'Gilda', 'Giles', 'Gillian', 'Gina', 'Gino', 'Giovani', 'Giovanna', 'Giovanni',
'Giovanny', 'Gisselle', 'Giuseppe', 'Gladyce', 'Gladys', 'Glen', 'Glenda', 'Glenna', 'Glennie', 'Gloria',
'Godfrey', 'Golda', 'Golden', 'Gonzalo', 'Gordon', 'Grace', 'Gracie', 'Graciela', 'Grady', 'Graham', 'Grant',
'Granville', 'Grayce', 'Grayson', 'Green', 'Greg', 'Gregg', 'Gregoria', 'Gregorio', 'Gregory', 'Greta',
'Gretchen', 'Greyson', 'Griffin', 'Grover', 'Guadalupe', 'Gudrun', 'Guido', 'Guillermo', 'Guiseppe', 'Gunnar',
'Gunner', 'Gus', 'Gussie', 'Gust', 'Gustave', 'Guy', 'Gwen', 'Gwendolyn',
'Hadley', 'Hailee', 'Hailey', 'Hailie', 'Hal', 'Haleigh', 'Haley', 'Halie', 'Halle', 'Hallie', 'Hank', 'Hanna',
'Hannah', 'Hans', 'Hardy', 'Harley', 'Harmon', 'Harmony', 'Harold', 'Harrison', 'Harry', 'Harvey', 'Haskell',
'Hassan', 'Hassie', 'Hattie', 'Haven', 'Hayden', 'Haylee', 'Hayley', 'Haylie', 'Hazel', 'Hazle', 'Heath',
'Heather', 'Heaven', 'Heber', 'Hector', 'Heidi', 'Helen', 'Helena', 'Helene', 'Helga', 'Hellen', 'Helmer',
'Heloise', 'Henderson', 'Henri', 'Henriette', 'Henry', 'Herbert', 'Herman', 'Hermann', 'Hermina', 'Herminia',
'Herminio', 'Hershel', 'Herta', 'Hertha', 'Hester', 'Hettie', 'Hilario', 'Hilbert', 'Hilda', 'Hildegard',
'Hillard', 'Hillary', 'Hilma', 'Hilton', 'Hipolito', 'Hiram', 'Hobart', 'Holden', 'Hollie', 'Hollis', 'Holly',
'Hope', 'Horace', 'Horacio', 'Hortense', 'Hosea', 'Houston', 'Howard', 'Howell', 'Hoyt', 'Hubert', 'Hudson',
'Hugh', 'Hulda', 'Humberto', 'Hunter', 'Hyman',
'Ian', 'Ibrahim', 'Icie', 'Ida', 'Idell', 'Idella', 'Ignacio', 'Ignatius', 'Ike', 'Ila', 'Ilene', 'Iliana',
'Ima', 'Imani', 'Imelda', 'Immanuel', 'Imogene', 'Ines', 'Irma', 'Irving', 'Irwin', 'Isaac', 'Isabel',
'Isabell', 'Isabella', 'Isabelle', 'Isac', 'Isadore', 'Isai', 'Isaiah', 'Isaias', 'Isidro', 'Ismael', 'Isobel',
'Isom', 'Israel', 'Issac', 'Itzel', 'Iva', 'Ivah', 'Ivory', 'Ivy', 'Izabella', 'Izaiah',
'Jabari', 'Jace', 'Jacey', 'Jacinthe', 'Jacinto', 'Jack', 'Jackeline', 'Jackie', 'Jacklyn', 'Jackson', 'Jacky',
'Jaclyn', 'Jacquelyn', 'Jacques', 'Jacynthe', 'Jada', 'Jade', 'Jaden', 'Jadon', 'Jadyn', 'Jaeden', 'Jaida',
'Jaiden', 'Jailyn', 'Jaime', 'Jairo', 'Jakayla', 'Jake', 'Jakob', 'Jaleel', 'Jalen', 'Jalon', 'Jalyn', 'Jamaal',
'Jamal', 'Jamar', 'Jamarcus', 'Jamel', 'Jameson', 'Jamey', 'Jamie', 'Jamil', 'Jamir', 'Jamison', 'Jammie',
'Jan', 'Jana', 'Janae', 'Jane', 'Janelle', 'Janessa', 'Janet', 'Janice', 'Janick', 'Janie', 'Janis', 'Janiya',
'Jannie', 'Jany', 'Jaquan', 'Jaquelin', 'Jaqueline', 'Jared', 'Jaren', 'Jarod', 'Jaron', 'Jarred', 'Jarrell',
'Jarret', 'Jarrett', 'Jarrod', 'Jarvis', 'Jasen', 'Jasmin', 'Jason', 'Jasper', 'Jaunita', 'Javier', 'Javon',
'Javonte', 'Jay', 'Jayce', 'Jaycee', 'Jayda', 'Jayde', 'Jayden', 'Jaydon', 'Jaylan', 'Jaylen', 'Jaylin',
'Jaylon', 'Jayme', 'Jayne', 'Jayson', 'Jazlyn', 'Jazmin', 'Jazmyn', 'Jazmyne', 'Jean', 'Jeanette', 'Jeanie',
'Jeanne', 'Jed', 'Jedediah', 'Jedidiah', 'Jeff', 'Jefferey', 'Jeffery', 'Jeffrey', 'Jeffry', 'Jena', 'Jenifer',
'Jennie', 'Jennifer', 'Jennings', 'Jennyfer', 'Jensen', 'Jerad', 'Jerald', 'Jeramie', 'Jeramy', 'Jerel',
'Jeremie', 'Jeremy', 'Jermain', 'Jermaine', 'Jermey', 'Jerod', 'Jerome', 'Jeromy', 'Jerrell', 'Jerrod',
'Jerrold', 'Jerry', 'Jess', 'Jesse', 'Jessica', 'Jessie', 'Jessika', 'Jessy', 'Jessyca', 'Jesus', 'Jett',
'Jettie', 'Jevon', 'Jewel', 'Jewell', 'Jillian', 'Jimmie', 'Jimmy', 'Jo', 'Joan', 'Joana', 'Joanie', 'Joanne',
'Joannie', 'Joanny', 'Joany', 'Joaquin', 'Jocelyn', 'Jodie', 'Jody', 'Joe', 'Joel', 'Joelle', 'Joesph', 'Joey',
'Johan', 'Johann', 'Johanna', 'Johathan', 'John', 'Johnathan', 'Johnathon', 'Johnnie', 'Johnny', 'Johnpaul',
'Johnson', 'Jolie', 'Jon', 'Jonas', 'Jonatan', 'Jonathan', 'Jonathon', 'Jordan', 'Jordane', 'Jordi', 'Jordon',
'Jordy', 'Jordyn', 'Jorge', 'Jose', 'Josefa', 'Josefina', 'Joseph', 'Josephine', 'Josh', 'Joshua', 'Joshuah',
'Josiah', 'Josiane', 'Josianne', 'Josie', 'Josue', 'Jovan', 'Jovani', 'Jovanny', 'Jovany', 'Joy', 'Joyce',
'Juana', 'Juanita', 'Judah', 'Judd', 'Jude', 'Judge', 'Judson', 'Judy', 'Jules', 'Julia', 'Julian', 'Juliana',
'Julianne', 'Julie', 'Julien', 'Juliet', 'Julio', 'Julius', 'June', 'Junior', 'Junius', 'Justen', 'Justice',
'Justina', 'Justine', 'Juston', 'Justus', 'Justyn', 'Juvenal', 'Juwan',
'Kacey', 'Kaci', 'Kacie', 'Kade', 'Kaden', 'Kadin', 'Kaela', 'Kaelyn', 'Kaia', 'Kailee', 'Kailey', 'Kailyn',
'Kaitlin', 'Kaitlyn', 'Kale', 'Kaleb', 'Kaleigh', 'Kaley', 'Kali', 'Kallie', 'Kameron', 'Kamille', 'Kamren',
'Kamron', 'Kamryn', 'Kane', 'Kara', 'Kareem', 'Karelle', 'Karen', 'Kari', 'Kariane', 'Karianne', 'Karina',
'Karine', 'Karl', 'Karlee', 'Karley', 'Karli', 'Karlie', 'Karolann', 'Karson', 'Kasandra', 'Kasey', 'Kassandra',
'Katarina', 'Katelin', 'Katelyn', 'Katelynn', 'Katharina', 'Katherine', 'Katheryn', 'Kathleen', 'Kathlyn',
'Kathryn', 'Kathryne', 'Katlyn', 'Katlynn', 'Katrina', 'Katrine', 'Kattie', 'Kavon', 'Kay', 'Kaya', 'Kaycee',
'Kayden', 'Kayla', 'Kaylah', 'Kaylee', 'Kayleigh', 'Kayley', 'Kayli', 'Kaylie', 'Kaylin', 'Keagan', 'Keanu',
'Keara', 'Keaton', 'Keegan', 'Keeley', 'Keely', 'Keenan', 'Keira', 'Keith', 'Kellen', 'Kelley', 'Kelli',
'Kellie', 'Kelly', 'Kelsi', 'Kelsie', 'Kelton', 'Kelvin', 'Ken', 'Kendall', 'Kendra', 'Kendrick', 'Kenna',
'Kennedi', 'Kennedy', 'Kenneth', 'Kennith', 'Kenny', 'Kenton', 'Kenya', 'Kenyatta', 'Kenyon', 'Keon', 'Keshaun',
'Keshawn', 'Keven', 'Kevin', 'Kevon', 'Keyon', 'Keyshawn', 'Khalid', 'Khalil', 'Kian', 'Kiana', 'Kianna',
'Kiara', 'Kiarra', 'Kiel', 'Kiera', 'Kieran', 'Kiley', 'Kim', 'Kimberly', 'King', 'Kip', 'Kira', 'Kirk',
'Kirsten', 'Kirstin', 'Kitty', 'Kobe', 'Koby', 'Kody', 'Kolby', 'Kole', 'Korbin', 'Korey', 'Kory', 'Kraig',
'Kris', 'Krista', 'Kristian', 'Kristin', 'Kristina', 'Kristofer', 'Kristoffer', 'Kristopher', 'Kristy',
'Krystal', 'Krystel', 'Krystina', 'Kurt', 'Kurtis', 'Kyla', 'Kyle', 'Kylee', 'Kyleigh', 'Kyler', 'Kylie',
'Kyra',
'Lacey', 'Lacy', 'Ladarius', 'Lafayette', 'Laila', 'Laisha', 'Lamar', 'Lambert', 'Lamont', 'Lance', 'Landen',
'Lane', 'Laney', 'Larissa', 'Laron', 'Larry', 'Larue', 'Laura', 'Laurel', 'Lauren', 'Laurence', 'Lauretta',
'Lauriane', 'Laurianne', 'Laurie', 'Laurine', 'Laury', 'Lauryn', 'Lavada', 'Lavern', 'Laverna', 'Laverne',
'Lavina', 'Lavinia', 'Lavon', 'Lavonne', 'Lawrence', 'Lawson', 'Layla', 'Layne', 'Lazaro', 'Lea', 'Leann',
'Leanna', 'Leanne', 'Leatha', 'Leda', 'Lee', 'Leif', 'Leila', 'Leilani', 'Lela', 'Lelah', 'Leland', 'Lelia',
'Lempi', 'Lemuel', 'Lenna', 'Lennie', 'Lenny', 'Lenora', 'Lenore', 'Leo', 'Leola', 'Leon', 'Leonard',
'Leonardo', 'Leone', 'Leonel', 'Leonie', 'Leonor', 'Leonora', 'Leopold', 'Leopoldo', 'Leora', 'Lera', 'Lesley',
'Leslie', 'Lesly', 'Lessie', 'Lester', 'Leta', 'Letha', 'Letitia', 'Levi', 'Lew', 'Lewis', 'Lexi', 'Lexie',
'Lexus', 'Lia', 'Liam', 'Liana', 'Libbie', 'Libby', 'Lila', 'Lilian', 'Liliana', 'Liliane', 'Lilla', 'Lillian',
'Lilliana', 'Lillie', 'Lilly', 'Lily', 'Lilyan', 'Lina', 'Lincoln', 'Linda', 'Lindsay', 'Lindsey', 'Linnea',
'Linnie', 'Linwood', 'Lionel', 'Lisa', 'Lisandro', 'Lisette', 'Litzy', 'Liza', 'Lizeth', 'Lizzie', 'Llewellyn',
'Lloyd', 'Logan', 'Lois', 'Lola', 'Lolita', 'Loma', 'Lon', 'London', 'Lonie', 'Lonnie', 'Lonny', 'Lonzo',
'Lora', 'Loraine', 'Loren', 'Lorena', 'Lorenz', 'Lorenza', 'Lorenzo', 'Lori', 'Lorine', 'Lorna', 'Lottie',
'Lou', 'Louie', 'Louisa', 'Lourdes', 'Louvenia', 'Lowell', 'Loy', 'Loyal', 'Loyce', 'Lucas', 'Luciano', 'Lucie',
'Lucienne', 'Lucile', 'Lucinda', 'Lucio', 'Lucious', 'Lucius', 'Lucy', 'Ludie', 'Ludwig', 'Lue', 'Luella',
'Luigi', 'Luis', 'Luisa', 'Lukas', 'Lula', 'Lulu', 'Luna', 'Lupe', 'Lura', 'Lurline', 'Luther', 'Luz', 'Lyda',
'Lydia', 'Lyla', 'Lynn', 'Lyric', 'Lysanne',
'Mabel', 'Mabelle', 'Mable', 'Mac', 'Macey', 'Maci', 'Macie', 'Mack', 'Mackenzie', 'Macy', 'Madaline',
'Madalyn', 'Maddison', 'Madeline', 'Madelyn', 'Madelynn', 'Madge', 'Madie', 'Madilyn', 'Madisen', 'Madison',
'Madisyn', 'Madonna', 'Madyson', 'Mae', 'Maegan', 'Maeve', 'Mafalda', 'Magali', 'Magdalen', 'Magdalena',
'Maggie', 'Magnolia', 'Magnus', 'Maia', 'Maida', 'Maiya', 'Major', 'Makayla', 'Makenna', 'Makenzie', 'Malachi',
'Malcolm', 'Malika', 'Malinda', 'Mallie', 'Mallory', 'Malvina', 'Mandy', 'Manley', 'Manuel', 'Manuela', 'Mara',
'Marc', 'Marcel', 'Marcelina', 'Marcelino', 'Marcella', 'Marcelle', 'Marcellus', 'Marcelo', 'Marcia', 'Marco',
'Marcos', 'Marcus', 'Margaret', 'Margarete', 'Margarett', 'Margaretta', 'Margarette', 'Margarita', 'Marge',
'Margie', 'Margot', 'Margret', 'Marguerite', 'Maria', 'Mariah', 'Mariam', 'Marian', 'Mariana', 'Mariane',
'Marianna', 'Marianne', 'Mariano', 'Maribel', 'Marie', 'Mariela', 'Marielle', 'Marietta', 'Marilie', 'Marilou',
'Marilyne', 'Marina', 'Mario', 'Marion', 'Marisa', 'Marisol', 'Maritza', 'Marjolaine', 'Marjorie', 'Marjory',
'Mark', 'Markus', 'Marlee', 'Marlen', 'Marlene', 'Marley', 'Marlin', 'Marlon', 'Marques', 'Marquis', 'Marquise',
'Marshall', 'Marta', 'Martin', 'Martina', 'Martine', 'Marty', 'Marvin', 'Mary', 'Maryam', 'Maryjane', 'Maryse',
'Mason', 'Mateo', 'Mathew', 'Mathias', 'Mathilde', 'Matilda', 'Matilde', 'Matt', 'Matteo', 'Mattie', 'Maud',
'Maude', 'Maudie', 'Maureen', 'Maurice', 'Mauricio', 'Maurine', 'Maverick', 'Mavis', 'Max', 'Maxie', 'Maxime',
'Maximilian', 'Maximillia', 'Maximillian', 'Maximo', 'Maximus', 'Maxine', 'Maxwell', 'May', 'Maya', 'Maybell',
'Maybelle', 'Maye', 'Maymie', 'Maynard', 'Mayra', 'Mazie', 'Mckayla', 'Mckenna', 'Mckenzie', 'Meagan',
'Meaghan', 'Meda', 'Megane', 'Meggie', 'Meghan', 'Mekhi', 'Melany', 'Melba', 'Melisa', 'Melissa', 'Mellie',
'Melody', 'Melvin', 'Melvina', 'Melyna', 'Melyssa', 'Mercedes', 'Meredith', 'Merl', 'Merle', 'Merlin',
'Merritt', 'Mertie', 'Mervin', 'Meta', 'Mia', 'Micaela', 'Micah', 'Michael', 'Michaela', 'Michale', 'Micheal',
'Michel', 'Michele', 'Michelle', 'Miguel', 'Mikayla', 'Mike', 'Mikel', 'Milan', 'Miles', 'Milford', 'Miller',
'Millie', 'Milo', 'Milton', 'Mina', 'Minerva', 'Minnie', 'Miracle', 'Mireille', 'Mireya', 'Misael', 'Missouri',
'Misty', 'Mitchel', 'Mitchell', 'Mittie', 'Modesta', 'Modesto', 'Mohamed', 'Mohammad', 'Mohammed', 'Moises',
'Mollie', 'Molly', 'Mona', 'Monica', 'Monique', 'Monroe', 'Monserrat', 'Monserrate', 'Montana', 'Monte',
'Monty', 'Morgan', 'Moriah', 'Morris', 'Mortimer', 'Morton', 'Mose', 'Moses', 'Moshe', 'Mossie', 'Mozell',
'Mozelle', 'Muhammad', 'Muriel', 'Murl', 'Murphy', 'Murray', 'Mustafa', 'Mya', 'Myah', 'Mylene', 'Myles',
'Myra', 'Myriam', 'Myrl', 'Myrna', 'Myron', 'Myrtice', 'Myrtie', 'Myrtis', 'Myrtle',
'Nadia', 'Nakia', 'Name', 'Nannie', 'Naomi', 'Naomie', 'Napoleon', 'Narciso', 'Nash', 'Nasir', 'Nat', 'Natalia',
'Natalie', 'Natasha', 'Nathan', 'Nathanael', 'Nathanial', 'Nathaniel', 'Nathen', 'Nayeli', 'Neal', 'Ned',
'Nedra', 'Neha', 'Neil', 'Nelda', 'Nella', 'Nelle', 'Nellie', 'Nels', 'Nelson', 'Neoma', 'Nestor', 'Nettie',
'Neva', 'Newell', 'Newton', 'Nia', 'Nicholas', 'Nicholaus', 'Nichole', 'Nick', 'Nicklaus', 'Nickolas', 'Nico',
'Nicola', 'Nicolas', 'Nicole', 'Nicolette', 'Nigel', 'Nikita', 'Nikki', 'Nikko', 'Niko', 'Nikolas', 'Nils',
'Nina', 'Noah', 'Noble', 'Noe', 'Noel', 'Noelia', 'Noemi', 'Noemie', 'Noemy', 'Nola', 'Nolan', 'Nona', 'Nora',
'Norbert', 'Norberto', 'Norene', 'Norma', 'Norris', 'Norval', 'Norwood', 'Nova', 'Novella', 'Nya', 'Nyah',
'Nyasia',
'Obie', 'Oceane', 'Ocie', 'Octavia', 'Oda', 'Odell', 'Odessa', 'Odie', 'Ofelia', 'Okey', 'Ola', 'Olaf', 'Ole',
'Olen', 'Oleta', 'Olga', 'Olin', 'Oliver', 'Ollie', 'Oma', 'Omari', 'Omer', 'Ona', 'Onie', 'Opal', 'Ophelia',
'Ora', 'Oral', 'Oran', 'Oren', 'Orie', 'Orin', 'Orion', 'Orland', 'Orlando', 'Orlo', 'Orpha', 'Orrin', 'Orval',
'Orville', 'Osbaldo', 'Osborne', 'Oscar', 'Osvaldo', 'Oswald', 'Oswaldo', 'Otha', 'Otho', 'Otilia', 'Otis',
'Ottilie', 'Ottis', 'Otto', 'Ova', 'Owen', 'Ozella',
'Pablo', 'Paige', 'Palma', 'Pamela', 'Pansy', 'Paolo', 'Paris', 'Parker', 'Pascale', 'Pasquale', 'Pat',
'Patience', 'Patricia', 'Patrick', 'Patsy', 'Pattie', 'Paul', 'Paula', 'Pauline', 'Paxton', 'Payton', 'Pearl',
'Pearlie', 'Pearline', 'Pedro', 'Peggie', 'Penelope', 'Percival', 'Percy', 'Perry', 'Pete', 'Peter', 'Petra',
'Peyton', 'Philip', 'Phoebe', 'Phyllis', 'Pierce', 'Pierre', 'Pietro', 'Pink', 'Pinkie', 'Piper', 'Polly',
'Porter', 'Precious', 'Presley', 'Preston', 'Price', 'Prince', 'Princess', 'Priscilla', 'Providenci',
'Prudence',
'Queen', 'Queenie', 'Quentin', 'Quincy', 'Quinn', 'Quinten', 'Quinton',
'Rachael', 'Rachel', 'Rachelle', 'Rae', 'Raegan', 'Rafael', 'Rafaela', 'Raheem', 'Rahsaan', 'Rahul', 'Raina',
'Raleigh', 'Ralph', 'Ramiro', 'Ramon', 'Ramona', 'Randal', 'Randall', 'Randi', 'Randy', 'Ransom', 'Raoul',
'Raphael', 'Raphaelle', 'Raquel', 'Rashad', 'Rashawn', 'Rasheed', 'Raul', 'Raven', 'Ray', 'Raymond', 'Raymundo',
'Reagan', 'Reanna', 'Reba', 'Rebeca', 'Rebecca', 'Rebeka', 'Rebekah', 'Reece', 'Reed', 'Reese', 'Regan',
'Reggie', 'Reginald', 'Reid', 'Reilly', 'Reina', 'Reinhold', 'Remington', 'Rene', 'Renee', 'Ressie', 'Reta',
'Retha', 'Retta', 'Reuben', 'Reva', 'Rex', 'Rey', 'Reyes', 'Reymundo', 'Reyna', 'Reynold', 'Rhea', 'Rhett',
'Rhianna', 'Rhiannon', 'Rhoda', 'Ricardo', 'Richard', 'Richie', 'Richmond', 'Rick', 'Rickey', 'Rickie', 'Ricky',
'Rico', 'Rigoberto', 'Riley', 'Rita', 'River', 'Robb', 'Robbie', 'Robert', 'Roberta', 'Roberto', 'Robin',
'Robyn', 'Rocio', 'Rocky', 'Rod', 'Roderick', 'Rodger', 'Rodolfo', 'Rodrick', 'Rodrigo', 'Roel', 'Rogelio',
'Roger', 'Rogers', 'Rolando', 'Rollin', 'Roma', 'Romaine', 'Roman', 'Ron', 'Ronaldo', 'Ronny', 'Roosevelt',
'Rory', 'Rosa', 'Rosalee', 'Rosalia', 'Rosalind', 'Rosalinda', 'Rosalyn', 'Rosamond', 'Rosanna', 'Rosario',
'Roscoe', 'Rose', 'Rosella', 'Roselyn', 'Rosemarie', 'Rosemary', 'Rosendo', 'Rosetta', 'Rosie', 'Rosina',
'Roslyn', 'Ross', 'Rossie', 'Rowan', 'Rowena', 'Rowland', 'Roxane', 'Roxanne', 'Roy', 'Royal', 'Royce',
'Rozella', 'Ruben', 'Rubie', 'Ruby', 'Rubye', 'Rudolph', 'Rudy', 'Rupert', 'Russ', 'Russel', 'Russell', 'Rusty',
'Ruth', 'Ruthe', 'Ruthie', 'Ryan', 'Ryann', 'Ryder', 'Rylan', 'Rylee', 'Ryleigh', 'Ryley',
'Sabina', 'Sabrina', 'Sabryna', 'Sadie', 'Sadye', 'Sage', 'Saige', 'Sallie', 'Sally', 'Salma', 'Salvador',
'Salvatore', 'Sam', 'Samanta', 'Samantha', 'Samara', 'Samir', 'Sammie', 'Sammy', 'Samson', 'Sandra', 'Sandrine',
'Sandy', 'Sanford', 'Santa', 'Santiago', 'Santina', 'Santino', 'Santos', 'Sarah', 'Sarai', 'Sarina', 'Sasha',
'Saul', 'Savanah', 'Savanna', 'Savannah', 'Savion', 'Scarlett', 'Schuyler', 'Scot', 'Scottie', 'Scotty',
'Seamus', 'Sean', 'Sebastian', 'Sedrick', 'Selena', 'Selina', 'Selmer', 'Serena', 'Serenity', 'Seth', 'Shad',
'Shaina', 'Shakira', 'Shana', 'Shane', 'Shanel', 'Shanelle', 'Shania', 'Shanie', 'Shaniya', 'Shanna', 'Shannon',
'Shanny', 'Shanon', 'Shany', 'Sharon', 'Shaun', 'Shawn', 'Shawna', 'Shaylee', 'Shayna', 'Shayne', 'Shea',
'Sheila', 'Sheldon', 'Shemar', 'Sheridan', 'Sherman', 'Sherwood', 'Shirley', 'Shyann', 'Shyanne', 'Sibyl',
'Sid', 'Sidney', 'Sienna', 'Sierra', 'Sigmund', 'Sigrid', 'Sigurd', 'Silas', 'Sim', 'Simeon', 'Simone',
'Sincere', 'Sister', 'Skye', 'Skyla', 'Skylar', 'Sofia', 'Soledad', 'Solon', 'Sonia', 'Sonny', 'Sonya',
'Sophia', 'Sophie', 'Spencer', 'Stacey', 'Stacy', 'Stan', 'Stanford', 'Stanley', 'Stanton', 'Stefan',
'Stefanie', 'Stella', 'Stephan', 'Stephania', 'Stephanie', 'Stephany', 'Stephen', 'Stephon', 'Sterling',
'Steve', 'Stevie', 'Stewart', 'Stone', 'Stuart', 'Summer', 'Sunny', 'Susan', 'Susana', 'Susanna', 'Susie',
'Suzanne', 'Sven', 'Syble', 'Sydnee', 'Sydney', 'Sydni', 'Sydnie', 'Sylvan', 'Sylvester', 'Sylvia',
'Tabitha', 'Tad', 'Talia', 'Talon', 'Tamara', 'Tamia', 'Tania', 'Tanner', 'Tanya', 'Tara', 'Taryn', 'Tate',
'Tatum', 'Tatyana', 'Taurean', 'Tavares', 'Taya', 'Taylor', 'Teagan', 'Ted', 'Telly', 'Terence', 'Teresa',
'Terrance', 'Terrell', 'Terrence', 'Terrill', 'Terry', 'Tess', 'Tessie', 'Tevin', 'Thad', 'Thaddeus', 'Thalia',
'Thea', 'Thelma', 'Theo', 'Theodora', 'Theodore', 'Theresa', 'Therese', 'Theresia', 'Theron', 'Thomas', 'Thora',
'Thurman', 'Tia', 'Tiana', 'Tianna', 'Tiara', 'Tierra', 'Tiffany', 'Tillman', 'Timmothy', 'Timmy', 'Timothy',
'Tina', 'Tito', 'Titus', 'Tobin', 'Toby', 'Tod', 'Tom', 'Tomas', 'Tomasa', 'Tommie', 'Toney', 'Toni', 'Tony',
'Torey', 'Torrance', 'Torrey', 'Toy', 'Trace', 'Tracey', 'Tracy', 'Travis', 'Travon', 'Tre', 'Tremaine',
'Tremayne', 'Trent', 'Trenton', 'Tressa', 'Tressie', 'Treva', 'Trever', 'Trevion', 'Trevor', 'Trey', 'Trinity',
'Trisha', 'Tristian', 'Tristin', 'Triston', 'Troy', 'Trudie', 'Trycia', 'Trystan', 'Turner', 'Twila', 'Tyler',
'Tyra', 'Tyree', 'Tyreek', 'Tyrel', 'Tyrell', 'Tyrese', 'Tyrique', 'Tyshawn', 'Tyson', 'Ubaldo',
'Ulices', 'Ulises', 'Una', 'Unique', 'Urban', 'Uriah', 'Uriel', 'Ursula',
'Vada', 'Valentin', 'Valentina', 'Valentine', 'Valerie', 'Vallie', 'Van', 'Vance', 'Vanessa', 'Vaughn', 'Veda',
'Velda', 'Vella', 'Velma', 'Velva', 'Vena', 'Verda', 'Verdie', 'Vergie', 'Verla', 'Verlie', 'Vern', 'Verna',
'Verner', 'Vernice', 'Vernie', 'Vernon', 'Verona', 'Veronica', 'Vesta', 'Vicenta', 'Vicente', 'Vickie', 'Vicky',
'Victor', 'Victoria', 'Vida', 'Vidal', 'Vilma', 'Vince', 'Vincent', 'Vincenza', 'Vincenzo', 'Vinnie', 'Viola',
'Violet', 'Violette', 'Virgie', 'Virgil', 'Virginia', 'Virginie', 'Vita', 'Vito', 'Viva', 'Vivian', 'Viviane',
'Vivianne', 'Vivien', 'Vivienne', 'Vladimir',
'Wade', 'Waino', 'Waldo', 'Walker', 'Wallace', 'Walter', 'Walton', 'Wanda', 'Ward', 'Warren', 'Watson', 'Wava',
'Waylon', 'Wayne', 'Webster', 'Weldon', 'Wellington', 'Wendell', 'Wendy', 'Werner', 'Westley', 'Weston',
'Whitney', 'Wilber', 'Wilbert', 'Wilburn', 'Wiley', 'Wilford', 'Wilfred', 'Wilfredo', 'Wilfrid', 'Wilhelm',
'Wilhelmine', 'Will', 'Willa', 'Willard', 'William', 'Willie', 'Willis', 'Willow', 'Willy', 'Wilma', 'Wilmer',
'Wilson', 'Wilton', 'Winfield', 'Winifred', 'Winnifred', 'Winona', 'Winston', 'Woodrow', 'Wyatt', 'Wyman',
'Xander',
'Xavier', 'Xzavier',
'Yadira', 'Yasmeen', 'Yasmin', 'Yasmine', 'Yazmin', 'Yesenia', 'Yessenia', 'Yolanda', 'Yoshiko', 'Yvette',
'Yvonne',
'Zachariah', 'Zachary', 'Zachery', 'Zack', 'Zackary', 'Zackery', 'Zakary', 'Zander', 'Zane', 'Zaria',
'Zechariah', 'Zelda', 'Zella', 'Zelma', 'Zena', 'Zetta', 'Zion', 'Zita', 'Zoe', 'Zoey', 'Zoie', 'Zoila', 'Zola',
'Zora', 'Zula'
)
last_names = (
'Abbott', 'Abernathy', 'Abshire', 'Adams', 'Altenwerth', 'Anderson', 'Ankunding', 'Armstrong', 'Auer',
'Aufderhar',
'Bahringer', 'Bailey', 'Balistreri', 'Barrows', 'Bartell', 'Bartoletti', 'Barton', 'Bashirian', 'Batz', 'Bauch',
'Baumbach', 'Bayer', 'Beahan', 'Beatty', 'Bechtelar', 'Becker', 'Bednar', 'Beer', 'Beier', 'Berge', 'Bergnaum',
'Bergstrom', 'Bernhard', 'Bernier', 'Bins', 'Blanda', 'Blick', 'Block', 'Bode', 'Boehm', 'Bogan', 'Bogisich',
'Borer', 'Bosco', 'Botsford', 'Boyer', 'Boyle', 'Bradtke', 'Brakus', 'Braun', 'Breitenberg', 'Brekke', 'Brown',
'Bruen', 'Buckridge',
'Carroll', 'Carter', 'Cartwright', 'Casper', 'Cassin', 'Champlin', 'Christiansen', 'Cole', 'Collier', 'Collins',
'Conn', 'Connelly', 'Conroy', 'Considine', 'Corkery', 'Cormier', 'Corwin', 'Cremin', 'Crist', 'Crona', 'Cronin',
'Crooks', 'Cruickshank', 'Cummerata', 'Cummings',
'Dach', 'D\'Amore', 'Daniel', 'Dare', 'Daugherty', 'Davis', 'Deckow', 'Denesik', 'Dibbert', 'Dickens', 'Dicki',
'Dickinson', 'Dietrich', 'Donnelly', 'Dooley', 'Douglas', 'Doyle', 'DuBuque', 'Durgan',
'Ebert', 'Effertz', 'Eichmann', 'Emard', 'Emmerich', 'Erdman', 'Ernser', 'Fadel',
'Fahey', 'Farrell', 'Fay', 'Feeney', 'Feest', 'Feil', 'Ferry', 'Fisher', 'Flatley', 'Frami', 'Franecki',
'Friesen', 'Fritsch', 'Funk',
'Gaylord', 'Gerhold', 'Gerlach', 'Gibson', 'Gislason', 'Gleason', 'Gleichner', 'Glover', 'Goldner', 'Goodwin',
'Gorczany', 'Gottlieb', 'Goyette', 'Grady', 'Graham', 'Grant', 'Green', 'Greenfelder', 'Greenholt', 'Grimes',
'Gulgowski', 'Gusikowski', 'Gutkowski', 'Gutmann',
'Haag', 'Hackett', 'Hagenes', 'Hahn', 'Haley', 'Halvorson', 'Hamill', 'Hammes', 'Hand', 'Hane', 'Hansen',
'Harber', 'Harris', 'Hartmann', 'Harvey', 'Hauck', 'Hayes', 'Heaney', 'Heathcote', 'Hegmann', 'Heidenreich',
'Heller', 'Herman', 'Hermann', 'Hermiston', 'Herzog', 'Hessel', 'Hettinger', 'Hickle', 'Hilll', 'Hills',
'Hilpert', 'Hintz', 'Hirthe', 'Hodkiewicz', 'Hoeger', 'Homenick', 'Hoppe', 'Howe', 'Howell', 'Hudson', 'Huel',
'Huels', 'Hyatt',
'Jacobi', 'Jacobs', 'Jacobson', 'Jakubowski', 'Jaskolski', 'Jast', 'Jenkins', 'Jerde', 'Jewess', 'Johns',
'Johnson', 'Johnston', 'Jones',
'Kassulke', 'Kautzer', 'Keebler', 'Keeling', 'Kemmer', 'Kerluke', 'Kertzmann', 'Kessler', 'Kiehn', 'Kihn',
'Kilback', 'King', 'Kirlin', 'Klein', 'Kling', 'Klocko', 'Koch', 'Koelpin', 'Koepp', 'Kohler', 'Konopelski',
'Koss', 'Kovacek', 'Kozey', 'Krajcik', 'Kreiger', 'Kris', 'Kshlerin', 'Kub', 'Kuhic', 'Kuhlman', 'Kuhn',
'Kulas', 'Kunde', 'Kunze', 'Kuphal', 'Kutch', 'Kuvalis',
'Labadie', 'Lakin', 'Lang', 'Langosh', 'Langworth', 'Larkin', 'Larson', 'Leannon', 'Lebsack', 'Ledner',
'Leffler', 'Legros', 'Lehner', 'Lemke', 'Lesch', 'Leuschke', 'Lind', 'Lindgren', 'Littel', 'Little', 'Lockman',
'Lowe', 'Lubowitz', 'Lueilwitz', 'Luettgen', 'Lynch',
'Macejkovic', 'Maggio', 'Mann', 'Mante', 'Marks', 'Marquardt', 'Marvin', 'Mayer', 'Mayert', 'McClure',
'McCullough', 'McDermott', 'McGlynn', 'McKenzie', 'McLaughlin', 'Medhurst', 'Mertz', 'Metz', 'Miller', 'Mills',
'Mitchell', 'Moen', 'Mohr', 'Monahan', 'Moore', 'Morar', 'Morissette', 'Mosciski', 'Mraz', 'Mueller', 'Muller',
'Murazik', 'Murphy', 'Murray',
'Nader', 'Nicolas', 'Nienow', 'Nikolaus', 'Nitzsche', 'Nolan',
'Oberbrunner', 'O\'Connell', 'O\'Conner', 'O\'Hara', 'O\'Keefe', 'O\'Kon', 'Okuneva', 'Olson', 'Ondricka',
'O\'Reilly', 'Orn', 'Ortiz', 'Osinski',
'Pacocha', 'Padberg', 'Pagac', 'Parisian', 'Parker', 'Paucek', 'Pfannerstill', 'Pfeffer', 'Pollich', 'Pouros',
'Powlowski', 'Predovic', 'Price', 'Prohaska', 'Prosacco', 'Purdy',
'Quigley', 'Quitzon',
'Rath', 'Ratke', 'Rau', 'Raynor', 'Reichel', 'Reichert', 'Reilly', 'Reinger', 'Rempel', 'Renner', 'Reynolds',
'Rice', 'Rippin', 'Ritchie', 'Robel', 'Roberts', 'Rodriguez', 'Rogahn', 'Rohan', 'Rolfson', 'Romaguera', 'Roob',
'Rosenbaum', 'Rowe', 'Ruecker', 'Runolfsdottir', 'Runolfsson', 'Runte', 'Russel', 'Rutherford', 'Ryan',
'Sanford', 'Satterfield', 'Sauer', 'Sawayn',
'Schaden', 'Schaefer', 'Schamberger', 'Schiller', 'Schimmel', 'Schinner', 'Schmeler', 'Schmidt', 'Schmitt',
'Schneider', 'Schoen', 'Schowalter', 'Schroeder', 'Schulist', 'Schultz', 'Schumm', 'Schuppe', 'Schuster',
'Senger', 'Shanahan', 'Shields', 'Simonis', 'Sipes', 'Skiles', 'Smith', 'Smitham', 'Spencer', 'Spinka',
'Sporer', 'Stamm', 'Stanton', 'Stark', 'Stehr', 'Steuber', 'Stiedemann', 'Stokes', 'Stoltenberg', 'Stracke',
'Streich', 'Stroman', 'Strosin', 'Swaniawski', 'Swift',
'Terry', 'Thiel', 'Thompson', 'Tillman', 'Torp', 'Torphy', 'Towne', 'Toy', 'Trantow', 'Tremblay', 'Treutel',
'Tromp', 'Turcotte', 'Turner',
'Ullrich', 'Upton',
'Vandervort', 'Veum', 'Volkman', 'Von', 'VonRueden',
'Waelchi', 'Walker', 'Walsh', 'Walter', 'Ward', 'Waters', 'Watsica', 'Weber', 'Wehner', 'Weimann', 'Weissnat',
'Welch', 'West', 'White', 'Wiegand', 'Wilderman', 'Wilkinson', 'Will', 'Williamson', 'Willms', 'Windler',
'Wintheiser', 'Wisoky', 'Wisozk', 'Witting', 'Wiza', 'Wolf', 'Wolff', 'Wuckert', 'Wunsch', 'Wyman',
'Yost', 'Yundt',
'Zboncak', 'Zemlak', 'Ziemann', 'Zieme', 'Zulauf'
)
prefixes = ('Mr.', 'Mrs.', 'Ms.', 'Miss', 'Dr.')
suffixes = ('Jr.', 'Sr.', 'I', 'II', 'III', 'IV', 'V', 'MD', 'DDS', 'PhD', 'DVM')
@classmethod
def prefix(cls):
return cls.random_element(cls.prefixes)
@classmethod
def suffix(cls):
return cls.random_element(cls.suffixes)
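    # Hypothetical usage sketch (assumes the standard faker Provider wiring
    # used by the other locale providers; the Faker entry point and locale
    # string below are illustrative):
    # >>> from faker import Faker
    # >>> fake = Faker('en_US')
    # >>> fake.prefix()   # e.g. 'Dr.'
    # >>> fake.suffix()   # e.g. 'Jr.'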
|
ShaguptaS/faker
|
faker/providers/en_US/person.py
|
Python
|
mit
| 38,668
|
[
"Amber",
"Brian",
"COLUMBUS",
"CRYSTAL",
"Dalton",
"Desmond"
] |
9a6e95450c3ad359267f2e8ee83fe824c2ebfe4c5bed4eacca1ab642b176aa4a
|
"""
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
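# Note: this example targets an older scikit-learn API. A rough modern
# equivalent (an assumption; attribute names differ) would be
# mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X) and
# mixture.BayesianGaussianMixture(n_components=5, covariance_type='full').fit(X),
# reading covariances_ instead of the private _get_covars().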
|
vermouthmjl/scikit-learn
|
examples/mixture/plot_gmm.py
|
Python
|
bsd-3-clause
| 2,875
|
[
"Gaussian"
] |
198df19b49d8383b2a4da91ee28d3854bbf55231b0e518b0956e77543a1ed34c
|
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
gamma = 4.4983169634398596e-06
def derivs(solarray, t, M, S):
"""Computes the derivatives of the equations dictating the behavior of the stars orbiting galaxy M and the
disrupting galaxy, S
Parameters
--------------
solarray : solution array for the differential equations
t : array of time values
M : central mass of main galaxy
S : central mass of disrupting galaxy
Returns
--------------
derivarray : an array of the velocities and accelerations of galaxy S and stars, m
"""
derivarray = np.zeros(len(solarray))
R_x = solarray[0]
R_y = solarray[1]
R = np.sqrt(solarray[0]**2+solarray[1]**2)
vR_x = solarray[2]
vR_y = solarray[3]
dR_x = vR_x
dR_y = vR_y
dvR_x = ((-gamma*(M+S)*R_x)/R**3)
dvR_y = ((-gamma*(M+S)*R_y)/R**3)
derivarray[0] = dR_x
derivarray[1] = dR_y
derivarray[2] = dvR_x
derivarray[3] = dvR_y
for n in range(1,int(len(solarray)/4)):
r_x = solarray[4*n]
r_y = solarray[4*n+1]
r = np.sqrt(r_x**2+r_y**2)
vr_x = solarray[4*n+2]
vr_y = solarray[4*n+3]
p_x = R_x - r_x
p_y = R_y - r_y
p = np.sqrt(p_x**2+p_y**2)
dr_x = vr_x
dr_y = vr_y
dvr_x = -gamma*((M/r**3)*r_x-(S/p**3)*p_x+(S/R**3)*R_x)
dvr_y = -gamma*((M/r**3)*r_y-(S/p**3)*p_y+(S/R**3)*R_y)
derivarray[4*n] = dr_x
derivarray[4*n+1] = dr_y
derivarray[4*n+2] = dvr_x
derivarray[4*n+3] = dvr_y
return derivarray
def equationsolver(ic,max_time,time_step,M,S):
"""Solves the differential equations using odeint and the derivs function defined above
Parameters
-------------
ic : initial conditions
max_time : maximum time to be used for time array
    time_step : number of points in the time array (passed to np.linspace)
M : central mass of main galaxy
S : central mass of disrupting galaxy
Returns
------------
sol : solution array for the differential equations
"""
t = np.linspace(0,max_time,time_step)
sol = odeint(derivs, ic, t, args=(M,S),atol=1e-3,rtol=1e-3)
return sol
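# Minimal usage sketch; the masses, initial conditions, and time grid below
# are made-up illustration values, not taken from the original project:
if __name__ == '__main__':
    M, S = 1e11, 1e11  # hypothetical central masses
    # state layout: [R_x, R_y, vR_x, vR_y] for galaxy S, then
    # [r_x, r_y, vr_x, vr_y] for each star
    ic = [50.0, 0.0, 0.0, 0.5,
          5.0, 0.0, 0.0, 300.0]  # one star on a roughly circular orbit
    sol = equationsolver(ic, max_time=100.0, time_step=2000, M=M, S=S)
    plt.plot(sol[:, 4], sol[:, 5])  # trajectory of the single star
    plt.show()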
|
bjshaw/phys202-project
|
galaxy_project/DE_solver.py
|
Python
|
mit
| 2,258
|
[
"Galaxy"
] |
7ab5b67dd4a4074dbd8cbd3507935a1312deca206ec3ba2f870cd9a4a4c438fd
|
#
# Copyright (C) 2000 greg Landrum
#
""" unit tests for the ID3 implementation """
from rdkit import RDConfig
import unittest
from rdkit.ML.DecTree import ID3,DecTree
import cPickle
from rdkit.ML.Data import MLData
class ID3TestCase(unittest.TestCase):
def setUp(self):
print '\n%s: '%self.shortDescription(),
self.basicTreeName=RDConfig.RDCodeDir+'/ML/DecTree/test_data/BasicTree.pkl'
self.multiTreeName=RDConfig.RDCodeDir+'/ML/DecTree/test_data/MultiTree.pkl'
def _setupBasicTree(self):
examples = [[0,0,0,0,0],
[0,0,0,1,0],
[1,0,0,0,1],
[2,1,0,0,1],
[2,2,1,0,1],
[2,2,1,1,0],
[1,2,1,1,1],
[0,1,0,0,0],
[0,2,1,0,1],
[2,1,1,0,1],
[0,1,1,1,1],
[1,1,0,1,1],
[1,0,1,0,1],
[2,1,0,1,0]
]
data = MLData.MLQuantDataSet(examples)
attrs = range(0,data.GetNVars())
t1 = ID3.ID3Boot(data.GetAllData(),attrs,data.GetNPossibleVals())
self.t1 = t1
self.examples = examples
def testBasicTree(self):
" testing basic tree growth "
self._setupBasicTree()
inFile = open(self.basicTreeName,'r')
t2 = cPickle.load(inFile)
assert self.t1 == t2, 'Incorrect tree generated.'
def _setupMultiTree(self):
examples = [[0,1,0,0],
[0,0,0,1],
[0,0,1,2],
[0,1,1,2],
[1,0,0,2],
[1,0,1,2],
[1,1,0,2],
[1,1,1,0]
]
data = MLData.MLQuantDataSet(examples)
attrs = range(0,data.GetNVars())
t1 = ID3.ID3Boot(data.GetAllData(),attrs,data.GetNPossibleVals())
self.t1 = t1
self.examples = examples
def testMultiTree(self):
" testing multivalued tree growth "
self._setupMultiTree()
inFile = open(self.multiTreeName,'r')
t2 = cPickle.load(inFile)
assert self.t1 == t2, 'Incorrect tree generated.'
def testClassify(self):
" testing basic tree classification "
self._setupBasicTree()
assert self.t1.ClassifyExample(self.examples[0])==self.examples[0][-1],\
'BasicExample 0 misclassified'
assert self.t1.ClassifyExample(self.examples[1])==self.examples[1][-1],\
'BasicExample 1 misclassified'
assert self.t1.ClassifyExample(self.examples[6])==self.examples[6][-1],\
'BasicExample 6 misclassified'
self._setupMultiTree()
assert self.t1.ClassifyExample(self.examples[0])==self.examples[0][-1],\
'MultiExample 0 misclassified'
assert self.t1.ClassifyExample(self.examples[1])==self.examples[1][-1],\
'MultiExample 1 misclassified'
assert self.t1.ClassifyExample(self.examples[6])==self.examples[6][-1],\
'MultiExample 6 misclassified'
# ------------- force python in the ID3 code
def _setupPyBasicTree(self):
from rdkit.ML.InfoTheory import entropy
ID3.entropy.InfoEntropy = entropy.PyInfoEntropy
ID3.entropy.InfoGain = entropy.PyInfoGain
examples = [[0,0,0,0,0],
[0,0,0,1,0],
[1,0,0,0,1],
[2,1,0,0,1],
[2,2,1,0,1],
[2,2,1,1,0],
[1,2,1,1,1],
[0,1,0,0,0],
[0,2,1,0,1],
[2,1,1,0,1],
[0,1,1,1,1],
[1,1,0,1,1],
[1,0,1,0,1],
[2,1,0,1,0]
]
data = MLData.MLQuantDataSet(examples)
attrs = range(0,data.GetNVars())
t1 = ID3.ID3Boot(data.GetAllData(),attrs,data.GetNPossibleVals())
self.t1 = t1
self.examples = examples
def testPyBasicTree(self):
" testing basic tree growth (python entropy code) "
self._setupPyBasicTree()
inFile = open(self.basicTreeName,'r')
t2 = cPickle.load(inFile)
assert self.t1 == t2, 'Incorrect tree generated.'
def _setupPyMultiTree(self):
from rdkit.ML.InfoTheory import entropy
ID3.entropy.InfoEntropy = entropy.PyInfoEntropy
ID3.entropy.InfoGain = entropy.PyInfoGain
examples = [[0,1,0,0],
[0,0,0,1],
[0,0,1,2],
[0,1,1,2],
[1,0,0,2],
[1,0,1,2],
[1,1,0,2],
[1,1,1,0]
]
data = MLData.MLQuantDataSet(examples)
attrs = range(0,data.GetNVars())
t1 = ID3.ID3Boot(data.GetAllData(),attrs,data.GetNPossibleVals())
self.t1 = t1
self.examples = examples
def testPyMultiTree(self):
" testing multivalued tree growth (python entropy code) "
self._setupPyMultiTree()
inFile = open(self.multiTreeName,'r')
t2 = cPickle.load(inFile)
assert self.t1 == t2, 'Incorrect tree generated.'
def testPyClassify(self):
" testing tree classification (python entropy code) "
self._setupPyBasicTree()
assert self.t1.ClassifyExample(self.examples[0])==self.examples[0][-1],\
'BasicExample 0 misclassified'
assert self.t1.ClassifyExample(self.examples[1])==self.examples[1][-1],\
'BasicExample 1 misclassified'
assert self.t1.ClassifyExample(self.examples[6])==self.examples[6][-1],\
'BasicExample 6 misclassified'
self._setupMultiTree()
assert self.t1.ClassifyExample(self.examples[0])==self.examples[0][-1],\
'MultiExample 0 misclassified'
assert self.t1.ClassifyExample(self.examples[1])==self.examples[1][-1],\
'MultiExample 1 misclassified'
assert self.t1.ClassifyExample(self.examples[6])==self.examples[6][-1],\
'MultiExample 6 misclassified'
if __name__ == '__main__':
unittest.main()
|
rdkit/rdkit-orig
|
rdkit/ML/DecTree/UnitTestID3.py
|
Python
|
bsd-3-clause
| 5,726
|
[
"RDKit"
] |
7f506a07cd8487db6965888cc9454956f3447e87a1e65a2ff635202bf7d7b1f0
|
"""Support for Z-Wave lights."""
from __future__ import annotations
import logging
from typing import Any
from zwave_js_server.client import Client as ZwaveClient
from zwave_js_server.const import ColorComponent, CommandClass
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_RGBW_COLOR,
ATTR_TRANSITION,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_HS,
COLOR_MODE_RGBW,
DOMAIN as LIGHT_DOMAIN,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
import homeassistant.util.color as color_util
from .const import DATA_CLIENT, DOMAIN
from .discovery import ZwaveDiscoveryInfo
from .entity import ZWaveBaseEntity
LOGGER = logging.getLogger(__name__)
MULTI_COLOR_MAP = {
ColorComponent.WARM_WHITE: "warmWhite",
ColorComponent.COLD_WHITE: "coldWhite",
ColorComponent.RED: "red",
ColorComponent.GREEN: "green",
ColorComponent.BLUE: "blue",
ColorComponent.AMBER: "amber",
ColorComponent.CYAN: "cyan",
ColorComponent.PURPLE: "purple",
}
TRANSITION_DURATION = "transitionDuration"
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Z-Wave Light from Config Entry."""
client: ZwaveClient = hass.data[DOMAIN][config_entry.entry_id][DATA_CLIENT]
@callback
def async_add_light(info: ZwaveDiscoveryInfo) -> None:
"""Add Z-Wave Light."""
light = ZwaveLight(config_entry, client, info)
async_add_entities([light])
config_entry.async_on_unload(
async_dispatcher_connect(
hass,
f"{DOMAIN}_{config_entry.entry_id}_add_{LIGHT_DOMAIN}",
async_add_light,
)
)
def byte_to_zwave_brightness(value: int) -> int:
"""Convert brightness in 0-255 scale to 0-99 scale.
`value` -- (int) Brightness byte value from 0-255.
"""
if value > 0:
return max(1, round((value / 255) * 99))
return 0
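# Worked examples of the scaling (illustration only):
# byte_to_zwave_brightness(255) -> 99, byte_to_zwave_brightness(128) -> 50,
# byte_to_zwave_brightness(1) -> 1; the max(1, ...) guard keeps any nonzero
# input from rounding down to "off".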
class ZwaveLight(ZWaveBaseEntity, LightEntity):
"""Representation of a Z-Wave light."""
def __init__(
self, config_entry: ConfigEntry, client: ZwaveClient, info: ZwaveDiscoveryInfo
) -> None:
"""Initialize the light."""
super().__init__(config_entry, client, info)
self._supports_color = False
self._supports_rgbw = False
self._supports_color_temp = False
self._hs_color: tuple[float, float] | None = None
self._rgbw_color: tuple[int, int, int, int] | None = None
self._color_mode: str | None = None
self._color_temp: int | None = None
self._min_mireds = 153 # 6500K as a safe default
self._max_mireds = 370 # 2700K as a safe default
self._warm_white = self.get_zwave_value(
"targetColor",
CommandClass.SWITCH_COLOR,
value_property_key=ColorComponent.WARM_WHITE,
)
self._cold_white = self.get_zwave_value(
"targetColor",
CommandClass.SWITCH_COLOR,
value_property_key=ColorComponent.COLD_WHITE,
)
self._supported_color_modes = set()
# get additional (optional) values and set features
self._target_brightness = self.get_zwave_value(
"targetValue", add_to_watched_value_ids=False
)
self._target_color = self.get_zwave_value(
"targetColor", CommandClass.SWITCH_COLOR, add_to_watched_value_ids=False
)
self._calculate_color_values()
if self._supports_rgbw:
self._supported_color_modes.add(COLOR_MODE_RGBW)
elif self._supports_color:
self._supported_color_modes.add(COLOR_MODE_HS)
if self._supports_color_temp:
self._supported_color_modes.add(COLOR_MODE_COLOR_TEMP)
if not self._supported_color_modes:
self._supported_color_modes.add(COLOR_MODE_BRIGHTNESS)
# Entity class attributes
self._attr_supported_features = 0
self.supports_brightness_transition = bool(
self._target_brightness is not None
and TRANSITION_DURATION
in self._target_brightness.metadata.value_change_options
)
self.supports_color_transition = bool(
self._target_color is not None
and TRANSITION_DURATION in self._target_color.metadata.value_change_options
)
if self.supports_brightness_transition or self.supports_color_transition:
self._attr_supported_features |= SUPPORT_TRANSITION
@callback
def on_value_update(self) -> None:
"""Call when a watched value is added or updated."""
self._calculate_color_values()
@property
def brightness(self) -> int:
"""Return the brightness of this light between 0..255.
Z-Wave multilevel switches use a range of [0, 99] to control brightness.
"""
if self.info.primary_value.value is not None:
return round((self.info.primary_value.value / 99) * 255)
return 0
@property
def color_mode(self) -> str | None:
"""Return the color mode of the light."""
return self._color_mode
@property
def is_on(self) -> bool:
"""Return true if device is on (brightness above 0)."""
return self.brightness > 0
@property
def hs_color(self) -> tuple[float, float] | None:
"""Return the hs color."""
return self._hs_color
@property
def rgbw_color(self) -> tuple[int, int, int, int] | None:
"""Return the hs color."""
return self._rgbw_color
@property
def color_temp(self) -> int | None:
"""Return the color temperature."""
return self._color_temp
@property
def min_mireds(self) -> int:
"""Return the coldest color_temp that this light supports."""
return self._min_mireds
@property
def max_mireds(self) -> int:
"""Return the warmest color_temp that this light supports."""
return self._max_mireds
@property
def supported_color_modes(self) -> set | None:
"""Flag supported features."""
return self._supported_color_modes
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the device on."""
transition = kwargs.get(ATTR_TRANSITION)
# RGB/HS color
hs_color = kwargs.get(ATTR_HS_COLOR)
if hs_color is not None and self._supports_color:
red, green, blue = color_util.color_hs_to_RGB(*hs_color)
colors = {
ColorComponent.RED: red,
ColorComponent.GREEN: green,
ColorComponent.BLUE: blue,
}
if self._supports_color_temp:
                # turn off white LEDs when setting RGB
colors[ColorComponent.WARM_WHITE] = 0
colors[ColorComponent.COLD_WHITE] = 0
await self._async_set_colors(colors, transition)
# Color temperature
color_temp = kwargs.get(ATTR_COLOR_TEMP)
if color_temp is not None and self._supports_color_temp:
# Limit color temp to min/max values
cold = max(
0,
min(
255,
round(
(self._max_mireds - color_temp)
/ (self._max_mireds - self._min_mireds)
* 255
),
),
)
warm = 255 - cold
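            # Worked example (illustration): with the defaults of min=153 and
            # max=370 mireds, color_temp=153 yields cold=255/warm=0 (coolest)
            # and color_temp=370 yields cold=0/warm=255 (warmest).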
await self._async_set_colors(
{
                # turn off color LEDs when setting color temperature
ColorComponent.RED: 0,
ColorComponent.GREEN: 0,
ColorComponent.BLUE: 0,
ColorComponent.WARM_WHITE: warm,
ColorComponent.COLD_WHITE: cold,
},
transition,
)
# RGBW
rgbw = kwargs.get(ATTR_RGBW_COLOR)
if rgbw is not None and self._supports_rgbw:
rgbw_channels = {
ColorComponent.RED: rgbw[0],
ColorComponent.GREEN: rgbw[1],
ColorComponent.BLUE: rgbw[2],
}
if self._warm_white:
rgbw_channels[ColorComponent.WARM_WHITE] = rgbw[3]
if self._cold_white:
rgbw_channels[ColorComponent.COLD_WHITE] = rgbw[3]
await self._async_set_colors(rgbw_channels, transition)
# set brightness
await self._async_set_brightness(kwargs.get(ATTR_BRIGHTNESS), transition)
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the light off."""
await self._async_set_brightness(0, kwargs.get(ATTR_TRANSITION))
async def _async_set_colors(
self, colors: dict[ColorComponent, int], transition: float | None = None
) -> None:
"""Set (multiple) defined colors to given value(s)."""
# prefer the (new) combined color property
# https://github.com/zwave-js/node-zwave-js/pull/1782
combined_color_val = self.get_zwave_value(
"targetColor",
CommandClass.SWITCH_COLOR,
value_property_key=None,
)
zwave_transition = None
if self.supports_color_transition:
if transition is not None:
zwave_transition = {TRANSITION_DURATION: f"{int(transition)}s"}
else:
zwave_transition = {TRANSITION_DURATION: "default"}
colors_dict = {}
for color, value in colors.items():
color_name = MULTI_COLOR_MAP[color]
colors_dict[color_name] = value
# set updated color object
await self.info.node.async_set_value(
combined_color_val, colors_dict, zwave_transition
)
async def _async_set_brightness(
self, brightness: int | None, transition: float | None = None
) -> None:
"""Set new brightness to light."""
if brightness is None:
# Level 255 means to set it to previous value.
zwave_brightness = 255
else:
            # Z-Wave multilevel switches use a range of [0, 99] to control brightness.
zwave_brightness = byte_to_zwave_brightness(brightness)
# set transition value before sending new brightness
zwave_transition = None
if self.supports_brightness_transition:
if transition is not None:
zwave_transition = {TRANSITION_DURATION: f"{int(transition)}s"}
else:
zwave_transition = {TRANSITION_DURATION: "default"}
# setting a value requires setting targetValue
await self.info.node.async_set_value(
self._target_brightness, zwave_brightness, zwave_transition
)
@callback
def _calculate_color_values(self) -> None:
"""Calculate light colors."""
        # NOTE: We look up all values here (instead of relying on the
        # multicolor one) to find out which colors are supported;
        # as this is a simple lookup by key, it is not heavy
red_val = self.get_zwave_value(
"currentColor",
CommandClass.SWITCH_COLOR,
value_property_key=ColorComponent.RED.value,
)
green_val = self.get_zwave_value(
"currentColor",
CommandClass.SWITCH_COLOR,
value_property_key=ColorComponent.GREEN.value,
)
blue_val = self.get_zwave_value(
"currentColor",
CommandClass.SWITCH_COLOR,
value_property_key=ColorComponent.BLUE.value,
)
ww_val = self.get_zwave_value(
"currentColor",
CommandClass.SWITCH_COLOR,
value_property_key=ColorComponent.WARM_WHITE.value,
)
cw_val = self.get_zwave_value(
"currentColor",
CommandClass.SWITCH_COLOR,
value_property_key=ColorComponent.COLD_WHITE.value,
)
# prefer the (new) combined color property
# https://github.com/zwave-js/node-zwave-js/pull/1782
combined_color_val = self.get_zwave_value(
"currentColor",
CommandClass.SWITCH_COLOR,
value_property_key=None,
)
if combined_color_val and isinstance(combined_color_val.value, dict):
multi_color = combined_color_val.value
else:
multi_color = {}
# Default: Brightness (no color)
self._color_mode = COLOR_MODE_BRIGHTNESS
# RGB support
if red_val and green_val and blue_val:
# prefer values from the multicolor property
red = multi_color.get("red", red_val.value)
green = multi_color.get("green", green_val.value)
blue = multi_color.get("blue", blue_val.value)
self._supports_color = True
if None not in (red, green, blue):
# convert to HS
self._hs_color = color_util.color_RGB_to_hs(red, green, blue)
# Light supports color, set color mode to hs
self._color_mode = COLOR_MODE_HS
# color temperature support
if ww_val and cw_val:
self._supports_color_temp = True
warm_white = multi_color.get("warmWhite", ww_val.value)
cold_white = multi_color.get("coldWhite", cw_val.value)
# Calculate color temps based on whites
if cold_white or warm_white:
self._color_temp = round(
self._max_mireds
- ((cold_white / 255) * (self._max_mireds - self._min_mireds))
)
# White channels turned on, set color mode to color_temp
self._color_mode = COLOR_MODE_COLOR_TEMP
else:
self._color_temp = None
# only one white channel (warm white) = rgbw support
elif red_val and green_val and blue_val and ww_val:
self._supports_rgbw = True
white = multi_color.get("warmWhite", ww_val.value)
self._rgbw_color = (red, green, blue, white)
# Light supports rgbw, set color mode to rgbw
self._color_mode = COLOR_MODE_RGBW
# only one white channel (cool white) = rgbw support
elif cw_val:
self._supports_rgbw = True
white = multi_color.get("coldWhite", cw_val.value)
self._rgbw_color = (red, green, blue, white)
# Light supports rgbw, set color mode to rgbw
self._color_mode = COLOR_MODE_RGBW
|
sander76/home-assistant
|
homeassistant/components/zwave_js/light.py
|
Python
|
apache-2.0
| 14,879
|
[
"Amber"
] |
3f3c9830d5af2c75bdf8516db167c392faa506f44bfa7785d5a244928a2fc20c
|
"""
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit a regression model with Automatic Relevance Determination (ARD), a
sparsity-inducing variant of Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha_ of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2,
label="ARD estimate")
plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2,
label="OLS estimate")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='navy', log=True)
plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
color='gold', marker='o', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=2)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
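# Optional sanity check (an illustrative addition, not part of the original
# example): compare the indices of the largest estimated weights against the
# truly relevant features.
# top = np.argsort(np.abs(clf.coef_))[-10:]
# print(sorted(top), sorted(set(relevant_features)))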
|
DailyActie/Surrogate-Model
|
01-codes/scikit-learn-master/examples/linear_model/plot_ard.py
|
Python
|
mit
| 2,827
|
[
"Gaussian"
] |
1f3a112006523e716bde70f79e3a904150d6e8756a78c63df95ed13c215befba
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from abc import abstractmethod
from dataclasses import dataclass
from typing import Callable, Dict, List, Union
import numpy as np
from psi4.driver import constants
@dataclass
class Lineshape:
"""Lineshape ABC
Attributes
----------
domain : Union[numpy.ndarray, List[float]]
Domain of the spectral band.
gamma : Callable[[float], float]
A function returning the broadening factor.
Notes
-----
Why do we use a callable broadening factor?
For plots in the *wavelength domain*, the broadening factor depends on the location of the band's maximum.
"""
domain: Union[np.ndarray, List[float]]
gamma: Callable[[float], float]
@abstractmethod
def lineshape(self, x_0: float) -> np.ndarray:
pass
@abstractmethod
def maximum(self, x_0: float) -> float:
pass
class Gaussian(Lineshape):
r"""Gaussian function on `domain`, centered at `x_0` with broadening `gamma`.
Parameters
----------
domain : Union[List[float], numpy.ndarray]
The domain of the Gaussian profile.
    gamma : Callable[[float], float]
        A function returning the broadening parameter.
This is related to the full width at half maximum as :math:`\mathrm{FWHM} = \gamma \sqrt{2\ln 2}`
Notes
-----
    Use this profile to model inhomogeneous broadening.
"""
def lineshape(self, x_0: float) -> np.ndarray:
"""Gaussian function on `self.domain`, centered at `x_0` with broadening `self.gamma`.
Parameters
----------
x_0 : float
Center of the Gaussian, i.e. its maximum.
Returns
-------
gauss : numpy.ndarray
The Gaussian profile.
"""
prefactor = 2.0 / (self.gamma(x_0) * np.sqrt(2.0 * np.pi))
exponent = -2.0 * ((self.domain - x_0) / self.gamma(x_0))**2
return prefactor * np.exp(exponent)
def maximum(self, x_0: float) -> float:
return 2.0 / (self.gamma(x_0) * np.sqrt(2.0 * np.pi))
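# Consistency note (illustration): this parameterisation has standard
# deviation sigma = gamma/2, so FWHM = 2*sqrt(2*ln 2)*sigma = gamma*sqrt(2*ln 2),
# matching the relation quoted in the Gaussian docstring.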
class Lorentzian(Lineshape):
"""Lorentzian function on `domain`, centered at `x_0` with broadening `gamma`.
Parameters
----------
domain : Union[List[float], numpy.ndarray]
The domain of the Lorentzian profile.
    gamma : Callable[[float], float]
        A function returning the broadening parameter.
This is the full width at half maximum (FWHM).
Notes
-----
Use this profile to model homogeneous broadening.
"""
def lineshape(self, x_0: float) -> np.ndarray:
"""Lorentzian function on :py:attr:`Lineshape.domain`, centered at `x_0` with broadening :py:attr:`Lineshape.gamma`.
Parameters
----------
x_0
Center of the Lorentzian, i.e. its maximum.
Returns
-------
lorentz : numpy.ndarray
The Lorentzian profile.
"""
prefactor = 1.0 / np.pi
numerator = self.gamma(x_0) / 2.0
denominator = (self.domain - x_0)**2 + numerator**2
return prefactor * (numerator / denominator)
def maximum(self, x_0: float) -> float:
return 2.0 / (np.pi * self.gamma(x_0))
def prefactor_opa() -> float:
r"""Prefactor for converting microscopic observable to decadic molar
extinction coefficient in one-photon absorption.
Returns
-------
prefactor : float
Notes
-----
This function implements the calculation of the following prefactor:
.. math::
k = \frac{4\pi^{2}N_{\mathrm{A}}}{3\times 1000\times \ln(10) (4 \pi \epsilon_{0}) n \hbar c}
The prefactor is computed in SI units and then adjusted for the fact that
we use atomic units to express microscopic observables: excitation energies
and transition dipole moments.
The refractive index :math:`n` is, in general, frequency-dependent. We
assume it to be constant and equal to 1.
"""
N_A = constants.get("Avogadro constant")
c = constants.get("speed of light in vacuum")
hbar = constants.get("Planck constant over 2 pi")
e_0 = constants.get("electric constant")
au_to_Coulomb_centimeter = constants.get("elementary charge") * constants.get(
"Bohr radius") * constants.conversion_factor("m", "cm")
numerator = 4.0 * np.pi**2 * N_A
denominator = 3 * 1000 * np.log(10) * (4 * np.pi * e_0) * hbar * c
return (numerator / denominator) * au_to_Coulomb_centimeter**2
def prefactor_ecd() -> float:
r"""Prefactor for converting microscopic observable to decadic molar
extinction coefficient in electronic circular dichroism.
Returns
-------
prefactor : float
Notes
-----
This function implements the calculation of the following prefactor:
.. math::
k = \frac{16\pi^{2}N_{\mathrm{A}}}{3\times 1000\times \ln(10) (4 \pi \epsilon_{0}) n \hbar c^{2}}
The prefactor is computed in SI units and then adjusted for the fact that
we use atomic units to express microscopic observables: excitation energies
and transition dipole moments.
The refractive index :math:`n` is, in general, frequency-dependent. We
assume it to be constant and equal to 1.
"""
N_A = constants.get("Avogadro constant")
c = constants.get("speed of light in vacuum")
hbar = constants.get("Planck constant over 2 pi")
e_0 = constants.get("electric constant")
au_to_Coulomb_centimeter = constants.get("elementary charge") * constants.get(
"Bohr radius") * constants.conversion_factor("m", "cm")
au_to_Joule_inverse_Tesla = 2.0 * constants.get("Bohr magneton") * constants.conversion_factor("m", "cm")
conversion = au_to_Coulomb_centimeter * au_to_Joule_inverse_Tesla
numerator = 16.0 * np.pi**2 * N_A
denominator = 3 * 1000 * np.log(10) * (4 * np.pi * e_0) * hbar * c**2
return (numerator / denominator) * conversion
def spectrum(*,
poles: Union[List[float], np.ndarray],
residues: Union[List[float], np.ndarray],
kind: str = "opa",
lineshape: str = "gaussian",
gamma: float = 0.2,
npoints: int = 5000,
out_units: str = "nm") -> Dict[str, np.ndarray]:
r"""One-photon absorption (OPA) or electronic circular dichroism (ECD)
spectra with phenomenological line broadening.
This function gives arrays of values ready to be plotted as OPA spectrum:
.. math::
\varepsilon(\omega) =
\frac{4\pi^{2}N_{\mathrm{A}}\omega}{3\times 1000\times \ln(10) (4 \pi \epsilon_{0}) n \hbar c}
\sum_{i \rightarrow j}g_{ij}(\omega)|\mathbf{\mu}_{ij}|^{2}
or ECD spectrum:
.. math::
\Delta\varepsilon(\omega) =
\frac{16\pi^{2}N_{\mathrm{A}}\omega}{3\times 1000\times \ln(10) (4 \pi \epsilon_{0}) n \hbar c^{2}}
\sum_{i \rightarrow j}g_{ij}(\omega)\Im(\mathbf{\mu}_{ij}\cdot\mathbf{m}_{ij})
in macroscopic units of :math:`\mathrm{L}\cdot\mathrm{mol}^{-1}\cdot\mathrm{cm}^{-1}`.
The lineshape function :math:`g_{ij}(\omega)` with phenomenological
broadening :math:`\gamma` is used for the convolution of the infinitely
narrow results from a linear response calculation.
Parameters
----------
poles
Poles of the response function, i.e. the excitation energies.
These are **expected** in atomic units of angular frequency.
residues
Residues of the linear response functions, i.e. transition dipole moments (OPA) and rotatory strengths (ECD).
These are **expected** in atomic units.
kind
{"opa", "ecd"}
Which kind of spectrum to generate, one-photon absorption ("opa") or electronic circular dichroism ("ecd").
Default is `opa`.
lineshape
{"gaussian", "lorentzian"}
The lineshape function to use in the fitting. Default is `gaussian`.
gamma
Full width at half maximum of the lineshape function.
Default is 0.2 au of angular frequency.
This value is **expected** in atomic units of angular frequency.
npoints
How many points to generate for the x axis. Default is 5000.
out_units
Units for the output array `x`, the x axis of the spectrum plot.
Default is wavelengths in nanometers.
Valid (and case-insensitive) values for the units are:
- `au` atomic units of angular frequency
- `Eh` atomic units of energy
- `eV`
- `nm`
- `THz`
Returns
-------
spectrum : Dict
The fitted electronic absorption spectrum, with units for the x axis specified by the `out_units` parameter.
This is a dictionary containing the convoluted (key: `convolution`) and the infinitely narrow spectra (key: `sticks`).
.. code-block:: python
{"convolution": {"x": np.ndarray, "y": np.ndarray},
"sticks": {"poles": np.ndarray, "residues": np.ndarray}}
Notes
-----
* Conversion of the broadening parameter :math:`\gamma`.
The lineshape functions are formulated as functions of the angular frequency :math:`\omega`.
When converting to other physical quantities, the broadening parameter has to be modified accordingly.
If :math:`\gamma_{\omega}` is the chosen broadening parameter then:
      - Wavelength: :math:`\gamma_{\lambda} = \frac{\lambda_{ij}^{2}}{2\pi c}\gamma_{\omega}`
      - Frequency: :math:`\gamma_{\nu} = \frac{\gamma_{\omega}}{2\pi}`
- Energy: :math:`gamma_{E} = \gamma_{\omega}\hbar`
References
----------
A. Rizzo, S. Coriani, K. Ruud, "Response Function Theory Computational Approaches to Linear and Nonlinear Optical Spectroscopy". In Computational Strategies for Spectroscopy.
"""
# Transmute inputs to np.ndarray
if isinstance(poles, list):
poles = np.array(poles)
if isinstance(residues, list):
residues = np.array(residues)
# Validate input arrays
if poles.shape != residues.shape:
raise ValueError(f"Shapes of poles ({poles.shape}) and residues ({residues.shape}) vectors do not match!")
# Validate kind of spectrum
kind = kind.lower()
valid_kinds = ["opa", "ecd"]
if kind not in valid_kinds:
raise ValueError(f"Spectrum kind {kind} not among recognized ({valid_kinds})")
# Validate output units
out_units = out_units.lower()
valid_out_units = ["au", "eh", "ev", "nm", "thz"]
if out_units not in valid_out_units:
raise ValueError(f"Output units {out_units} not among recognized ({valid_out_units})")
c = constants.get("speed of light in vacuum")
c_nm = c * constants.conversion_factor("m", "nm")
hbar = constants.get("Planck constant over 2 pi")
h = constants.get("Planck constant")
Eh = constants.get("Hartree energy")
au_to_nm = 2.0 * np.pi * c_nm * hbar / Eh
au_to_THz = (Eh / h) * constants.conversion_factor("Hz", "THz")
au_to_eV = constants.get("Hartree energy in eV")
converters = {
"au": lambda x: x, # Angular frequency in atomic units
"eh": lambda x: x, # Energy in atomic units
"ev": lambda x: x * au_to_eV, # Energy in electronvolts
"nm": lambda x: au_to_nm / x, # Wavelength in nanometers
"thz": lambda x: x * au_to_THz, # Frequency in terahertz
}
# Perform conversion of poles from au of angular frequency to output units
poles = converters[out_units](poles)
# Broadening functions
gammas = {
"au": lambda x_0: gamma, # Angular frequency in atomic units
"eh": lambda x_0: gamma, # Energy in atomic units
"ev": lambda x_0: gamma * au_to_eV, # Energy in electronvolts
"nm": lambda x_0: ((x_0**2 * gamma * (Eh / hbar)) / (2 * np.pi * c_nm)), # Wavelength in nanometers
"thz": lambda x_0: gamma * au_to_THz, # Frequency in terahertz
}
# Generate x axis
# Add a fifth of the range on each side
expand_side = (np.max(poles) - np.min(poles)) / 5
x = np.linspace(np.min(poles) - expand_side, np.max(poles) + expand_side, npoints)
# Validate lineshape
lineshape = lineshape.lower()
valid_lineshapes = ["gaussian", "lorentzian"]
if lineshape not in valid_lineshapes:
raise ValueError(f"Lineshape {lineshape} not among recognized ({valid_lineshapes})")
# Obtain lineshape function
shape = Gaussian(x, gammas[out_units]) if lineshape == "gaussian" else Lorentzian(x, gammas[out_units])
# Generate y axis, i.e. molar decadic absorption coefficient
prefactor = prefactor_opa() if kind == "opa" else prefactor_ecd()
transform_residue = (lambda x: x**2) if kind == "opa" else (lambda x: x)
y = prefactor * x * np.sum([transform_residue(r) * shape.lineshape(p) for p, r in zip(poles, residues)], axis=0)
# Generate sticks
sticks = prefactor * np.array([p * transform_residue(r) * shape.maximum(p) for p, r in zip(poles, residues)])
return {"convolution": {"x": x, "y": y}, "sticks": {"poles": poles, "residues": sticks}}
|
psi4/psi4
|
psi4/driver/p4util/spectrum.py
|
Python
|
lgpl-3.0
| 13,818
|
[
"Avogadro",
"Gaussian",
"Psi4"
] |
80af72b4e042ab4305b2e4b1ac1104191677c59b872f061a931e3aa18ca2874a
|
import matplotlib as mpl
mpl.use('Agg')
import numpy as np
import moose
import rdesigneur as rd
def test_current_pulse_squid():
"""Test current pulse.
>>> test_current_pulse_squid()
Rdesigneur: Elec model has 1 compartments and 0 spines on 0 compartments.
[array([-0.065 , -0.06525877, -0.06549723, ..., -0.06676325,
-0.06676329, -0.06676332])]
"""
rdes = rd.rdesigneur(
chanProto = [['make_HH_Na()', 'Na'], ['make_HH_K()', 'K']],
chanDistrib = [
['Na', 'soma', 'Gbar', '1200' ],
['K', 'soma', 'Gbar', '360' ]],
stimList = [['soma', '1', '.', 'inject', '(t>0.1 && t<0.2) * 1e-8' ]],
plotList = [['soma', '1', '.', 'Vm', 'Membrane potential']])
rdes.buildModel()
moose.reinit()
moose.start(0.3)
rdes.display(block=False)
data = moose.wildcardFind('/##[TYPE=Table]')[0].vector
m, u = data.mean(), data.std()
    assert np.allclose([-0.06507780556166297, 0.012544993918549656], [m, u])
print("[INFO ] Done")
return True
if __name__ == '__main__':
test_current_pulse_squid()
|
dilawar/moose-core
|
tests/rdesigneur/test_30_squid_currentPulse.py
|
Python
|
gpl-3.0
| 1,098
|
[
"MOOSE"
] |
faf99a6bf2461fb3a320ae2be4ae0309d73c121231027f9ed2150d8b3f165e50
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflow_v2.services.participants import ParticipantsAsyncClient
from google.cloud.dialogflow_v2.services.participants import ParticipantsClient
from google.cloud.dialogflow_v2.services.participants import pagers
from google.cloud.dialogflow_v2.services.participants import transports
from google.cloud.dialogflow_v2.types import audio_config
from google.cloud.dialogflow_v2.types import context
from google.cloud.dialogflow_v2.types import entity_type
from google.cloud.dialogflow_v2.types import participant
from google.cloud.dialogflow_v2.types import participant as gcd_participant
from google.cloud.dialogflow_v2.types import session
from google.cloud.dialogflow_v2.types import session_entity_type
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.type import latlng_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
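# For illustration: with the patch above, a client whose DEFAULT_ENDPOINT
# contains "localhost" is reported as "foo.googleapis.com", so the mTLS
# endpoint derivation can be exercised against a non-localhost name.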
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert ParticipantsClient._get_default_mtls_endpoint(None) is None
assert (
ParticipantsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
)
assert (
ParticipantsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
ParticipantsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
ParticipantsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert ParticipantsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [ParticipantsClient, ParticipantsAsyncClient,])
def test_participants_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.ParticipantsGrpcTransport, "grpc"),
(transports.ParticipantsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_participants_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [ParticipantsClient, ParticipantsAsyncClient,])
def test_participants_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_participants_client_get_transport_class():
transport = ParticipantsClient.get_transport_class()
available_transports = [
transports.ParticipantsGrpcTransport,
]
assert transport in available_transports
transport = ParticipantsClient.get_transport_class("grpc")
assert transport == transports.ParticipantsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(ParticipantsClient, transports.ParticipantsGrpcTransport, "grpc"),
(
ParticipantsAsyncClient,
transports.ParticipantsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
ParticipantsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ParticipantsClient)
)
@mock.patch.object(
ParticipantsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ParticipantsAsyncClient),
)
def test_participants_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(ParticipantsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(ParticipantsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(ParticipantsClient, transports.ParticipantsGrpcTransport, "grpc", "true"),
(
ParticipantsAsyncClient,
transports.ParticipantsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(ParticipantsClient, transports.ParticipantsGrpcTransport, "grpc", "false"),
(
ParticipantsAsyncClient,
transports.ParticipantsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
ParticipantsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ParticipantsClient)
)
@mock.patch.object(
ParticipantsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ParticipantsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_participants_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [ParticipantsClient, ParticipantsAsyncClient])
@mock.patch.object(
ParticipantsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ParticipantsClient)
)
@mock.patch.object(
ParticipantsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ParticipantsAsyncClient),
)
def test_participants_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(ParticipantsClient, transports.ParticipantsGrpcTransport, "grpc"),
(
ParticipantsAsyncClient,
transports.ParticipantsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_participants_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
ParticipantsClient,
transports.ParticipantsGrpcTransport,
"grpc",
grpc_helpers,
),
(
ParticipantsAsyncClient,
transports.ParticipantsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_participants_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_participants_client_client_options_from_dict():
with mock.patch(
"google.cloud.dialogflow_v2.services.participants.transports.ParticipantsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = ParticipantsClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
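# As the test above shows, client_options may also be passed as a plain dict;
# the client is expected to coerce it into a client_options.ClientOptions
# instance before reading api_endpoint and friends.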
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
ParticipantsClient,
transports.ParticipantsGrpcTransport,
"grpc",
grpc_helpers,
),
(
ParticipantsAsyncClient,
transports.ParticipantsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_participants_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=None,
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
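# Together with the assertion block above, this verifies precedence: when a
# credentials file is configured, the credentials loaded from that file
# (file_creds) are handed to create_channel, not the application default
# credentials returned by google.auth.default.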
@pytest.mark.parametrize(
"request_type", [gcd_participant.CreateParticipantRequest, dict,]
)
def test_create_participant(request_type, transport: str = "grpc"):
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_participant), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_participant.Participant(
name="name_value",
role=gcd_participant.Participant.Role.HUMAN_AGENT,
sip_recording_media_label="sip_recording_media_label_value",
)
response = client.create_participant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
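        # mock_calls entries unpack as (name, args, kwargs); args[0] is the
        # request object that actually reached the stub.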
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_participant.CreateParticipantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_participant.Participant)
assert response.name == "name_value"
assert response.role == gcd_participant.Participant.Role.HUMAN_AGENT
assert response.sip_recording_media_label == "sip_recording_media_label_value"
def test_create_participant_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_participant), "__call__"
) as call:
client.create_participant()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_participant.CreateParticipantRequest()
@pytest.mark.asyncio
async def test_create_participant_async(
transport: str = "grpc_asyncio",
request_type=gcd_participant.CreateParticipantRequest,
):
client = ParticipantsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_participant), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_participant.Participant(
name="name_value",
role=gcd_participant.Participant.Role.HUMAN_AGENT,
sip_recording_media_label="sip_recording_media_label_value",
)
)
response = await client.create_participant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_participant.CreateParticipantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_participant.Participant)
assert response.name == "name_value"
assert response.role == gcd_participant.Participant.Role.HUMAN_AGENT
assert response.sip_recording_media_label == "sip_recording_media_label_value"
@pytest.mark.asyncio
async def test_create_participant_async_from_dict():
await test_create_participant_async(request_type=dict)
def test_create_participant_field_headers():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_participant.CreateParticipantRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_participant), "__call__"
) as call:
call.return_value = gcd_participant.Participant()
client.create_participant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_participant_field_headers_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_participant.CreateParticipantRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_participant), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_participant.Participant()
)
await client.create_participant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_participant_flattened():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_participant), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_participant.Participant()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_participant(
parent="parent_value",
participant=gcd_participant.Participant(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].participant
mock_val = gcd_participant.Participant(name="name_value")
assert arg == mock_val
def test_create_participant_flattened_error():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_participant(
gcd_participant.CreateParticipantRequest(),
parent="parent_value",
participant=gcd_participant.Participant(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_participant_flattened_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_participant), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcd_participant.Participant()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_participant(
parent="parent_value",
participant=gcd_participant.Participant(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].participant
mock_val = gcd_participant.Participant(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_participant_flattened_error_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_participant(
gcd_participant.CreateParticipantRequest(),
parent="parent_value",
participant=gcd_participant.Participant(name="name_value"),
)
@pytest.mark.parametrize("request_type", [participant.GetParticipantRequest, dict,])
def test_get_participant(request_type, transport: str = "grpc"):
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_participant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = participant.Participant(
name="name_value",
role=participant.Participant.Role.HUMAN_AGENT,
sip_recording_media_label="sip_recording_media_label_value",
)
response = client.get_participant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == participant.GetParticipantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, participant.Participant)
assert response.name == "name_value"
assert response.role == participant.Participant.Role.HUMAN_AGENT
assert response.sip_recording_media_label == "sip_recording_media_label_value"
def test_get_participant_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_participant), "__call__") as call:
client.get_participant()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == participant.GetParticipantRequest()
@pytest.mark.asyncio
async def test_get_participant_async(
transport: str = "grpc_asyncio", request_type=participant.GetParticipantRequest
):
client = ParticipantsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_participant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
participant.Participant(
name="name_value",
role=participant.Participant.Role.HUMAN_AGENT,
sip_recording_media_label="sip_recording_media_label_value",
)
)
response = await client.get_participant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == participant.GetParticipantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, participant.Participant)
assert response.name == "name_value"
assert response.role == participant.Participant.Role.HUMAN_AGENT
assert response.sip_recording_media_label == "sip_recording_media_label_value"
@pytest.mark.asyncio
async def test_get_participant_async_from_dict():
await test_get_participant_async(request_type=dict)
def test_get_participant_field_headers():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = participant.GetParticipantRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_participant), "__call__") as call:
call.return_value = participant.Participant()
client.get_participant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_participant_field_headers_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = participant.GetParticipantRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_participant), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
participant.Participant()
)
await client.get_participant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_participant_flattened():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_participant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = participant.Participant()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_participant(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_participant_flattened_error():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_participant(
participant.GetParticipantRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_participant_flattened_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_participant), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            participant.Participant()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_participant(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_participant_flattened_error_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_participant(
participant.GetParticipantRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [participant.ListParticipantsRequest, dict,])
def test_list_participants(request_type, transport: str = "grpc"):
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_participants), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = participant.ListParticipantsResponse(
next_page_token="next_page_token_value",
)
response = client.list_participants(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == participant.ListParticipantsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListParticipantsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_participants_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_participants), "__call__"
) as call:
client.list_participants()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == participant.ListParticipantsRequest()
@pytest.mark.asyncio
async def test_list_participants_async(
transport: str = "grpc_asyncio", request_type=participant.ListParticipantsRequest
):
client = ParticipantsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_participants), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
participant.ListParticipantsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_participants(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == participant.ListParticipantsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListParticipantsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_participants_async_from_dict():
await test_list_participants_async(request_type=dict)
def test_list_participants_field_headers():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = participant.ListParticipantsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_participants), "__call__"
) as call:
call.return_value = participant.ListParticipantsResponse()
client.list_participants(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_participants_field_headers_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = participant.ListParticipantsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_participants), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
participant.ListParticipantsResponse()
)
await client.list_participants(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_participants_flattened():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_participants), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = participant.ListParticipantsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_participants(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_participants_flattened_error():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_participants(
participant.ListParticipantsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_participants_flattened_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_participants), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            participant.ListParticipantsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_participants(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_participants_flattened_error_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_participants(
participant.ListParticipantsRequest(), parent="parent_value",
)
def test_list_participants_pager(transport_name: str = "grpc"):
client = ParticipantsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_participants), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
participant.ListParticipantsResponse(
participants=[
participant.Participant(),
participant.Participant(),
participant.Participant(),
],
next_page_token="abc",
),
participant.ListParticipantsResponse(
participants=[], next_page_token="def",
),
participant.ListParticipantsResponse(
participants=[participant.Participant(),], next_page_token="ghi",
),
participant.ListParticipantsResponse(
participants=[participant.Participant(), participant.Participant(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_participants(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, participant.Participant) for i in results)
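# The pager iterates items, not pages: it consumes the four mocked responses
# above and yields 3 + 0 + 1 + 2 == 6 Participant objects. The trailing
# RuntimeError would only be raised if the pager fetched past the last page,
# which has no next_page_token.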
def test_list_participants_pages(transport_name: str = "grpc"):
client = ParticipantsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_participants), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
participant.ListParticipantsResponse(
participants=[
participant.Participant(),
participant.Participant(),
participant.Participant(),
],
next_page_token="abc",
),
participant.ListParticipantsResponse(
participants=[], next_page_token="def",
),
participant.ListParticipantsResponse(
participants=[participant.Participant(),], next_page_token="ghi",
),
participant.ListParticipantsResponse(
participants=[participant.Participant(), participant.Participant(),],
),
RuntimeError,
)
pages = list(client.list_participants(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_participants_async_pager():
    client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_participants),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
participant.ListParticipantsResponse(
participants=[
participant.Participant(),
participant.Participant(),
participant.Participant(),
],
next_page_token="abc",
),
participant.ListParticipantsResponse(
participants=[], next_page_token="def",
),
participant.ListParticipantsResponse(
participants=[participant.Participant(),], next_page_token="ghi",
),
participant.ListParticipantsResponse(
participants=[participant.Participant(), participant.Participant(),],
),
RuntimeError,
)
async_pager = await client.list_participants(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, participant.Participant) for i in responses)
@pytest.mark.asyncio
async def test_list_participants_async_pages():
    client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_participants),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
participant.ListParticipantsResponse(
participants=[
participant.Participant(),
participant.Participant(),
participant.Participant(),
],
next_page_token="abc",
),
participant.ListParticipantsResponse(
participants=[], next_page_token="def",
),
participant.ListParticipantsResponse(
participants=[participant.Participant(),], next_page_token="ghi",
),
participant.ListParticipantsResponse(
participants=[participant.Participant(), participant.Participant(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_participants(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
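# The async pager mirrors the sync behavior: iterating the pager yields
# individual items, while .pages yields whole responses whose
# raw_page.next_page_token values line up with the mocked tokens
# ("abc", "def", "ghi", then "" for the final page).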
@pytest.mark.parametrize(
"request_type", [gcd_participant.UpdateParticipantRequest, dict,]
)
def test_update_participant(request_type, transport: str = "grpc"):
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_participant), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_participant.Participant(
name="name_value",
role=gcd_participant.Participant.Role.HUMAN_AGENT,
sip_recording_media_label="sip_recording_media_label_value",
)
response = client.update_participant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_participant.UpdateParticipantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_participant.Participant)
assert response.name == "name_value"
assert response.role == gcd_participant.Participant.Role.HUMAN_AGENT
assert response.sip_recording_media_label == "sip_recording_media_label_value"
def test_update_participant_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_participant), "__call__"
) as call:
client.update_participant()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_participant.UpdateParticipantRequest()
@pytest.mark.asyncio
async def test_update_participant_async(
transport: str = "grpc_asyncio",
request_type=gcd_participant.UpdateParticipantRequest,
):
client = ParticipantsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_participant), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_participant.Participant(
name="name_value",
role=gcd_participant.Participant.Role.HUMAN_AGENT,
sip_recording_media_label="sip_recording_media_label_value",
)
)
response = await client.update_participant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_participant.UpdateParticipantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_participant.Participant)
assert response.name == "name_value"
assert response.role == gcd_participant.Participant.Role.HUMAN_AGENT
assert response.sip_recording_media_label == "sip_recording_media_label_value"
@pytest.mark.asyncio
async def test_update_participant_async_from_dict():
await test_update_participant_async(request_type=dict)
def test_update_participant_field_headers():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_participant.UpdateParticipantRequest()
request.participant.name = "participant.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_participant), "__call__"
) as call:
call.return_value = gcd_participant.Participant()
client.update_participant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "participant.name=participant.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_participant_field_headers_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_participant.UpdateParticipantRequest()
request.participant.name = "participant.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_participant), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_participant.Participant()
)
await client.update_participant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "participant.name=participant.name/value",) in kw[
"metadata"
]
def test_update_participant_flattened():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_participant), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_participant.Participant()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_participant(
participant=gcd_participant.Participant(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].participant
mock_val = gcd_participant.Participant(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
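# update_mask is a google.protobuf.FieldMask; by convention in update RPCs it
# lists the fields to overwrite, leaving unlisted fields untouched.
# "paths_value" here is only a placeholder, not a real field path.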
def test_update_participant_flattened_error():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_participant(
gcd_participant.UpdateParticipantRequest(),
participant=gcd_participant.Participant(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_participant_flattened_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_participant), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcd_participant.Participant()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_participant(
participant=gcd_participant.Participant(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].participant
mock_val = gcd_participant.Participant(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_participant_flattened_error_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_participant(
gcd_participant.UpdateParticipantRequest(),
participant=gcd_participant.Participant(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize("request_type", [gcd_participant.AnalyzeContentRequest, dict,])
def test_analyze_content(request_type, transport: str = "grpc"):
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.analyze_content), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_participant.AnalyzeContentResponse(
reply_text="reply_text_value",
)
response = client.analyze_content(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_participant.AnalyzeContentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_participant.AnalyzeContentResponse)
assert response.reply_text == "reply_text_value"
def test_analyze_content_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.analyze_content), "__call__") as call:
client.analyze_content()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_participant.AnalyzeContentRequest()
@pytest.mark.asyncio
async def test_analyze_content_async(
transport: str = "grpc_asyncio", request_type=gcd_participant.AnalyzeContentRequest
):
client = ParticipantsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.analyze_content), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_participant.AnalyzeContentResponse(reply_text="reply_text_value",)
)
response = await client.analyze_content(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_participant.AnalyzeContentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_participant.AnalyzeContentResponse)
assert response.reply_text == "reply_text_value"
@pytest.mark.asyncio
async def test_analyze_content_async_from_dict():
await test_analyze_content_async(request_type=dict)
def test_analyze_content_field_headers():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_participant.AnalyzeContentRequest()
request.participant = "participant/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.analyze_content), "__call__") as call:
call.return_value = gcd_participant.AnalyzeContentResponse()
client.analyze_content(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "participant=participant/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_analyze_content_field_headers_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_participant.AnalyzeContentRequest()
request.participant = "participant/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.analyze_content), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_participant.AnalyzeContentResponse()
)
await client.analyze_content(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "participant=participant/value",) in kw["metadata"]
def test_analyze_content_flattened():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.analyze_content), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_participant.AnalyzeContentResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.analyze_content(
participant="participant_value",
text_input=session.TextInput(text="text_value"),
event_input=session.EventInput(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].participant
mock_val = "participant_value"
assert arg == mock_val
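        # text_input and event_input are presumably members of the same proto
        # oneof, so setting event_input last clears text_input; only
        # event_input is therefore asserted on the request.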
assert args[0].event_input == session.EventInput(name="name_value")
def test_analyze_content_flattened_error():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.analyze_content(
gcd_participant.AnalyzeContentRequest(),
participant="participant_value",
text_input=session.TextInput(text="text_value"),
event_input=session.EventInput(name="name_value"),
)
@pytest.mark.asyncio
async def test_analyze_content_flattened_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.analyze_content), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcd_participant.AnalyzeContentResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.analyze_content(
participant="participant_value",
text_input=session.TextInput(text="text_value"),
event_input=session.EventInput(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].participant
mock_val = "participant_value"
assert arg == mock_val
assert args[0].event_input == session.EventInput(name="name_value")
@pytest.mark.asyncio
async def test_analyze_content_flattened_error_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.analyze_content(
gcd_participant.AnalyzeContentRequest(),
participant="participant_value",
text_input=session.TextInput(text="text_value"),
event_input=session.EventInput(name="name_value"),
)
@pytest.mark.parametrize("request_type", [participant.SuggestArticlesRequest, dict,])
def test_suggest_articles(request_type, transport: str = "grpc"):
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.suggest_articles), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = participant.SuggestArticlesResponse(
latest_message="latest_message_value", context_size=1311,
)
response = client.suggest_articles(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == participant.SuggestArticlesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, participant.SuggestArticlesResponse)
assert response.latest_message == "latest_message_value"
assert response.context_size == 1311
def test_suggest_articles_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.suggest_articles), "__call__") as call:
client.suggest_articles()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == participant.SuggestArticlesRequest()
@pytest.mark.asyncio
async def test_suggest_articles_async(
transport: str = "grpc_asyncio", request_type=participant.SuggestArticlesRequest
):
client = ParticipantsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.suggest_articles), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
participant.SuggestArticlesResponse(
latest_message="latest_message_value", context_size=1311,
)
)
response = await client.suggest_articles(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == participant.SuggestArticlesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, participant.SuggestArticlesResponse)
assert response.latest_message == "latest_message_value"
assert response.context_size == 1311
@pytest.mark.asyncio
async def test_suggest_articles_async_from_dict():
await test_suggest_articles_async(request_type=dict)
def test_suggest_articles_field_headers():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = participant.SuggestArticlesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.suggest_articles), "__call__") as call:
call.return_value = participant.SuggestArticlesResponse()
client.suggest_articles(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_suggest_articles_field_headers_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = participant.SuggestArticlesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.suggest_articles), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
participant.SuggestArticlesResponse()
)
await client.suggest_articles(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_suggest_articles_flattened():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.suggest_articles), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = participant.SuggestArticlesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.suggest_articles(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_suggest_articles_flattened_error():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.suggest_articles(
participant.SuggestArticlesRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_suggest_articles_flattened_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.suggest_articles), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            participant.SuggestArticlesResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.suggest_articles(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_suggest_articles_flattened_error_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.suggest_articles(
participant.SuggestArticlesRequest(), parent="parent_value",
)
@pytest.mark.parametrize("request_type", [participant.SuggestFaqAnswersRequest, dict,])
def test_suggest_faq_answers(request_type, transport: str = "grpc"):
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.suggest_faq_answers), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = participant.SuggestFaqAnswersResponse(
latest_message="latest_message_value", context_size=1311,
)
response = client.suggest_faq_answers(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == participant.SuggestFaqAnswersRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, participant.SuggestFaqAnswersResponse)
assert response.latest_message == "latest_message_value"
assert response.context_size == 1311
def test_suggest_faq_answers_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.suggest_faq_answers), "__call__"
) as call:
client.suggest_faq_answers()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == participant.SuggestFaqAnswersRequest()
@pytest.mark.asyncio
async def test_suggest_faq_answers_async(
transport: str = "grpc_asyncio", request_type=participant.SuggestFaqAnswersRequest
):
client = ParticipantsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.suggest_faq_answers), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
participant.SuggestFaqAnswersResponse(
latest_message="latest_message_value", context_size=1311,
)
)
response = await client.suggest_faq_answers(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == participant.SuggestFaqAnswersRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, participant.SuggestFaqAnswersResponse)
assert response.latest_message == "latest_message_value"
assert response.context_size == 1311
@pytest.mark.asyncio
async def test_suggest_faq_answers_async_from_dict():
await test_suggest_faq_answers_async(request_type=dict)
def test_suggest_faq_answers_field_headers():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = participant.SuggestFaqAnswersRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.suggest_faq_answers), "__call__"
) as call:
call.return_value = participant.SuggestFaqAnswersResponse()
client.suggest_faq_answers(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_suggest_faq_answers_field_headers_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = participant.SuggestFaqAnswersRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.suggest_faq_answers), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
participant.SuggestFaqAnswersResponse()
)
await client.suggest_faq_answers(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_suggest_faq_answers_flattened():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.suggest_faq_answers), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = participant.SuggestFaqAnswersResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.suggest_faq_answers(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_suggest_faq_answers_flattened_error():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.suggest_faq_answers(
participant.SuggestFaqAnswersRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_suggest_faq_answers_flattened_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.suggest_faq_answers), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = participant.SuggestFaqAnswersResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
participant.SuggestFaqAnswersResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.suggest_faq_answers(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_suggest_faq_answers_flattened_error_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.suggest_faq_answers(
participant.SuggestFaqAnswersRequest(), parent="parent_value",
)
@pytest.mark.parametrize(
"request_type", [participant.SuggestSmartRepliesRequest, dict,]
)
def test_suggest_smart_replies(request_type, transport: str = "grpc"):
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.suggest_smart_replies), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = participant.SuggestSmartRepliesResponse(
latest_message="latest_message_value", context_size=1311,
)
response = client.suggest_smart_replies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == participant.SuggestSmartRepliesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, participant.SuggestSmartRepliesResponse)
assert response.latest_message == "latest_message_value"
assert response.context_size == 1311
def test_suggest_smart_replies_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.suggest_smart_replies), "__call__"
) as call:
client.suggest_smart_replies()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == participant.SuggestSmartRepliesRequest()
@pytest.mark.asyncio
async def test_suggest_smart_replies_async(
transport: str = "grpc_asyncio", request_type=participant.SuggestSmartRepliesRequest
):
client = ParticipantsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.suggest_smart_replies), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
participant.SuggestSmartRepliesResponse(
latest_message="latest_message_value", context_size=1311,
)
)
response = await client.suggest_smart_replies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == participant.SuggestSmartRepliesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, participant.SuggestSmartRepliesResponse)
assert response.latest_message == "latest_message_value"
assert response.context_size == 1311
@pytest.mark.asyncio
async def test_suggest_smart_replies_async_from_dict():
await test_suggest_smart_replies_async(request_type=dict)
def test_suggest_smart_replies_field_headers():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = participant.SuggestSmartRepliesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.suggest_smart_replies), "__call__"
) as call:
call.return_value = participant.SuggestSmartRepliesResponse()
client.suggest_smart_replies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_suggest_smart_replies_field_headers_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = participant.SuggestSmartRepliesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.suggest_smart_replies), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
participant.SuggestSmartRepliesResponse()
)
await client.suggest_smart_replies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_suggest_smart_replies_flattened():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.suggest_smart_replies), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = participant.SuggestSmartRepliesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.suggest_smart_replies(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_suggest_smart_replies_flattened_error():
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.suggest_smart_replies(
participant.SuggestSmartRepliesRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_suggest_smart_replies_flattened_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.suggest_smart_replies), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = participant.SuggestSmartRepliesResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
participant.SuggestSmartRepliesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.suggest_smart_replies(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_suggest_smart_replies_flattened_error_async():
client = ParticipantsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.suggest_smart_replies(
participant.SuggestSmartRepliesRequest(), parent="parent_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.ParticipantsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.ParticipantsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ParticipantsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.ParticipantsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = ParticipantsClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = ParticipantsClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.ParticipantsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ParticipantsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.ParticipantsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = ParticipantsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.ParticipantsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.ParticipantsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.ParticipantsGrpcTransport,
transports.ParticipantsGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = ParticipantsClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.ParticipantsGrpcTransport,)
def test_participants_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.ParticipantsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_participants_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.dialogflow_v2.services.participants.transports.ParticipantsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.ParticipantsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_participant",
"get_participant",
"list_participants",
"update_participant",
"analyze_content",
"suggest_articles",
"suggest_faq_answers",
"suggest_smart_replies",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_participants_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflow_v2.services.participants.transports.ParticipantsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ParticipantsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_participants_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.dialogflow_v2.services.participants.transports.ParticipantsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ParticipantsTransport()
adc.assert_called_once()
def test_participants_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
ParticipantsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.ParticipantsGrpcTransport,
transports.ParticipantsGrpcAsyncIOTransport,
],
)
def test_participants_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.ParticipantsGrpcTransport, grpc_helpers),
(transports.ParticipantsGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_participants_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=["1", "2"],
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.ParticipantsGrpcTransport, transports.ParticipantsGrpcAsyncIOTransport],
)
def test_participants_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_participants_host_no_port():
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com"
),
)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_participants_host_with_port():
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com:8000"
),
)
assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_participants_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.ParticipantsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_participants_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.ParticipantsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.ParticipantsGrpcTransport, transports.ParticipantsGrpcAsyncIOTransport],
)
def test_participants_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.ParticipantsGrpcTransport, transports.ParticipantsGrpcAsyncIOTransport],
)
def test_participants_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_answer_record_path():
project = "squid"
answer_record = "clam"
expected = "projects/{project}/answerRecords/{answer_record}".format(
project=project, answer_record=answer_record,
)
actual = ParticipantsClient.answer_record_path(project, answer_record)
assert expected == actual
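# With the sample values above, answer_record_path yields
# "projects/squid/answerRecords/clam"; parse_answer_record_path inverts this
# mapping, as the next test verifies.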
def test_parse_answer_record_path():
expected = {
"project": "whelk",
"answer_record": "octopus",
}
path = ParticipantsClient.answer_record_path(**expected)
# Check that the path construction is reversible.
actual = ParticipantsClient.parse_answer_record_path(path)
assert expected == actual
def test_context_path():
project = "oyster"
session = "nudibranch"
context = "cuttlefish"
expected = "projects/{project}/agent/sessions/{session}/contexts/{context}".format(
project=project, session=session, context=context,
)
actual = ParticipantsClient.context_path(project, session, context)
assert expected == actual
def test_parse_context_path():
expected = {
"project": "mussel",
"session": "winkle",
"context": "nautilus",
}
path = ParticipantsClient.context_path(**expected)
# Check that the path construction is reversible.
actual = ParticipantsClient.parse_context_path(path)
assert expected == actual
def test_intent_path():
project = "scallop"
intent = "abalone"
expected = "projects/{project}/agent/intents/{intent}".format(
project=project, intent=intent,
)
actual = ParticipantsClient.intent_path(project, intent)
assert expected == actual
def test_parse_intent_path():
expected = {
"project": "squid",
"intent": "clam",
}
path = ParticipantsClient.intent_path(**expected)
# Check that the path construction is reversible.
actual = ParticipantsClient.parse_intent_path(path)
assert expected == actual
def test_message_path():
project = "whelk"
conversation = "octopus"
message = "oyster"
expected = "projects/{project}/conversations/{conversation}/messages/{message}".format(
project=project, conversation=conversation, message=message,
)
actual = ParticipantsClient.message_path(project, conversation, message)
assert expected == actual
def test_parse_message_path():
expected = {
"project": "nudibranch",
"conversation": "cuttlefish",
"message": "mussel",
}
path = ParticipantsClient.message_path(**expected)
# Check that the path construction is reversible.
actual = ParticipantsClient.parse_message_path(path)
assert expected == actual
def test_participant_path():
project = "winkle"
conversation = "nautilus"
participant = "scallop"
expected = "projects/{project}/conversations/{conversation}/participants/{participant}".format(
project=project, conversation=conversation, participant=participant,
)
actual = ParticipantsClient.participant_path(project, conversation, participant)
assert expected == actual
def test_parse_participant_path():
expected = {
"project": "abalone",
"conversation": "squid",
"participant": "clam",
}
path = ParticipantsClient.participant_path(**expected)
# Check that the path construction is reversible.
actual = ParticipantsClient.parse_participant_path(path)
assert expected == actual
def test_session_entity_type_path():
project = "whelk"
session = "octopus"
entity_type = "oyster"
expected = "projects/{project}/agent/sessions/{session}/entityTypes/{entity_type}".format(
project=project, session=session, entity_type=entity_type,
)
actual = ParticipantsClient.session_entity_type_path(project, session, entity_type)
assert expected == actual
def test_parse_session_entity_type_path():
expected = {
"project": "nudibranch",
"session": "cuttlefish",
"entity_type": "mussel",
}
path = ParticipantsClient.session_entity_type_path(**expected)
# Check that the path construction is reversible.
actual = ParticipantsClient.parse_session_entity_type_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "winkle"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = ParticipantsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nautilus",
}
path = ParticipantsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = ParticipantsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "scallop"
expected = "folders/{folder}".format(folder=folder,)
actual = ParticipantsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "abalone",
}
path = ParticipantsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = ParticipantsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "squid"
expected = "organizations/{organization}".format(organization=organization,)
actual = ParticipantsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "clam",
}
path = ParticipantsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = ParticipantsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "whelk"
expected = "projects/{project}".format(project=project,)
actual = ParticipantsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "octopus",
}
path = ParticipantsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = ParticipantsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "oyster"
location = "nudibranch"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = ParticipantsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
}
path = ParticipantsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = ParticipantsClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.ParticipantsTransport, "_prep_wrapped_messages"
) as prep:
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.ParticipantsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = ParticipantsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = ParticipantsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = ParticipantsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(ParticipantsClient, transports.ParticipantsGrpcTransport),
(ParticipantsAsyncClient, transports.ParticipantsGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-dialogflow
|
tests/unit/gapic/dialogflow_v2/test_participants.py
|
Python
|
apache-2.0
| 127,239
|
[
"Octopus"
] |
1590517dc135bd5eacb39afa46f132400cbaf235662b14aa5fb6d373e37d9a9b
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Non-relativistic unrestricted Hartree-Fock hyperfine coupling tensor
(In testing)
Refs:
JCP, 120, 2127
JCP, 118, 3939
'''
from functools import reduce
import numpy
from pyscf import lib
from pyscf.lib import logger  # used in dump_flags below; was missing
from pyscf.scf import _vhf
from pyscf.prop.hfc import uhf as uhf_hfc
from pyscf.prop.gtensor.uks import get_vxc_soc
# Note the (-) sign of beta-beta block is included in the integral
def make_h1_soc2e(hfcobj, dm0):
    mf = hfcobj._scf
    mol = mf.mol  # was referenced below without being defined in this scope
    ni = mf._numint
    omega, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, spin=mol.spin)
if abs(omega) > 1e-10:
raise NotImplementedError
mem_now = lib.current_memory()[0]
max_memory = max(2000, mf.max_memory*.9-mem_now)
v1 = get_vxc_soc(ni, mol, mf.grids, mf.xc, dm0,
max_memory=max_memory, verbose=hfcobj.verbose)
if abs(hyb) > 1e-10:
vj, vk = uhf_hfc.get_jk(mol, dm0)
v1 += vj[0] + vj[1]
v1 -= vk * hyb
else:
vj = _vhf.direct_mapdm(mol._add_suffix('int2e_p1vxp1'),
'a4ij', 'lk->s2ij',
dm0, 3, mol._atm, mol._bas, mol._env)
for i in range(3):
lib.hermi_triu(vj[0,i], hermi=2, inplace=True)
lib.hermi_triu(vj[1,i], hermi=2, inplace=True)
v1 += vj[0] + vj[1]
v1[1] *= -1
return v1
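# Illustrative note (shape assumed from the contractions above): v1 is indexed
# as (spin, component, ao, ao) -- alpha/beta by SOC x/y/z -- and the beta
# block carries the opposite sign, applied via v1[1] *= -1.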
class HyperfineCoupling(uhf_hfc.HyperfineCoupling):
make_h1_soc2e = make_h1_soc2e
def dump_flags(self, verbose=None):
log = logger.new_logger(self, verbose)
log.info('\n')
log.info('******** %s for %s (In testing) ********',
self.__class__, self._scf.__class__)
log.info('HFC for atoms %s', str(self.hfc_nuc))
if self.cphf:
log.info('CPHF conv_tol = %g', self.conv_tol)
log.info('CPHF max_cycle_cphf = %d', self.max_cycle_cphf)
log.info('para_soc2e = %s', self.para_soc2e)
log.info('so_eff_charge = %s (1e SO effective charge)',
self.so_eff_charge)
if not self._scf.converged:
log.warn('Ground state SCF is not converged')
return self
HFC = HyperfineCoupling
if __name__ == '__main__':
from pyscf import gto, scf, dft
mol = gto.M(atom='C 0 0 0; O 0 0 1.12',
basis='ccpvdz', spin=1, charge=1, verbose=3)
mf = dft.UKS(mol).run()
hfc = HFC(mf)
hfc.verbose = 4
hfc.so_eff_charge = False
print(lib.finger(hfc.kernel()) - 255.92807696823797)
mol = gto.M(atom='H 0 0 0; H 0 0 1.',
basis='ccpvdz', spin=1, charge=-1, verbose=3)
mf = scf.UKS(mol).run()
hfc = HFC(mf)
hfc.cphf = True
print(lib.finger(hfc.kernel()) - -25.896662045941071)
mol = gto.M(atom='''
Li 0 0 1
''',
basis='ccpvdz', spin=1, charge=0, verbose=3)
mf = scf.UKS(mol).run()
hfc = HFC(mf)
hfc.cphf = True
print(lib.finger(hfc.kernel()) - 65.396568554095523)
mol = gto.M(atom='''
H 0 0 1
H 1.2 0 1
H .1 1.1 0.3
H .8 .7 .6
''',
basis='ccpvdz', spin=1, charge=1, verbose=3)
mf = scf.UKS(mol).run()
hfc = HFC(mf)
print(lib.finger(hfc.kernel()) - 180.05536650105842)
nao, nmo = mf.mo_coeff[0].shape
numpy.random.seed(1)
dm0 = numpy.random.random((2,nao,nao))
dm0 = dm0 + dm0.transpose(0,2,1)
hfc.so_eff_charge = False
h1a, h1b = make_h1_soc2e(hfc, dm0)
print(lib.finger(h1a) - -10.684681440665429)
print(lib.finger(h1b) - 10.23699899832944)
|
gkc1000/pyscf
|
pyscf/prop/hfc/uks.py
|
Python
|
apache-2.0
| 4,256
|
[
"PySCF"
] |
70382740c36309dab8bb55baa34dd806654c106d87ee7b049ad48c75b96e146f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Last-modified: 12 June 2020 10:48:53 PM
#
# Module/Scripts Description
#
# Copyright (c) 2020 Rowan University
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the BSD License (see the file COPYING included with
# the distribution).
#
# @version: 1.1.0
# @design: Yong Chen <chenyong@rowan.edu>
# @implementation: Yunfei Wang <yfwang0405@gmail.com>
# @corresponding author: Yong Chen <chenyong@rowan.edu>
# ------------------------------------
# python modules
# ------------------------------------
import os
import sys
import pandas
import argparse
import c3s
# ------------------------------------
# constants
# ------------------------------------
# ------------------------------------
# Misc functions
# ------------------------------------
def argParser():
''' Parse arguments. '''
    p=argparse.ArgumentParser(description='C3S: model-based analysis and pipeline of dCas9 Capture-3C-Seq data.',add_help=False,epilog='dependencies: numpy, scipy, pandas, pysam, statsmodels')
pr = p.add_argument_group('Required')
pr.add_argument("-x","--genome",dest="genome",type=str,metavar="hg38", required=True, help="Bowtie2 built genome.")
pr.add_argument("-1",dest="fq1",type=str,metavar='sample_R1.fastq.gz',nargs="+",required=True,help="Read 1 fastq file. Can be gzip(.gz) or bzip2(.bz2) compressed.")
pr.add_argument("-2",dest="fq2",type=str,metavar='sample_R2.fastq.gz',nargs="+",required=True,help="Read 2 fastq file. Can be gzip(.gz) or bzip2(.bz2) compressed.")
pr.add_argument("--prefix",dest="prefix",type=str,metavar='prefix',required=True,help="Prefix of result files.")
pr.add_argument("--bait",dest="bait",type=str,metavar="chr11:5305934",required=True,help="Bait genomic locus. [e.g. \"chr11:5305934\"]")
po = p.add_argument_group('Optional')
po.add_argument("--extendsize",dest="extendsize",type=int,metavar="100000",default=100000,help="Length to be extended from bait regions. [Defaut=100000]")
po.add_argument("--readlen",dest="readlen",type=int,metavar="36",default=36,help="Read length. [Default=36].")
po.add_argument("--seed",dest="seed",type=int,metavar="1024",default=1024,help="Seed to generate random values. [Default=1024].")
po.add_argument("--smooth-window",dest="smooth_window",type=int,metavar="100",default=100,help="Smooth window for peak size inference. [Default=100].")
po.add_argument("--model-number",dest="nbins",type=int,metavar="11",default=11,help="Number of NB models used for testing. [Default=11].")
po.add_argument("--nperm",dest="nperm",type=int,metavar="10000",default=10000,help="Number of permutatons. [Default=10000].")
po.add_argument("-w",dest="wdir",type=str,metavar='"."',default=".",help="Working directory. [Default=\".\"].")
po.add_argument("-p",dest='proc',type=int,metavar='10',default=10,help="Number of processes. [Default=10]")
if len(sys.argv)==1:
sys.exit(p.print_help())
args = p.parse_args()
return args
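# A hypothetical invocation (sample names and paths are placeholders, not
# taken from this repository):
#   python runC3S.py -x hg38 -1 S1_R1.fastq.gz -2 S1_R2.fastq.gz \
#       --prefix S1 --bait chr11:5305934 -w results -p 10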
# ------------------------------------
# Classes
# ------------------------------------
# ------------------------------------
# Main
# ------------------------------------
if __name__=="__main__":
args = argParser()
# Mapping reads to genome
mappingdir = args.wdir+"/010ReadMapping"
fq1, fq2 = ",".join(args.fq1), ",".join(args.fq2)
mappingdir = c3s.Utils.touchdir(mappingdir)
# 1st round of mapping
c3s.Utils.touchtime("FIRST ROUND OF MAPPING ...")
c3s.Utils.touchtime("MAPPING READ 1 ...")
c3s.Algorithms.bowtie2_SE(args.genome,fq1,args.prefix+"_R1",proc=args.proc,wdir=mappingdir,min_qual=30)
c3s.Utils.touchtime("MAPPING READ 2 ...")
c3s.Algorithms.bowtie2_SE(args.genome,fq2,args.prefix+"_R2",proc=args.proc,wdir=mappingdir,min_qual=30)
c3s.Utils.touchtime()
# Split the reads by GATC sites and take the larger one
c3s.Utils.touchtime("Split read by GATC sites ...")
c3s.Algorithms.ParseGATCSites("{0}/{1}_R1_un.fastq.gz".format(mappingdir,args.prefix),"{0}/{1}_R1_split.fastq.gz".format(mappingdir,args.prefix))
c3s.Algorithms.ParseGATCSites("{0}/{1}_R2_un.fastq.gz".format(mappingdir,args.prefix),"{0}/{1}_R2_split.fastq.gz".format(mappingdir,args.prefix))
c3s.Utils.touchtime()
# 2nd round of mapping
c3s.Utils.touchtime("SECOND ROUND OF MAPPING ...")
c3s.Utils.touchtime("MAPPING READ 1 ...")
c3s.Algorithms.bowtie2_SE(args.genome,"{0}/{1}_R1_split.fastq.gz".format(mappingdir,args.prefix),args.prefix+"_R1_remap",min_qual=30,proc=args.proc,wdir=mappingdir)
c3s.Utils.touchtime("MAPPING READ 2 ...")
c3s.Algorithms.bowtie2_SE(args.genome,"{0}/{1}_R2_split.fastq.gz".format(mappingdir,args.prefix),args.prefix+"_R2_remap",min_qual=30,proc=args.proc,wdir=mappingdir)
c3s.Utils.touchtime()
# Fix mate pairs
c3s.Utils.touchtime("Merge bam files and fix mate pairs ...")
bams = [mappingdir+args.prefix+f for f in ["_R1.bam", "_R2.bam", "_R1_remap.bam", "_R2_remap.bam"]]
tbffile = c3s.Algorithms.FixMatePairs(bams,mappingdir+args.prefix,args.proc)
c3s.Utils.touchtime()
# Infer peak characteristics from the bait region
plotdir = args.wdir+"/020Plotting"
plotdir = c3s.Utils.touchdir(plotdir)
c3s.Utils.touchtime("Draw bait figures ...")
tbf = c3s.TabixFile(tbffile)
tbf.setChromSizes(bams[0])
tbf.BaitStatsPlot(args.bait,
plotdir+args.prefix+"_stats.pdf",
extendsize=args.extendsize,
readlen=args.readlen,
smooth_window=args.smooth_window)
c3s.Utils.touchtime()
# Calculate intra- and inter-chrom interactions
modeldir = args.wdir+"/030Model"
modeldir = c3s.Utils.touchdir(modeldir)
c3s.Utils.touchtime("Permutation on intra-chromosomal interactions ...")
ns, ps = tbf.GetIntraChromLinks(nperm=args.nperm,nbins=args.nbins)
#for n,p in zip(ns,ps):
# print n,p
c3s.Utils.touchtime("Permutation on inter-chromosomal interactions ...")
n, p = tbf.GetInterChromLinks(nperm=args.nperm)
#print n, p
c3s.Utils.touchtime()
# Calculate p values for intra- and inter-chrom interactions.
c3s.Utils.touchtime("Calculate p values ...")
tbf.InferBaitPval(modeldir+args.prefix)
c3s.Utils.touchtime()
# Ending
c3s.Utils.touchtime("RunC3S finished successfully. Thank you for using C3S!")
|
YONGCHENUTD/C3S
|
bin/runC3S.py
|
Python
|
gpl-3.0
| 6,466
|
[
"pysam"
] |
edf82d108902705d5abf061ee7d0165d08fd0d9162d3da5782164fa5f9d30d00
|
import tensorflow as tf # neural network for function approximation
import gym # environment
import numpy as np # matrix operation and math functions
from gym import wrappers
import gym_morph # customized environment for cart-pole
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
start_time = time.time()
max_test = 2
twohundred_flag = False
for test_num in range(1, max_test):
# Hyperparameters
RANDOM_NUMBER_SEED = test_num
# ENVIRONMENT1 = "morph-v0"
ENVIRONMENT1 = "morph-sameinit-v0"
MAX_EPISODES = 5000 # number of episodes
EPISODE_LENGTH = 200 # single episode length
HIDDEN_SIZE = 16
DISPLAY_WEIGHTS = False # Help debug weight update
gamma = 0.99 # Discount per step
RENDER = False # Render the cart-pole system
VIDEO_INTERVAL = 100 # Generate a video at this interval
CONSECUTIVE_TARGET = 100 # Including previous 100 rewards
    CONST_LR = False # Constant or decaying learning rate
# Constant learning rate
const_learning_rate_in = 0.006
# Decay learning rate
start_learning_rate_in = 0.005
decay_steps_in = 100
decay_rate_in = 0.92
# DIR_PATH_SAVEFIG = "root/cartpole_plot/"
DIR_PATH_SAVEFIG = "./cartpole_plot/"
if CONST_LR:
learning_rate = const_learning_rate_in
file_name_savefig = "el" + str(EPISODE_LENGTH) \
+ "_hn" + str(HIDDEN_SIZE) \
+ "_clr" + str(learning_rate).replace(".", "p") \
+ "_test" + str(test_num) \
+ ".png"
else:
start_learning_rate = start_learning_rate_in
decay_steps = decay_steps_in
decay_rate = decay_rate_in
file_name_savefig = "el" + str(EPISODE_LENGTH) \
+ "_hn" + str(HIDDEN_SIZE) \
+ "_dlr_slr" + str(start_learning_rate).replace(".", "p") \
+ "_ds" + str(decay_steps) \
+ "_dr" + str(decay_rate).replace(".", "p") \
+ "_test" + str(test_num) \
+ ".png"
env = gym.make(ENVIRONMENT1)
env.seed(RANDOM_NUMBER_SEED)
np.random.seed(RANDOM_NUMBER_SEED)
tf.set_random_seed(RANDOM_NUMBER_SEED)
# Input and output sizes
input_size = 4
output_size = 2
# input_size = env.observation_space.shape[0]
# try:
# output_size = env.action_space.shape[0]
# except AttributeError:
# output_size = env.action_space.n
# Tensorflow network setup
x = tf.placeholder(tf.float32, shape=(None, input_size))
y = tf.placeholder(tf.float32, shape=(None, 1))
if not CONST_LR:
# decay learning rate
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(start_learning_rate, global_step, decay_steps, decay_rate, staircase=False)
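        # exponential_decay computes
        #   lr = start_learning_rate * decay_rate ** (global_step / decay_steps);
        # with staircase=False the exponent stays fractional, so the rate
        # decays smoothly on every update instead of in discrete steps.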
expected_returns = tf.placeholder(tf.float32, shape=(None, 1))
# Xavier (2010) weights initializer for uniform distribution:
# x = sqrt(6. / (in + out)); [-x, x]
w_init = tf.contrib.layers.xavier_initializer()
hidden_W = tf.get_variable("W1", shape=[input_size, HIDDEN_SIZE],
initializer=w_init)
hidden_B = tf.Variable(tf.zeros(HIDDEN_SIZE))
dist_W = tf.get_variable("W2", shape=[HIDDEN_SIZE, output_size],
initializer=w_init)
dist_B = tf.Variable(tf.zeros(output_size))
hidden = tf.nn.elu(tf.matmul(x, hidden_W) + hidden_B)
dist = tf.tanh(tf.matmul(hidden, dist_W) + dist_B)
dist_soft = tf.nn.log_softmax(dist)
dist_in = tf.matmul(dist_soft, tf.Variable([[1.], [0.]]))
pi = tf.contrib.distributions.Bernoulli(dist_in)
pi_sample = pi.sample()
log_pi = pi.log_prob(y)
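    # The two-unit tanh head is log-softmaxed, and the matmul with [[1.], [0.]]
    # picks out the first entry, i.e. the log-probability assigned to action 0.
    # That scalar is passed to Bernoulli as its logit, so pi_sample draws a
    # binary action and log_pi supplies the log-likelihood for the policy update.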
if CONST_LR:
optimizer = tf.train.RMSPropOptimizer(learning_rate)
train = optimizer.minimize(-1.0 * expected_returns * log_pi)
else:
optimizer = tf.train.RMSPropOptimizer(learning_rate)
train = optimizer.minimize(-1.0 * expected_returns * log_pi, global_step=global_step)
saver = tf.train.Saver()
# Create and initialize a session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
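    # run_episode performs one Monte-Carlo rollout under the current policy and
    # returns the raw return, a discounted return (note: only positive rewards
    # are scaled by the running discount in this variant), the cumulative
    # discounted reward recorded before each step, and the states and actions.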
def run_episode(environment, ep, render=False):
raw_reward = 0
discounted_reward = 0
cumulative_reward = []
discount = 1.0
states = []
actions = []
obs = environment.reset()
done = False
while not done:
states.append(obs)
cumulative_reward.append(discounted_reward)
if render and ((ep % VIDEO_INTERVAL) == 0):
environment.render()
action = sess.run(pi_sample, feed_dict={x: [obs]})[0]
actions.append(action)
obs, reward, done, info = env.step(action[0])
raw_reward += reward
if reward > 0:
discounted_reward += reward * discount
else:
discounted_reward += reward
discount *= gamma
return raw_reward, discounted_reward, cumulative_reward, states, actions
def display_weights(session):
w1 = session.run(hidden_W)
b1 = session.run(hidden_B)
w2 = session.run(dist_W)
b2 = session.run(dist_B)
print(w1, b1, w2, b2)
returns = []
mean_returns = []
for ep in range(MAX_EPISODES):
        if not twohundred_flag:
saver.restore(sess, "./cartpole_model/model.ckpt")
raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
run_episode(env, ep, RENDER)
if raw_G == 200:
twohundred_flag = True
display_weights(sess)
break
# expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
# sess.run(train, feed_dict={x: ep_states, y: ep_actions,
# expected_returns: expected_R})
if DISPLAY_WEIGHTS:
display_weights(sess)
returns.append(raw_G)
running_returns = returns[max(0, ep-CONSECUTIVE_TARGET):(ep+1)]
mean_return = np.mean(running_returns)
mean_returns.append(mean_return)
if CONST_LR:
msg = "Test: {}, Episode: {}, Time: {}, Learning rate: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(test_num, ep+1, time.strftime('%H:%M:%S', time.gmtime(time.time()-start_time)), learning_rate, raw_G, CONSECUTIVE_TARGET, mean_return)
print(msg)
else:
msg = "Test: {}, Episode: {}, Time: {}, Learning rate: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(test_num, ep+1, time.strftime('%H:%M:%S', time.gmtime(time.time()-start_time)), sess.run(learning_rate), raw_G, CONSECUTIVE_TARGET, mean_return)
print(msg)
env.close() # close openai gym environment
tf.reset_default_graph() # clear tensorflow graph
# Plot
# plt.style.use('ggplot')
# plt.style.use('dark_background')
# episodes_plot = np.arange(MAX_EPISODES)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# fig.subplots_adjust(top=0.85)
# if CONST_LR:
# ax.set_title("The Cart-Pole Problem Test %i \n \
# Episode Length: %i \
# Discount Factor: %.2f \n \
# Number of Hidden Neuron: %i \
# Constant Learning Rate: %.5f" % (test_num, EPISODE_LENGTH, gamma, HIDDEN_SIZE, learning_rate))
# else:
# ax.set_title("The Cart-Pole Problem Test %i \n \
# EpisodeLength: %i DiscountFactor: %.2f NumHiddenNeuron: %i \n \
# Decay Learning Rate: (start: %.5f, steps: %i, rate: %.2f)" % (test_num, EPISODE_LENGTH, gamma, HIDDEN_SIZE, start_learning_rate, decay_steps, decay_rate))
# ax.set_xlabel("Episode")
# ax.set_ylabel("Return")
# ax.set_ylim((0, EPISODE_LENGTH))
# ax.grid(linestyle='--')
# ax.plot(episodes_plot, returns, label='Instant return')
# ax.plot(episodes_plot, mean_returns, label='Averaged return')
# legend = ax.legend(loc='best', shadow=True)
# fig.savefig(DIR_PATH_SAVEFIG + file_name_savefig, dpi=500)
# plt.show()
|
GitYiheng/reinforcement_learning_test
|
test03_monte_carlo/t50_plot_at_200.py
|
Python
|
mit
| 8,063
|
[
"NEURON"
] |
355c8dcd1de18cf8e3a574c66d2cb05d56f37c19011b0b992205914c85f0c88b
|
import unittest
import numpy as np
import pysal
from pysal.contrib.handler import Model
from functools import partial
from pysal.spreg import diagnostics
#from pysal.spreg.ols import OLS as OLS
OLS = Model
#from pysal.spreg.twosls import TSLS as TSLS
TSLS = partial(Model, mtype='TSLS')
#from pysal.spreg.twosls_sp import GM_Lag
GM_Lag = partial(Model, mtype='GM_Lag')
from pysal.spreg.diagnostics_sp import LMtests, MoranRes, spDcache, AKtest
class TestLMtests(unittest.TestCase):
def setUp(self):
db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
X = np.array(X).T
self.y = y
self.X = X
ols = OLS(self.y, self.X)
self.ols = ols
w = pysal.open(pysal.examples.get_path('columbus.gal'), 'r').read()
w.transform='r'
self.w = w
def test_lm_err(self):
lms = LMtests(self.ols, self.w)
lme = np.array([3.097094, 0.078432])
np.testing.assert_array_almost_equal(lms.lme, lme, decimal=6)
def test_lm_lag(self):
lms = LMtests(self.ols, self.w)
lml = np.array([ 0.981552, 0.321816])
np.testing.assert_array_almost_equal(lms.lml, lml, decimal=6)
def test_rlm_err(self):
lms = LMtests(self.ols, self.w)
rlme = np.array([ 3.209187, 0.073226])
np.testing.assert_array_almost_equal(lms.rlme, rlme, decimal=6)
def test_rlm_lag(self):
lms = LMtests(self.ols, self.w)
rlml = np.array([ 1.093645, 0.295665])
np.testing.assert_array_almost_equal(lms.rlml, rlml, decimal=6)
def test_lm_sarma(self):
lms = LMtests(self.ols, self.w)
sarma = np.array([ 4.190739, 0.123025])
np.testing.assert_array_almost_equal(lms.sarma, sarma, decimal=6)
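    # Each LMtests attribute checked above (lme, lml, rlme, rlml, sarma) is a
    # (statistic, p-value) pair, which is why every expected array has exactly
    # two entries.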
class TestMoranRes(unittest.TestCase):
def setUp(self):
db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
X = np.array(X).T
self.y = y
self.X = X
ols = OLS(self.y, self.X)
self.ols = ols
w = pysal.open(pysal.examples.get_path('columbus.gal'), 'r').read()
w.transform='r'
self.w = w
def test_get_m_i(self):
m = MoranRes(self.ols, self.w, z=True)
np.testing.assert_array_almost_equal(m.I, 0.17130999999999999, decimal=6)
def test_get_v_i(self):
m = MoranRes(self.ols, self.w, z=True)
np.testing.assert_array_almost_equal(m.vI, 0.0081300000000000001, decimal=6)
def test_get_e_i(self):
m = MoranRes(self.ols, self.w, z=True)
np.testing.assert_array_almost_equal(m.eI, -0.034522999999999998, decimal=6)
def test_get_z_i(self):
m = MoranRes(self.ols, self.w, z=True)
np.testing.assert_array_almost_equal(m.zI, 2.2827389999999999, decimal=6)
class TestAKTest(unittest.TestCase):
def setUp(self):
db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
y = np.array(db.by_col("CRIME"))
y = np.reshape(y, (49,1))
self.y = y
X = []
X.append(db.by_col("INC"))
X = np.array(X).T
self.X = X
yd = []
yd.append(db.by_col("HOVAL"))
yd = np.array(yd).T
self.yd = yd
q = []
q.append(db.by_col("DISCBD"))
q = np.array(q).T
self.q = q
reg = TSLS(y, X, yd, q=q)
self.reg = reg
w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
w.transform = 'r'
self.w = w
def test_gen_mi(self):
ak = AKtest(self.reg, self.w)
np.testing.assert_array_almost_equal(ak.mi, 0.2232672865437263, decimal=6)
def test_gen_ak(self):
ak = AKtest(self.reg, self.w)
np.testing.assert_array_almost_equal(ak.ak, 4.6428948758930852, decimal=6)
def test_gen_p(self):
ak = AKtest(self.reg, self.w)
np.testing.assert_array_almost_equal(ak.p, 0.031182360054340875, decimal=6)
def test_sp_mi(self):
ak = AKtest(self.reg, self.w, case='gen')
np.testing.assert_array_almost_equal(ak.mi, 0.2232672865437263, decimal=6)
def test_sp_ak(self):
ak = AKtest(self.reg, self.w,case='gen')
np.testing.assert_array_almost_equal(ak.ak, 1.1575928784397795, decimal=6)
def test_sp_p(self):
ak = AKtest(self.reg, self.w, case='gen')
np.testing.assert_array_almost_equal(ak.p, 0.28196531619791054, decimal=6)
class TestSpDcache(unittest.TestCase):
def setUp(self):
db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
X = np.array(X).T
self.y = y
self.X = X
ols = OLS(self.y, self.X)
self.ols = ols
w = pysal.open(pysal.examples.get_path('columbus.gal'), 'r').read()
w.transform='r'
self.w = w
def test_j(self):
cache = spDcache(self.ols, self.w)
np.testing.assert_array_almost_equal(cache.j[0][0], 0.62330311259039439, decimal=6)
def test_t(self):
cache = spDcache(self.ols, self.w)
np.testing.assert_array_almost_equal(cache.t, 22.751186696900984, decimal=6)
def test_trA(self):
cache = spDcache(self.ols, self.w)
np.testing.assert_array_almost_equal(cache.trA, 1.5880426389276328, decimal=6)
def test_utwuDs(self):
cache = spDcache(self.ols, self.w)
np.testing.assert_array_almost_equal(cache.utwuDs[0][0], 8.3941977502916068, decimal=6)
def test_utwyDs(self):
cache = spDcache(self.ols, self.w)
np.testing.assert_array_almost_equal(cache.utwyDs[0][0], 5.475255215067957, decimal=6)
def test_wu(self):
cache = spDcache(self.ols, self.w)
np.testing.assert_array_almost_equal(cache.wu[0][0], -10.681344941514411, decimal=6)
if __name__ == '__main__':
unittest.main()
|
TaylorOshan/pysal
|
pysal/contrib/handler/tests/test_diagnostics_sp.py
|
Python
|
bsd-3-clause
| 6,254
|
[
"COLUMBUS"
] |
737e5f1316b99dadbd27c57322883a03460683b0125e6bc94270337d8b31438e
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from espressopp import Real3D, infinity
import espressopp.unittest
from espressopp.interaction.LennardJones import *
class TestLennardJones(espressopp.unittest.TestCase):
def testDefaults(self):
lj=LennardJones()
self.assertEqual(lj.epsilon, 1.0)
self.assertEqual(lj.sigma, 1.0)
self.assertEqual(lj.cutoff, infinity)
self.assertEqual(lj.shift, 0.0)
def testEnergy(self):
lj=LennardJones(epsilon=2.0, sigma=2.0)
# root
self.assertAlmostEqual(lj.computeEnergy(2.0), 0.0)
self.assertAlmostEqual(lj.computeEnergy(2.0, 0.0, 0.0), 0.0)
# minimum
self.assertAlmostEqual(
lj.computeEnergy(2.0*2.0**(1.0/6.0)), -2.0)
self.assertAlmostEqual(lj.computeEnergy(0.0, 2.0*2.0**(1.0/6.0), 0.0), -2.0)
def testForce(self):
lj=LennardJones(epsilon=2.0, sigma=2.0)
# force in the minimum
self.assertAlmostEqual(
(lj.computeForce(2.0*2.0**(1.0/6.0), 0.0, 0.0) -
Real3D(0.0, 0.0, 0.0)).sqr(), 0)
    def testProperties(self):
lj=LennardJones()
lj.epsilon=2.0
lj.sigma=2.0
lj.cutoff=4.0
lj.shift=0.0
# here we test energy computation, as testing property access
# would always work
self.assertAlmostEqual(lj.computeEnergy(2.0), 0.0)
self.assertAlmostEqual(lj.computeEnergy(2.0*2.0**(1.0/6.0)), -2.0)
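# For reference, the values asserted above follow from the standard 12-6
# Lennard-Jones form V(r) = 4*eps*((sigma/r)**12 - (sigma/r)**6): the root
# sits at r = sigma and the minimum V = -eps at r = 2**(1/6)*sigma. A small
# pure-python cross-check (a sketch, independent of the espressopp code):
def _reference_lj(r, epsilon=2.0, sigma=2.0):
    sr6 = (sigma / r)**6
    return 4.0 * epsilon * (sr6 * sr6 - sr6)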
if __name__ == "__main__":
unittest.main()
|
govarguz/espressopp
|
testsuite/interaction_potentials/unittest/PTestLennardJones.py
|
Python
|
gpl-3.0
| 2,370
|
[
"ESPResSo"
] |
8d1a3ddcabeddc3952b5b745be2c97eef9f898b139ee500e200b5afa35deba4a
|
import ast
import json
import pycodestyle
from gql_checker.__about__ import (
__author__, __copyright__, __email__, __license__, __summary__, __title__,
__uri__, __version__
)
from gql_checker.stdlib_list import STDLIB_NAMES
from graphql import Source, validate, parse, build_client_schema, build_schema
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
GQL_SYNTAX_ERROR = 'GQL100'
GQL_VALIDATION_ERROR = 'GQL101'
class ImportVisitor(ast.NodeVisitor):
"""
This class visits all the gql calls.
"""
def __init__(self, filename, options):
self.filename = filename
self.options = options or {}
self.calls = []
    def visit_Call(self, node):  # noqa
        # Only bare-name calls such as gql("...") qualify; attribute calls
        # (e.g. client.gql(...)) have no .id and would raise AttributeError.
        if isinstance(node.func, ast.Name) and node.func.id == 'gql':
            self.calls.append(node)
        self.generic_visit(node)  # keep walking so nested calls are seen
def node_query(self, node):
"""
Return the query for the gql call node
"""
if isinstance(node, ast.Call):
assert node.args
arg = node.args[0]
if not isinstance(arg, ast.Str):
return
else:
raise TypeError(type(node))
return arg.s
class ImportOrderChecker(object):
visitor_class = ImportVisitor
options = None
def __init__(self, filename, tree):
self.tree = tree
self.filename = filename
self.lines = None
def load_file(self):
if self.filename in ("stdin", "-", None):
self.filename = "stdin"
self.lines = pycodestyle.stdin_get_value().splitlines(True)
else:
self.lines = pycodestyle.readlines(self.filename)
if not self.tree:
self.tree = ast.parse("".join(self.lines))
def get_schema(self):
gql_introspection_schema = self.options.get('gql_introspection_schema')
if gql_introspection_schema:
try:
with open(gql_introspection_schema) as data_file:
introspection_schema = json.load(data_file)
return build_client_schema(introspection_schema)
except IOError as e:
raise Exception(f"Cannot find the provided introspection schema. {e}")
        schema = self.options.get('schema')
        assert schema, 'Need to provide schema'
        # Assumes the 'schema' option holds SDL text; build it into a schema
        # object instead of silently falling through and returning None.
        return build_schema(schema)
    def validation_errors(self, document):
        # renamed from `ast` to avoid shadowing the ast module imported above
        return validate(self.get_schema(), document)
    def error(self, node, code, message):
        raise NotImplementedError()  # NotImplemented is a value, not an exception
def check_gql(self):
if not self.tree or not self.lines:
self.load_file()
visitor = self.visitor_class(self.filename, self.options)
visitor.visit(self.tree)
for node in visitor.calls:
# Lines with the noqa flag are ignored entirely
if pycodestyle.noqa(self.lines[node.lineno - 1]):
continue
query = visitor.node_query(node)
if not query:
continue
            try:
                source = Source(query, 'gql query')
                document = parse(source)
            except Exception as e:
                message = str(e)
                yield self.error(node, GQL_SYNTAX_ERROR, message)
                continue
            validation_errors = self.validation_errors(document)
if validation_errors:
for error in validation_errors:
message = str(error)
yield self.error(node, GQL_VALIDATION_ERROR, message)
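# --- Illustrative sketch ---
# A concrete checker (e.g. a flake8 plugin) would subclass ImportOrderChecker
# and turn error() into a (line, col, message) tuple. The name GqlChecker
# below is hypothetical, shown only to illustrate the intended contract.
class GqlChecker(ImportOrderChecker):
    def error(self, node, code, message):
        return node.lineno, node.col_offset, "{0} {1}".format(code, message)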
|
graphql-python/gql
|
gql-checker/gql_checker/__init__.py
|
Python
|
mit
| 3,483
|
[
"VisIt"
] |
287cf40a45e6cf82e184f877e3d7bb698907221f4cb217d4d9cc142a980f9562
|
# excepthook1.py
#
# If the root frame contains a method __ExceptHook__, then this is automagically
# set as the exception hook for/by the Application.
#
# (If you want to remove or replace it, you can always tinker around with
# sys.excepthook.)
import sys
sys.path.append("../..")
from wax import *
from wax.tools.errordialog import ErrorDialog
class MainFrame(Frame):
def Body(self):
self.AddComponent(Button(self, "one", event=self.OnClick))
self.Pack()
def OnClick(self, event=None):
# deliberately create an error
x = 1/0
def __ExceptHook__(self, exctype, value, traceback):
dlg = ErrorDialog(self, exctype, value, traceback)
dlg.ShowModal()
dlg.Destroy()
app = Application(MainFrame)
app.MainLoop()
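# Note: __ExceptHook__ receives the same (exctype, value, traceback) triple
# as sys.excepthook, so the equivalent manual wiring would be roughly
#   sys.excepthook = lambda *exc: ErrorDialog(frame, *exc).ShowModal()
# (a sketch; 'frame' stands for an already-created MainFrame instance).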
|
MSMBA/msmba-workflow
|
msmba-workflow/srclib/wax/examples/excepthook1.py
|
Python
|
gpl-2.0
| 779
|
[
"TINKER"
] |
325c231c935b9ef40f14e1b1e839911922858e8b70703db0c143687fe41dab08
|
"""
==========================================
Statistical functions (:mod:`scipy.stats`)
==========================================
.. module:: scipy.stats
This module contains a large number of probability distributions as
well as a growing library of statistical functions.
Each univariate distribution is an instance of a subclass of `rv_continuous`
(`rv_discrete` for discrete distributions):
.. autosummary::
:toctree: generated/
rv_continuous
rv_discrete
Continuous distributions
========================
.. autosummary::
:toctree: generated/
alpha -- Alpha
anglit -- Anglit
arcsine -- Arcsine
beta -- Beta
betaprime -- Beta Prime
bradford -- Bradford
burr -- Burr
cauchy -- Cauchy
chi -- Chi
chi2 -- Chi-squared
cosine -- Cosine
dgamma -- Double Gamma
dweibull -- Double Weibull
erlang -- Erlang
expon -- Exponential
exponnorm -- Exponentially Modified Normal
exponweib -- Exponentiated Weibull
exponpow -- Exponential Power
f -- F (Snedecor F)
fatiguelife -- Fatigue Life (Birnbaum-Saunders)
fisk -- Fisk
foldcauchy -- Folded Cauchy
foldnorm -- Folded Normal
frechet_r -- Frechet Right Sided, Extreme Value Type II (Extreme LB) or weibull_min
frechet_l -- Frechet Left Sided, Weibull_max
genlogistic -- Generalized Logistic
gennorm -- Generalized normal
genpareto -- Generalized Pareto
genexpon -- Generalized Exponential
genextreme -- Generalized Extreme Value
gausshyper -- Gauss Hypergeometric
gamma -- Gamma
gengamma -- Generalized gamma
genhalflogistic -- Generalized Half Logistic
gilbrat -- Gilbrat
gompertz -- Gompertz (Truncated Gumbel)
gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
gumbel_l -- Left Sided Gumbel, etc.
halfcauchy -- Half Cauchy
halflogistic -- Half Logistic
halfnorm -- Half Normal
halfgennorm -- Generalized Half Normal
hypsecant -- Hyperbolic Secant
invgamma -- Inverse Gamma
invgauss -- Inverse Gaussian
invweibull -- Inverse Weibull
johnsonsb -- Johnson SB
johnsonsu -- Johnson SU
ksone -- Kolmogorov-Smirnov one-sided (no stats)
kstwobign -- Kolmogorov-Smirnov two-sided test for Large N (no stats)
laplace -- Laplace
levy -- Levy
levy_l
levy_stable
logistic -- Logistic
loggamma -- Log-Gamma
loglaplace -- Log-Laplace (Log Double Exponential)
lognorm -- Log-Normal
lomax -- Lomax (Pareto of the second kind)
maxwell -- Maxwell
mielke -- Mielke's Beta-Kappa
nakagami -- Nakagami
ncx2 -- Non-central chi-squared
ncf -- Non-central F
nct -- Non-central Student's T
norm -- Normal (Gaussian)
pareto -- Pareto
pearson3 -- Pearson type III
powerlaw -- Power-function
powerlognorm -- Power log normal
powernorm -- Power normal
rdist -- R-distribution
reciprocal -- Reciprocal
rayleigh -- Rayleigh
rice -- Rice
recipinvgauss -- Reciprocal Inverse Gaussian
semicircular -- Semicircular
t -- Student's T
triang -- Triangular
truncexpon -- Truncated Exponential
truncnorm -- Truncated Normal
tukeylambda -- Tukey-Lambda
uniform -- Uniform
vonmises -- Von-Mises (Circular)
vonmises_line -- Von-Mises (Line)
wald -- Wald
weibull_min -- Minimum Weibull (see Frechet)
weibull_max -- Maximum Weibull (see Frechet)
wrapcauchy -- Wrapped Cauchy
Multivariate distributions
==========================
.. autosummary::
:toctree: generated/
multivariate_normal -- Multivariate normal distribution
dirichlet -- Dirichlet
wishart -- Wishart
invwishart -- Inverse Wishart
Discrete distributions
======================
.. autosummary::
:toctree: generated/
bernoulli -- Bernoulli
binom -- Binomial
boltzmann -- Boltzmann (Truncated Discrete Exponential)
dlaplace -- Discrete Laplacian
geom -- Geometric
hypergeom -- Hypergeometric
logser -- Logarithmic (Log-Series, Series)
nbinom -- Negative Binomial
planck -- Planck (Discrete Exponential)
poisson -- Poisson
randint -- Discrete Uniform
skellam -- Skellam
zipf -- Zipf
Statistical functions
=====================
Several of these functions have a similar version in scipy.stats.mstats
which works on masked arrays.
.. autosummary::
:toctree: generated/
describe -- Descriptive statistics
gmean -- Geometric mean
hmean -- Harmonic mean
kurtosis -- Fisher or Pearson kurtosis
kurtosistest --
mode -- Modal value
moment -- Central moment
normaltest --
skew -- Skewness
skewtest --
kstat --
kstatvar --
tmean -- Truncated arithmetic mean
tvar -- Truncated variance
tmin --
tmax --
tstd --
tsem --
nanmean -- Mean, ignoring NaN values
nanstd -- Standard deviation, ignoring NaN values
nanmedian -- Median, ignoring NaN values
variation -- Coefficient of variation
find_repeats
trim_mean
.. autosummary::
:toctree: generated/
cumfreq
histogram2
histogram
itemfreq
percentileofscore
scoreatpercentile
relfreq
.. autosummary::
:toctree: generated/
binned_statistic -- Compute a binned statistic for a set of data.
binned_statistic_2d -- Compute a 2-D binned statistic for a set of data.
binned_statistic_dd -- Compute a d-D binned statistic for a set of data.
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
bayes_mvs
mvsdist
sem
zmap
zscore
.. autosummary::
:toctree: generated/
sigmaclip
threshold
trimboth
trim1
.. autosummary::
:toctree: generated/
f_oneway
pearsonr
spearmanr
pointbiserialr
kendalltau
linregress
theilslopes
f_value
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
kstest
chisquare
power_divergence
ks_2samp
mannwhitneyu
tiecorrect
rankdata
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
ss
square_of_sums
jarque_bera
.. autosummary::
:toctree: generated/
ansari
bartlett
levene
shapiro
anderson
anderson_ksamp
binom_test
fligner
median_test
mood
.. autosummary::
:toctree: generated/
boxcox
boxcox_normmax
boxcox_llf
entropy
.. autosummary::
:toctree: generated/
chisqprob
betai
Circular statistical functions
==============================
.. autosummary::
:toctree: generated/
circmean
circvar
circstd
Contingency table functions
===========================
.. autosummary::
:toctree: generated/
chi2_contingency
contingency.expected_freq
contingency.margins
fisher_exact
Plot-tests
==========
.. autosummary::
:toctree: generated/
ppcc_max
ppcc_plot
probplot
boxcox_normplot
Masked statistics functions
===========================
.. toctree::
stats.mstats
Univariate and multivariate kernel density estimation (:mod:`scipy.stats.kde`)
==============================================================================
.. autosummary::
:toctree: generated/
gaussian_kde
For many more statistical functions, install the software R and the
interface package rpy.
"""
from __future__ import division, print_function, absolute_import
from .stats import *
from .distributions import *
from .morestats import *
from ._binned_statistic import *
from .kde import gaussian_kde
from . import mstats
from .contingency import chi2_contingency
from ._multivariate import *
# Remove vonmises_cython from __all__; it is a private implementation module.
__all__ = [s for s in dir() if not (s.startswith('_') or s.endswith('cython'))]
from numpy.testing import Tester
test = Tester().test
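# --- Illustrative usage (a sketch; defined only, never called here) ---
def _example_usage():
    """Draw a Gaussian sample and summarize it, using only names
    re-exported above (`norm`, `describe`)."""
    sample = norm.rvs(loc=0.0, scale=1.0, size=100)
    return describe(sample)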
|
ales-erjavec/scipy
|
scipy/stats/__init__.py
|
Python
|
bsd-3-clause
| 8,924
|
[
"Gaussian"
] |
11a5683900849340eb4c1176ada871f21ffa673436b58552860b8533454e1599
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RSseq(RPackage):
"""Shrinkage estimation of dispersion in Negative Binomial models for RNA-
seq experiments with small sample size.
The purpose of this package is to discover the genes that are
differentially expressed between two conditions in RNA-seq experiments.
Gene expression is measured in counts of transcripts and modeled with
the Negative Binomial (NB) distribution using a shrinkage approach for
dispersion estimation. The method of moment (MM) estimates for
dispersion are shrunk towards an estimated target, which minimizes the
average squared difference between the shrinkage estimates and the
initial estimates. The exact per-gene probability under the NB model is
calculated, and used to test the hypothesis that the expected expression
of a gene in two conditions identically follow a NB distribution."""
homepage = "https://bioconductor.org/packages/sSeq"
git = "https://git.bioconductor.org/packages/sSeq.git"
version('1.22.0', commit='fa3895c9578edddca17b5d13a2678ee5830b85cc')
version('1.20.1', commit='91f31440323612cb04beb44404ab0a1bcb3ad87d')
version('1.18.0', commit='1f65e5a55ce0d51672b785450031872e6db5ca0f')
version('1.16.0', commit='b7f2b99dbd4a12ee9d18b0ec9898f13f1038479e')
version('1.14.0', commit='20ccffeb60196914975aa1feef902ddba659c571')
depends_on('r@3.0:', type=('build', 'run'))
depends_on('r-catools', type=('build', 'run'))
depends_on('r-rcolorbrewer', type=('build', 'run'))
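    # Typical usage from a Spack installation (a sketch; exact spec syntax
    # may vary with your Spack version):
    #   spack install r-sseq@1.22.0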
|
rspavel/spack
|
var/spack/repos/builtin/packages/r-sseq/package.py
|
Python
|
lgpl-2.1
| 1,773
|
[
"Bioconductor"
] |
3e4d4eb64565ddb1697739492a933178cfb38e3c99a37aeb8e3924cd9d4a5bc9
|
#!/usr/bin/env python2
# This script checks and can optionally update Zapdos source files.
# You should always run this script without the "-u" option
# first to make sure there is a clean dry run of the files that should
# be updated
# This is based on a script of the same name in the MOOSE Framework:
# https://github.com/idaholab/moose/blob/master/framework/scripts/fixup_headers.py
import os, re, shutil  # (string module no longer needed; str.find is used below)
from optparse import OptionParser
global_ignores = ['contrib', '.svn', '.git', 'crane', 'moose', 'squirrel']
unified_header = """\
//* This file is part of Zapdos, an open-source
//* application for the simulation of plasmas
//* https://github.com/shannon-lab/zapdos
//*
//* Zapdos is powered by the MOOSE Framework
//* https://www.mooseframework.org
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html"""
python_header = """\
#* This file is part of Zapdos, an open-source
#* application for the simulation of plasmas
#* https://github.com/shannon-lab/zapdos
#*
#* Zapdos is powered by the MOOSE Framework
#* https://www.mooseframework.org
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html"""
global_options = {}
def fixupHeader():
for dirpath, dirnames, filenames in os.walk(os.getcwd() + ""):
# Don't traverse into ignored directories
for ignore in global_ignores:
if ignore in dirnames:
dirnames.remove(ignore)
#print dirpath
#print dirnames
for file in filenames:
suffix = os.path.splitext(file)
if (suffix[-1] == '.C' or suffix[-1] == '.h') and not global_options.python_only:
checkAndUpdateCPlusPlus(os.path.abspath(dirpath + '/' + file))
if suffix[-1] == '.py' and not global_options.cxx_only:
checkAndUpdatePython(os.path.abspath(dirpath + '/' + file))
def checkAndUpdateCPlusPlus(filename):
# Don't update soft links
if os.path.islink(filename):
return
f = open(filename)
text = f.read()
f.close()
header = unified_header
# Check (exact match only)
    if text.find(header) == -1 or global_options.force:
# print the first 10 lines or so of the file
if global_options.update == False: # Report only
print filename + ' does not contain an up to date header'
if global_options.verbose == True:
print '>'*40, '\n', '\n'.join((text.split('\n', 10))[:10]), '\n'*5
else:
# Make sure any previous C-style header version is removed
text = re.sub(r'^/\*+/$.*^/\*+/$', '', text, flags=re.S | re.M)
# Make sure that any previous C++-style header (with extra character)
# is also removed.
text = re.sub(r'(?:^//\*.*\n)*', '', text, flags=re.M)
# Now cleanup extra blank lines
text = re.sub(r'\A(^\s*\n)', '', text)
# Update
f = open(filename + '~tmp', 'w')
f.write(header + '\n\n')
f.write(text)
f.close()
os.rename(filename + '~tmp', filename)
def checkAndUpdatePython(filename):
f = open(filename)
text = f.read()
f.close()
header = python_header
# Check (exact match only)
    if text.find(header) == -1:
# print the first 10 lines or so of the file
if global_options.update == False: # Report only
print filename + ' does not contain an up to date header'
if global_options.verbose == True:
print '>'*40, '\n', '\n'.join((text.split('\n', 10))[:10]), '\n'*5
else:
# Save off the shebang line if it exists
m = re.match(r'#!.*\n', text)
shebang = ''
if m:
shebang = m.group(0)
text = re.sub(r'^.*\n', '', text)
        # Save off any pylint disable directives
m = re.match(r'\A#pylint:\s+disable.*\n', text)
pylint_disable = ''
if m:
pylint_disable = m.group(0)
text = re.sub(r'^.*\n', '', text)
pylint_enable = False
if re.search(r'#pylint: enable=missing-docstring', text) != None:
pylint_enable = True
# Make sure any previous box-style header version is removed
text = re.sub(r'\A(?:#.*#\n)*', '', text)
# Make sure any previous version of the new header is removed
text = re.sub(r'^#\*.*\n', '', text, flags=re.M)
# Discard any pylint missing-docstring commands
text = re.sub(r'\A#pylint:.*missing-docstring.*\n', '', text)
# Now cleanup extra blank lines at the beginning of the file
text = re.sub(r'\A(^\s*\n)', '', text)
# Update
f = open(filename + '~tmp', 'w')
f.write(shebang)
f.write(pylint_disable)
f.write(header + '\n')
if pylint_enable:
f.write('#pylint: enable=missing-docstring\n')
if len(text) != 0:
f.write('\n' + text)
f.close()
shutil.copystat(filename, filename + '~tmp')
os.rename(filename + '~tmp', filename)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-u", "--update", action="store_true", dest="update", default=False)
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False)
parser.add_option("--python-only", action="store_true", dest="python_only", default=False)
parser.add_option("--cxx-only", action="store_true", dest="cxx_only", default=False)
parser.add_option("-f", "--force", action="store_true", dest="force", default=False)
(global_options, args) = parser.parse_args()
fixupHeader()
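# Typical invocations (mirroring the header comments and options above):
#   ./fixup_headers.py           # dry run: report files with stale headers
#   ./fixup_headers.py -u        # rewrite headers in place
#   ./fixup_headers.py -u -f     # force-rewrite even if a header is present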
|
lindsayad/zapdos
|
scripts/fixup_headers.py
|
Python
|
lgpl-2.1
| 5,929
|
[
"MOOSE"
] |
fa731d85c223f73d614d2891afaf2fad764d99af4920222a18449df3747ebb3d
|
from __future__ import absolute_import
import sys, os, yaml, glob
import subprocess
import argparse
import re
from sciLifeLab_utils import submit_job
def main(args):
projectFolder = os.getcwd()
samples_data_dir = args.sample_data_dir
projectName = os.path.basename(os.path.normpath(samples_data_dir))
for sample_dir_name in [dir for dir in os.listdir(samples_data_dir) \
if os.path.isdir(os.path.join(samples_data_dir, dir))]:
sample_folder = os.path.join(os.getcwd(), sample_dir_name)
if not os.path.exists(sample_folder):
os.makedirs(sample_folder)
os.chdir(sample_folder)
        # now in the sample folder; the QC and MP analyses can run at the same time
pipeline = "QCcontrol"
tools = ["trimmomatic", "fastqc", "abyss", "align"]
if args.reference is None:
tools = ["trimmomatic", "fastqc", "abyss"]
sample_YAML_name = os.path.join(sample_folder, "{}_{}.yaml".format(
sample_dir_name, pipeline))
sample_YAML = open(sample_YAML_name, 'w')
sample_YAML.write("pipeline:\n")
sample_YAML.write(" {}\n".format(pipeline))
sample_YAML.write("tools:\n")
sample_YAML.write(" {}\n".format(tools))
##TODO: output must became sampleName
sample_YAML.write("output: {}\n".format(sample_dir_name))
sample_YAML.write("projectName: {}\n".format(projectName))
sample_YAML.write("kmer: 35\n")
sample_YAML.write("threads: {}\n".format(args.threads))
sample_YAML.write("genomeSize: \n")
sample_YAML.write("adapters: {}\n".format(args.adapter))
if args.reference is not None:
sample_YAML.write("reference: {}\n".format(args.reference))
sample_YAML.write("libraries:\n")
sample_data_dir = os.path.join(samples_data_dir,sample_dir_name)
# helper variables for collecting FCs
fc_pat, prep_pat = (r'^\d{6}_.*_?.*$', r'^[A-Z]$')
def _get_expected_dir(path, pat):
return [os.path.join(path, d) for d in os.listdir(path) if re.match(pat, d) \
and os.path.isdir(os.path.join(path, d))]
#collect FC directories
flowcells_dirs = _get_expected_dir(sample_data_dir, fc_pat)
# to adapt the directory structure in IRMA where it have lib prep dir
lib_prep_dirs = _get_expected_dir(sample_data_dir, prep_pat)
# Check and collect the flowcells in the lib prep directory
for prep_dir in lib_prep_dirs:
flowcells_dirs.extend(_get_expected_dir(prep_dir, fc_pat))
sample_files = []
for flowcell in flowcells_dirs:
sample_files.extend([os.path.join(flowcell, f) for f in \
os.listdir(flowcell) \
if (os.path.isfile(os.path.realpath(os.path.join(flowcell,f))) \
and re.search('.gz$',f))])
# now sample_files contains all the file sequenced for this sample
pair1_file = ""
pair2_file = ""
single = ""
library = 1
while len(sample_files) > 0:
file = sample_files[0]
sample_YAML.write(" lib{}:\n".format(library))
if "_1.fastq.gz" in file:
pair1_file = file
pair2_file = re.sub("_1.fastq.gz", "_2.fastq.gz", file)
elif "_2.fastq.gz" in file:
pair2_file = file
pair1_file = re.sub("_2.fastq.gz", "_1.fastq.gz", file)
elif "R1_001.fastq.gz" in file:
pair1_file = file
pair2_file = re.sub("R1_001.fastq.gz", "R2_001.fastq.gz", file)
elif "R2_001.fastq.gz" in file:
pair2_file = file
pair1_file = re.sub("R2_001.fastq.gz", "R1_001.fastq.gz", file)
            else:
                sys.exit("file {} does not respect the naming "
                         "convention. Exit!".format(file))
sample_YAML.write(" pair1: {}\n".format(pair1_file))
sample_YAML.write(" pair2: {}\n".format(pair2_file))
sample_YAML.write(" orientation: {}\n".format(args.orientation))
sample_YAML.write(" insert: {}\n".format(args.insert))
sample_YAML.write(" std: {}\n".format(args.std))
sample_files.remove(pair1_file)
sample_files.remove(pair2_file)
library += 1
        sample_YAML.close()  # bug fix: the bare attribute access never closed the file
# Run the job
extramodules = []
if "abyss" in tools:
extramodules.append("module load abyss/1.3.5\n")
if "align" in tools:
extramodules.append("module load samtools\nmodule load bwa\n")
jobname = "{}_{}".format(sample_dir_name, pipeline)
submit_job(sample_YAML_name, jobname, os.getcwd(), args, extramodules)
os.chdir(projectFolder)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--reference', type=str, default=None,
help="path to the reference file")
parser.add_argument('--adapter', type=str, required=True,
help="path to the file containing the adaptor sequence to be removed")
parser.add_argument('--global-config', type=str, required=True,
help="global configuration file")
parser.add_argument('--sample-data-dir', type=str, required=True,
help=("Path to directory (usually INBOX) containing the project "
"(one dir per sample, scilife structure project/sample/flowcell/)"))
parser.add_argument('--orientation', type=str, required=True,
help="orientation of the libraries")
parser.add_argument('--insert', type=str, required=True,
help="expected insert size of the libraries")
parser.add_argument('--std', type=str, required=True,
help=("expected stdandard variation of the insert size of "
"the libraries"))
parser.add_argument('--env', type=str,
default="DeNovoPipeline", help=("name of the virtual enviorment "
"(default is DeNovoPipeline)"))
parser.add_argument('--email', type=str,
help=("Send notifications/job status updates to this email "
"address."))
parser.add_argument('--time', type=str, default="1-00:00:00",
help="required time for the job (default is 1 day : 1-00:00:00)")
parser.add_argument('--project', type=str, default="a2010002",
help="project name for slurm submission (default is a2010002)")
parser.add_argument('--threads', type=int, default=16,
help="Number of thread the job will require")
parser.add_argument('--qos', type=str,
help=("Specify a quality of service preset for the job (eg. "
"--qos short)"))
args = parser.parse_args()
main(args)
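# For reference, each per-sample YAML written by main() has roughly this
# shape (all values below are placeholders, not real data):
#
#   pipeline:
#    QCcontrol
#   tools:
#    ['trimmomatic', 'fastqc', 'abyss', 'align']
#   output: <sample_dir_name>
#   projectName: <project>
#   kmer: 35
#   threads: 16
#   genomeSize:
#   adapters: /path/to/adapters.fa
#   libraries:
#    lib1:
#     pair1: <sample>_..._R1_001.fastq.gz
#     pair2: <sample>_..._R2_001.fastq.gz
#     orientation: <orientation>
#     insert: <insert>
#     std: <std>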
|
SciLifeLab/NouGAT
|
sciLifeLab_utils/run_QC_analysis.py
|
Python
|
mit
| 6,886
|
[
"BWA"
] |
ced956ecc12be8db561dc6a7cea3178d911cb75f40bcb8a1fca02ed5be76cd83
|
from flask import Flask, render_template, session, request, redirect
import random
app = Flask(__name__)
app.secret_key = 'gold123'
@app.route('/')
def index():
    if 'gold' not in session:
        session['gold'] = 0
    if 'activities' not in session:
        session['activities'] = []
    return render_template('ninja1.html')
@app.route('/process', methods = ['POST'])
def process():
locations = {
'farm':random.randint(10,20),
'casino':random.randint(-50,50),
'cave':random.randint(5,10),
'house':random.randint(2,5)
}
if request.form['location'] in locations:
result = locations[request.form['location']]
session['gold'] = session['gold']+result
result_dictionary = {
'class': ('red','green')[result > 0],
'activity': "You went to the {} and {} {} gold! You now have {} gold.".format(request.form['location'], ('lost','gained')[result > 0], result, session['gold'])
}
        session['activities'].append(result_dictionary)
        # In-place mutation of a session-stored list is not auto-detected by
        # Flask's cookie session; mark it dirty so the change persists.
        session.modified = True
return redirect('/')
if __name__ == '__main__':
app.run(debug = True)
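# Run locally with `python ninjagold.py` and browse to
# http://127.0.0.1:5000/ (the Flask development-server default).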
|
jiobert/python
|
Smith_Ben/Assignments/ninja_gold copy/ninjagold.py
|
Python
|
mit
| 1,032
|
[
"CASINO"
] |
bf45bef3198391f126aa87b05015aaac9840760734d5896b7e59318e03d9bc53
|
import astropy.io.fits as fits
import numpy as np
import os
import astropy.constants as c
import astropy.units as u
import glob
from scipy.ndimage import binary_opening
import aplpy
from . import catalogs
import matplotlib as mpl
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
from config import plottingDictionary
#####################################################
# Calculate column densities of non-NH3 lines in GAS
# based on results of Gaussian fit analysis
# To do: possibly include hyperfine analysis of HC5N
#####################################################
def calc_jt(temp, nu):
factor = np.exp(c.h * nu/(c.k_B * temp))-1.
jt = c.h * nu / c.k_B / factor
return jt
def calc_tau(tmb,tex, nu):
tbg = 2.73*u.K
factor = tmb/(calc_jt(tex,nu)-calc_jt(tbg,nu))
tau = (-1.)*np.log(1. - factor)
return tau
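# The two helpers above implement the standard radiative-transfer relations
#   J_nu(T) = (h*nu/k_B) / (exp(h*nu/(k_B*T)) - 1)
#   tau     = -ln(1 - T_mb / (J_nu(T_ex) - J_nu(T_bg))),   T_bg = 2.73 K
# i.e. the optical depth implied by a main-beam brightness T_mb for an
# assumed excitation temperature T_ex.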
def calc_q_linear(tex,b):
# Calc Q
# Qs seem reasonable for HC5N compared with Splatalogue values, poorer agreement with CCS
j_arr = np.array(range(100))
q=0
for j in j_arr:
q += (2.*j + 1)*np.exp(-1.*c.h.cgs*b.cgs*j*(j+1)/(c.k_B.cgs*tex))
return q
def calc_q_ccs(tex):
# For each J have three different states (N = J +/- 1)
ccs_file = os.path.join(os.path.dirname(__file__),'CCS_lvls.txt')
Eu, Nu, Ju = np.loadtxt(ccs_file,unpack=True)
gu = 2.*Ju + 1.
q = np.sum(gu * np.exp((-1.)*Eu/tex.value))
return q
def calc_n_mangum(tmb,tex,dv,params,q):
nu = params['nu']
tau = calc_tau(tmb,tex,nu)
jup = params['jup']
Eup = params['Eup']
mu = params['mu']
Sij = params['Sij']
b = params['b']
tbg = 2.73*u.K
#q = calc_q_linear(tex,b)
n = tau * 3.*c.h/(8.*np.pi**3.*Sij*mu**2)*q/(2.*jup+1)*\
np.exp(Eup/tex)*(np.exp(c.h*nu/(c.k_B*tex))-1.)**(-1.)*\
dv*np.sqrt(np.pi/(4.*np.log(2.)))
return n, tau
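# calc_n_mangum implements the usual LTE column-density expression (cf.
# Mangum & Shirley 2015): with the peak T_mb converted to tau above,
#   N = tau * [3h / (8 pi^3 S mu^2)] * [Q / (2 J_up + 1)] * exp(E_up / T_ex)
#       * [exp(h nu / (k_B T_ex)) - 1]^(-1) * dv * sqrt(pi / (4 ln 2))
# where dv is the fitted FWHM line width.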
def calc_n_mom0(mom0,tmb,tex,nu,tex_arr,n_arr):
# Use assumed excitation temperature and Yancy's text files to convert from integrated intensity
# to column density (read tex_arr, n_arr based on line)
tau = calc_tau(tmb,tex,nu)
    itex = np.where(tex_arr == tex.value) # exact match works as long as tex is given to at most 2 decimal places
n = mom0 * n_arr[itex][0]
# Tau correction where needed
corr = np.where(tau > 0.1)
n[corr] = n[corr] * tau[corr]/(1.-np.exp(-(1.)*tau[corr]))
return n
def calc_column_densities_fits(region='B18',file_extension='all_rebase3',tex=7.*u.K):
if file_extension:
root = file_extension
else:
root = 'all'
lines = ['HC5N','C2S','HC7N_21_20']
hc5n_params = {'nu' : 23.9638968*u.GHz,
'jlow' : 8,
'jup' : 9,
'Elow' : 4.6003*u.K,
'Eup' : 5.7505*u.K,
'mu' : 4.33e-18*u.esu*u.cm,
'Sij' : 0.47,
'b' : 1331.33*u.MHz}
c2s_params = {'nu' : 22.3440308*u.GHz,
'jlow' : 1,
'jup' : 2,
'Elow' : 0.5336*u.K,
'Eup' : 1.857*u.K,
'mu' : 2.88e-18*u.esu*u.cm,
'Sij' : 0.4,
'b' : 6477.75*u.MHz}
hc7n_21_params = {'nu': 23.6878974*u.GHz,
'jlow' : 20,
'jup' : 21,
'Elow' : 11.3684*u.K,
'Eup' : 12.5052*u.K,
'mu' : 5.0e-18*u.esu*u.cm,
'Sij' : 0.489,
'b' : 564.0*u.MHz}
line_params = {'HC5N' : hc5n_params, 'C2S':c2s_params, 'HC7N_21_20':hc7n_21_params}
for line in lines:
gparamfits = '{0}/{0}_{2}_{1}_param_cube_masked.fits'.format(region,root,line)
mom0file = '{0}/{0}_{1}_{2}_mom0_QA.fits'.format(region,line,root)
colfile = '{0}/parameterMaps/{0}_{1}_{2}_N_masked.fits'.format(region,line,root)
taufile = '{0}/parameterMaps/{0}_{1}_{2}_tau_masked.fits'.format(region,line,root)
# Make sure files exist
if os.path.isfile(gparamfits):
gparam_hdu = fits.open(gparamfits)
gparam_data = gparam_hdu[0].data
mom0_hdu = fits.open(mom0file)
header = mom0_hdu[0].header
mom0 = mom0_hdu[0].data
tmb_fit = gparam_data[0] * u.K
sigv_fit = gparam_data[2] * u.km/u.s
fwhm = 2.*np.sqrt(2.*np.log(2.))*sigv_fit
params = line_params[line]
if line == 'C2S':
q = calc_q_ccs(tex)
else:
q = calc_q_linear(tex,params['b'])
ncol, tau = calc_n_mangum(tmb_fit,tex,fwhm,params,q)
# Edit header
#rm_key=['CDELT3', 'CUNIT3', 'CTYPE3', 'CRVAL3']
#for key_i in rm_key:
# header.remove(key_i)
header['NAXIS'] = 2
header['WCSAXES'] = 2
header['BUNIT'] = 'cm-2'
# Write out files
new_hdu = fits.PrimaryHDU(ncol.cgs.value,header=header)
new_hdu.writeto(colfile,overwrite=True)
header['BUNIT'] = ''
new_hdu2 = fits.PrimaryHDU(tau.value,header=header)
new_hdu2.writeto(taufile,overwrite=True)
def calc_all_columns(file_extension='all_rebase3',tex=7.*u.K,release='all'):
RegionCatalog = catalogs.GenerateRegions(release=release)
for ThisRegion in RegionCatalog:
region = ThisRegion['Region name']
calc_column_densities_fits(region=region,file_extension=file_extension,tex=tex)
def plot_property_maps(regions=None,file_extension='all_rebase3',release='all'):
# Get list of regions - run from images/ directory
# Assume directories correspond to regions to be imaged
# Update - use region list
if regions is None:
RegionCatalog = catalogs.GenerateRegions(release=release)
else:
RegionCatalog = catalogs.GenerateRegions(release=release)
keep = [idx for idx, row in enumerate(RegionCatalog) if row['Region name'] in regions]
RegionCatalog = RegionCatalog[keep]
ext_list = [0,1,2]
    label_list = [r'$T_B$ (K)', r'$v_\mathrm{LSR}$ (km s$^{-1}$)', r'$\sigma_v$ (km s$^{-1}$)']
file_list = ['T_B','vlsr','sigv']
ctable_list = ['plasma','RdYlBu_r','plasma'] #'YlGnBu_r'
text_color='black'
text_size = 12
beam_color='#d95f02' # previously used '#E31A1C'
# Try single set of contours for first look images
w11_step = 0.4
cont_levs=2**np.arange( 0,20)*w11_step
w11_lw = 0.5
# Masking of small (noisy) regions
selem = np.array([[0,1,0],[1,1,1],[0,1,0]])
line_list = ['HC5N','C2S','HC7N_21_20','HC7N_22_21','NH3_33']
for ThisRegion in RegionCatalog:
region = ThisRegion['Region name']
if os.path.isdir(region):
            print(region)
# Use NH3 (1,1) moment maps for contours?
file_w11='{0}/{0}_NH3_11_{1}_mom0_QA_trim.fits'.format(region,file_extension)
plot_param = plottingDictionary[region]
for line in line_list:
gparamfits = '{0}/{0}_{2}_{1}_param_cube_masked.fits'.format(region,file_extension,line)
if os.path.isfile(gparamfits):
par_hdu = fits.open(gparamfits)
# Get NH3 (1,1) moment contours
LowestContour= cont_levs[0]*0.5
nh3mom0_hdu = fits.open(file_w11)
nh3mom0 = nh3mom0_hdu[0].data
mask = binary_opening(nh3mom0 > LowestContour, selem)
MaskedMap = mask*nh3mom0
nh3mom0_hdu[0].data = MaskedMap
for i in range(len(ext_list)):
# Use percentiles to set initial plot colourscale ranges
# Need to omit zeros from calculation
plane = par_hdu[0].data[i,:,:]
v_min=np.nanpercentile(plane[np.where(plane !=0)],2.5)
v_max=np.nanpercentile(plane[np.where(plane !=0)],97.5)
fig=aplpy.FITSFigure(par_hdu,slices=[i])
fig.show_colorscale(cmap=ctable_list[i],vmin=v_min, vmax=v_max)
# For some reason having set_nan_color *after* colorbar messes up the tick locations!
fig.set_nan_color('0.99')
# add colorbar
fig.add_colorbar()
fig.colorbar.show(box_orientation='horizontal', width=0.1, pad=0.0,
location='top')
fig.colorbar.set_font(family='sans_serif',size=text_size)
fig.colorbar.set_axis_label_font(family='sans_serif',size=text_size)
#
fig.show_contour(nh3mom0_hdu, colors='gray', levels=cont_levs, linewidths=w11_lw)
# Axis labels
fig.axis_labels.set_font(family='sans_serif',size=text_size)
# Ticks
fig.ticks.set_color(text_color)
fig.tick_labels.set_font(family='sans_serif',size=text_size)
fig.tick_labels.set_style('colons')
fig.tick_labels.set_xformat('hh:mm:ss')
fig.tick_labels.set_yformat('dd:mm')
# Add beam
fig.add_beam(major=0.0088441,minor=0.0088441,angle=0)
fig.beam.set_color(beam_color)
fig.beam.set_corner('bottom left')
# Scale bar
# magic line of code to obtain scale in arcsec obtained from
# http://www.astropy.org/astropy-tutorials/Quantities.html
ang_sep = (plot_param['scalebar_size'].to(u.au)/plot_param['distance']).to(u.arcsec, equivalencies=u.dimensionless_angles())
fig.add_scalebar(ang_sep.to(u.degree))
fig.scalebar.set_corner(plot_param['scalebar_pos'])
fig.scalebar.set_font(family='sans_serif',size=text_size)
fig.scalebar.set(color=text_color)
fig.scalebar.set_label('{0:4.2f}'.format(plot_param['scalebar_size']))
# Labels
label = label_list[i]
label_loc = plot_param['label_loc']
label_ha = plot_param['label_ha']
fig.add_label(label_loc[0],label_loc[1],
'{0}\n{1}'.format(region,label),
relative=True, color=text_color,
horizontalalignment=label_ha,
family='sans_serif',size=text_size)
# fig.set_system_latex(True)
fig.save('figures/{0}_{1}_{2}_{3}.pdf'.format(region,line,file_extension,file_list[i]),
adjust_bbox=True,dpi=200)
fig.close()
else:
print('File {0} not found'.format(gparamfits))
# Next plot column density maps
for ThisRegion in RegionCatalog:
region = ThisRegion['Region name']
if os.path.isdir(region):
            print(region)
# Use NH3 (1,1) moment maps for contours?
file_w11='{0}/{0}_NH3_11_{1}_mom0_QA_trim.fits'.format(region,file_extension)
plot_param = plottingDictionary[region]
for line in line_list:
colfits = '{0}/parameterMaps/{0}_{2}_{1}_N_masked.fits'.format(region,file_extension,line)
if os.path.isfile(colfits):
col_hdu = fits.open(colfits)
# Get NH3 (1,1) moment contours
LowestContour= cont_levs[0]*0.5
nh3mom0_hdu = fits.open(file_w11)
nh3mom0 = nh3mom0_hdu[0].data
mask = binary_opening(nh3mom0 > LowestContour, selem)
MaskedMap = mask*nh3mom0
nh3mom0_hdu[0].data = MaskedMap
# Plot log of column for clarity
log_data = np.log10(col_hdu[0].data)
col_hdu[0].data = log_data
# Use percentiles to set initial plot colourscale ranges
v_min=np.nanpercentile(col_hdu[0].data[np.where(col_hdu[0].data > 0)],0.5)
v_max=np.nanpercentile(col_hdu[0].data[np.where(col_hdu[0].data > 0)],99.5)
v_mid = 0
fig=aplpy.FITSFigure(col_hdu)
fig.show_colorscale(cmap='Blues',vmin=v_min,vmax=v_max)
# For some reason having set_nan_color *after* colorbar messes up the tick locations!
fig.set_nan_color('0.95')
# add colorbar
fig.add_colorbar()
fig.colorbar.show(box_orientation='horizontal', width=0.1, pad=0.0,
location='top')
fig.colorbar.set_font(family='sans_serif',size=text_size)
fig.colorbar.set_axis_label_font(family='sans_serif',size=text_size)
#
fig.show_contour(nh3mom0_hdu, colors='gray', levels=cont_levs, linewidths=w11_lw)
# Axis labels
fig.axis_labels.set_font(family='sans_serif',size=text_size)
# Ticks
fig.ticks.set_color(text_color)
fig.tick_labels.set_font(family='sans_serif',size=text_size)
fig.tick_labels.set_style('colons')
fig.tick_labels.set_xformat('hh:mm:ss')
fig.tick_labels.set_yformat('dd:mm')
# Add beam
fig.add_beam(major=0.0088441,minor=0.0088441,angle=0)
fig.beam.set_color(beam_color)
fig.beam.set_corner('bottom left')
# Scale bar
# magic line of code to obtain scale in arcsec obtained from
# http://www.astropy.org/astropy-tutorials/Quantities.html
ang_sep = (plot_param['scalebar_size'].to(u.au)/plot_param['distance']).to(u.arcsec, equivalencies=u.dimensionless_angles())
fig.add_scalebar(ang_sep.to(u.degree))
fig.scalebar.set_corner(plot_param['scalebar_pos'])
fig.scalebar.set_font(family='sans_serif',size=text_size)
fig.scalebar.set(color=text_color)
fig.scalebar.set_label('{0:4.2f}'.format(plot_param['scalebar_size']))
# Labels
label_loc = plot_param['label_loc']
label_ha = plot_param['label_ha']
fig.add_label(label_loc[0],label_loc[1],
'{0}\nlog N({1})'.format(region,line),
relative=True, color=text_color,
horizontalalignment=label_ha,
family='sans_serif',size=text_size)
# fig.set_system_latex(True)
fig.save('figures/{0}_{1}_{2}_N.pdf'.format(region,line,file_extension),
adjust_bbox=True,dpi=200)
fig.close()
else:
                    print('File {0} not found'.format(colfits))
|
rfriesen/GAS
|
GAS/columnDensities.py
|
Python
|
mit
| 15,569
|
[
"Gaussian"
] |
9a94935a4f93ae80aab37d1c673613277e53ec0c54e9b1d5d9d0dedf4bbd9e10
|
from __future__ import print_function
from collections import defaultdict
from functools import partial
import gc, inspect, os, sys
import numpy as np
from sklearn.datasets import make_regression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
import h2o
from h2o.sklearn.wrapper import H2OConnectionMonitorMixin
sys.path.insert(1, os.path.join("..",".."))
from tests import pyunit_utils, Namespace as ns
"""
This test suite creates a default sklearn regression estimator for each H2O estimator.
It then feeds them either with H2O frames (more efficient, and compatible with the old
API) or with numpy arrays, the simplest approach for users who want to use H2O like
any other sklearn estimator.
"""
seed = 2019
init_connection_args = dict(strict_version_check=False, show_progress=True)
scores = defaultdict(dict)
def _get_data(format='numpy'):
X, y = make_regression(n_samples=100, n_features=10, n_informative=5, random_state=seed)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=seed)
data = ns(X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test)
if format == 'h2o':
for k, v in data.__dict__.items():
setattr(data, k, h2o.H2OFrame(v))
return data
def _get_default_args(estimator_cls):
defaults = dict(
H2OCoxProportionalHazardsRegressor=dict(),
H2ODeepLearningRegressor=dict(seed=seed, reproducible=True),
H2OGeneralizedAdditiveRegressor=dict(family='gaussian', seed=seed, gam_columns=["C1"]),
H2OGeneralizedLinearRegressor=dict(family='gaussian', seed=seed),
)
return defaults.get(estimator_cls.__name__, dict(seed=seed))
def _get_custom_behaviour(estimator_cls):
custom = dict(
# H2ODeepLearningRegressor=dict(scores_may_differ=True),
)
return custom.get(estimator_cls.__name__, dict())
def test_estimator_with_h2o_frames(estimator_cls):
args = _get_default_args(estimator_cls)
estimator = estimator_cls(**args)
data = _get_data(format='h2o')
assert isinstance(data.X_train, h2o.H2OFrame)
estimator.fit(data.X_train, data.y_train)
preds = estimator.predict(data.X_test)
print(preds)
assert isinstance(preds, h2o.H2OFrame)
if _get_custom_behaviour(estimator_cls).get('preds_as_vector', True):
assert preds.dim == [len(data.X_test), 1], "got {}".format(preds.dim)
else:
assert preds.dim[0] == len(data.X_test)
score = estimator.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = r2_score(data.y_test.as_data_frame().values, preds.as_data_frame().values)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
scores[estimator_cls].update(with_h2o_frames=score)
def test_estimator_with_numpy_arrays(estimator_cls):
estimator = estimator_cls(init_connection_args=init_connection_args, **_get_default_args(estimator_cls))
data = _get_data(format='numpy')
assert isinstance(data.X_train, np.ndarray)
with estimator:
estimator.fit(data.X_train, data.y_train)
preds = estimator.predict(data.X_test)
print(preds)
assert isinstance(preds, np.ndarray)
if _get_custom_behaviour(estimator_cls).get('preds_as_vector', True):
assert preds.shape == (len(data.X_test),), "got {}".format(preds.shape)
else:
assert preds.shape[0] == len(data.X_test)
score = estimator.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = r2_score(data.y_test, preds)
assert abs(score - skl_score) < 1e-6
scores[estimator_cls].update(with_numpy_arrays=score)
def test_scores_are_equivalent(estimator_cls):
try:
lk, rk = ('with_h2o_frames', 'with_numpy_arrays')
est_scores = scores[estimator_cls]
if lk in est_scores and rk in est_scores:
assert abs(est_scores[lk] - est_scores[rk]) < 1e-6, \
"expected equivalent scores but got {lk}={lscore} and {rk}={rscore}" \
.format(lk=lk, rk=rk, lscore=est_scores[lk], rscore=est_scores[rk])
elif lk not in est_scores:
print("no scores for {}".format(estimator_cls.__name__+' '+lk))
else:
print("no scores for {}".format(estimator_cls.__name__+' '+rk))
except AssertionError as e:
if _get_custom_behaviour(estimator_cls).get('scores_may_differ', False):
print("ERROR !!! "+str(e))
else:
raise e
def make_test(test, classifier):
bound_test = partial(test, classifier)
bound_test.__name__ = test.__name__
pyunit_utils.tag_test(bound_test, classifier.__name__)
return bound_test
def make_tests(classifier):
return list(map(lambda test: make_test(test, classifier), [
test_estimator_with_h2o_frames,
test_estimator_with_numpy_arrays,
test_scores_are_equivalent
]))
failing = [
'H2OCoxProportionalHazardsRegressor', # doesn't support regression?
'H2OStackedEnsembleRegressor', # needs a separate test (requires models as parameters),
'H2OUpliftRandomForestRegressor' # does not support regression yet
]
regressors = [cls for name, cls in inspect.getmembers(h2o.sklearn, inspect.isclass)
if name.endswith('Regressor') and name not in ['H2OAutoMLRegressor']+failing]
pyunit_utils.run_tests([make_tests(c) for c in regressors])
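# --- Illustrative sketch (defined only; never invoked by the suite) ---
def _example_direct_use():
    """Use one of the wrappers this suite exercises like a plain sklearn
    estimator on numpy arrays; names and shapes follow _get_data() above."""
    from h2o.sklearn import H2OGeneralizedLinearRegressor
    data = _get_data(format='numpy')
    est = H2OGeneralizedLinearRegressor(family='gaussian', seed=seed,
                                        init_connection_args=init_connection_args)
    with est:  # the wrapper manages the H2O connection lifecycle
        est.fit(data.X_train, data.y_train)
        return est.score(data.X_test, data.y_test)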
|
h2oai/h2o-3
|
h2o-py/tests/testdir_sklearn/pyunit_sklearn_regression_all_estimators.py
|
Python
|
apache-2.0
| 5,468
|
[
"Gaussian"
] |
5abbe2ea74e1dfb23adbb7bcc90e036f8dbbe43d9de321a26656aa10ee89fe6c
|
import numpy as np
import glob
filename2 = 'data.GPCSH1.65r1-Eq-300K' # alt: data.GP2.0w0r2-Compacted-Eq ; xyz file was given by Roland E Pellenq
flist2 = glob.glob(filename2)
natoms_GBP = 103231
rotation = (90.00*np.pi)/180.00
c=np.cos(rotation)
s=np.sin(rotation)
# the 1st column is a string, so it is loaded separately
for f in flist2:
#load2 = np.genfromtxt(f, skip_header=12, dtype=float, usecols=(0,1, 2, 3, 4, 5, 6)) #dtype=("|S10", float, float, float),
load2 = np.genfromtxt(f, skip_header=27, skip_footer=natoms_GBP+1, dtype=float, usecols=(0,1, 2, 3, 4, 5, 6)) #dtype=("|S10", float, float, float),
dataovito1=np.array(load2)
for f in flist2:
#load2 = np.genfromtxt(f, skip_header=12, dtype=str, usecols=(0)) #dtype=("|S10", float, float, float),
load2 = np.genfromtxt(f, skip_header=27, skip_footer=natoms_GBP+1, dtype=str, usecols=(0)) #dtype=("|S10", float, float, float),
dataovito2=np.array(load2)
datazero = np.zeros((len(dataovito1),5))
for j in range(len(dataovito1)): #rotating wrt y axis
datazero[j,0] = dataovito1[j,0]
datazero[j,1] = dataovito1[j,2]
datazero[j,2] = dataovito1[j,4]*c + dataovito1[j,6]*s
datazero[j,3] = dataovito1[j,5]
datazero[j,4] = -dataovito1[j,4]*s + dataovito1[j,6]*c
#dataovito1[j,4] = dataovito1[j,4]*c + dataovito1[j,6]*s
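# (The two rotated components above are the standard rotation about y:
#    x' =  x*cos(theta) + z*sin(theta)
#    z' = -x*sin(theta) + z*cos(theta)
#  with theta = 90 deg this reduces to x' = z, z' = -x.)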
'''
for j in range(len(dataovito1)): #rotating wrt x axis
dataovito1[j,4] = -dataovito1[j,5]
dataovito1[j,5] = dataovito1[j,4]
dataovito1[j,6] = dataovito1[j,6]
'''
size1 = len(dataovito1) #total atoms
size2 = len(dataovito1) #total atoms
xmin = np.min(datazero[:,2]) #+min(0.0,xy,xz,xy+xz)
xmax = np.max(datazero[:,2]) #+max(0.0,xy,xz,xy+xz)
ymin = np.min(datazero[:,3]) #+min(0.0,yz)
ymax = np.max(datazero[:,3]) #+max(0.0,yz)
zmin = np.min(datazero[:,4])
zmax = np.max(datazero[:,4])
outFile = open('data.GPCSH1.65r1-Eq-300K-rotated', 'w')
outFile.write('LAMMPS data file written by Rafat Sadat using Python\n')
outFile.write('\n')
outFile.write('%i %s \n' %(len(dataovito1), 'atoms'))
outFile.write('12 atom types \n')
#outFile.write('%i %s \n' %(nbonds, 'bonds'))
#outFile.write('1 bond types \n')
#outFile.write('%i %s \n' %(nangles, 'angles'))
#outFile.write('1 angle types \n')
outFile.write('\n')
outFile.write('%f %f %s %s \n' %(xmin, xmax, 'xlo', 'xhi'))
outFile.write('%f %f %s %s \n' %(ymin, ymax, 'ylo', 'yhi'))
outFile.write('%f %f %s %s \n' %(zmin, zmax, 'zlo', 'zhi'))
#outFile.write('\n')
#outFile.write('%f %f %f %s %s %s \n' %(-8.1554775, 0.0, 0.0, 'xy', 'xz', 'yz'))
outFile.write('\n')
outFile.write('Masses\n')
outFile.write('\n')
outFile.write('%i %f \n' %(1, 28.065))
outFile.write('%i %f \n' %(2, 26.98))
outFile.write('%i %f \n' %(3, 22.98))
outFile.write('%i %f \n' %(4, 16.0))
outFile.write('%i %f \n' %(5, 16.0))
outFile.write('%i %f \n' %(6, 1.0))
outFile.write('%i %f \n' %(7, 28.065)) #Si
outFile.write('%i %f \n' %(8, 40.078)) #Ca
outFile.write('%i %f \n' %(9, 40.078)) #Ca
outFile.write('%i %f \n' %(10, 16.0)) #Os
outFile.write('%i %f \n' %(11, 16.0)) #Ow
outFile.write('%i %f \n' %(12, 1.0)) #Hw
outFile.write('\n')
outFile.write('Atoms\n')
outFile.write('\n')
for j in range(size1): # writing atoms without water; the type column maps directly to the LAMMPS atom type
    atype = int(datazero[j,1])
    if 1 <= atype <= 12:
        outFile.write('%i %i %i %f %f %f %f \n' %(j+1, 1, atype, 0, datazero[j,2], datazero[j,3], datazero[j,4]))
'''
counter = -1
molid = 0
for j in range(size1,size2): #writing the atoms for water molecules
counter +=1
if counter%3==0:
molid += 1
else:
molid = molid
if dataovito2[j]=='Ow':
outFile.write('%i %i %i %i %f %f %f \n' %(j+1, molid, 5, 0, dataovito1[j,0], dataovito1[j,1], dataovito1[j,2]))
elif dataovito2[j]=='Hw':
outFile.write('%i %i %i %i %f %f %f \n' %(j+1, molid, 6, 0, dataovito1[j,0], dataovito1[j,1], dataovito1[j,2]))
outFile.write('\n')
outFile.write('Bonds\n')
outFile.write('\n')
count = 0
for j in range(size1+1,size2, 3):
count +=1
outFile.write('%i %s %i %i \n' %(count,'1',j,j+1))
count +=1
outFile.write('%i %s %i %i \n' %(count,'1',j,j+2))
outFile.write('\n')
outFile.write('Angles\n')
outFile.write('\n')
count = 0
for j in range(size1+1,size2, 3):
count +=1
outFile.write('%i %s %i %i %i \n' %(count,'1', j+1, j, j+2))
'''
outFile.close()
print "All done!"
|
msadat/python-scripts
|
rotate_lammps_data.py
|
Python
|
gpl-3.0
| 5,902
|
[
"LAMMPS"
] |
5fba9db4b1d662a51061ba822d0789ab5a2b9fbd9da9757380cf04fc33d9b180
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import shutil
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not
from charms.kubernetes.common import get_version
from charms.kubernetes.flagmanager import FlagManager
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
kubeconfig_path = '/root/cdk/kubeconfig'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
# Trigger removal of PPA docker installation if it was previously set.
set_state('config.changed.install_from_upstream')
hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
cleanup_pre_snap_services()
check_resources_for_upgrade_needed()
# Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
# since they can differ between k8s versions
remove_state('kubernetes-worker.gpu.enabled')
kubelet_opts = FlagManager('kubelet')
kubelet_opts.destroy('feature-gates')
kubelet_opts.destroy('experimental-nvidia-gpus')
remove_state('kubernetes-worker.cni-plugins.installed')
remove_state('kubernetes-worker.config.created')
remove_state('kubernetes-worker.ingress.available')
set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
hookenv.status_set('maintenance', 'Checking resources')
resources = ['kubectl', 'kubelet', 'kube-proxy']
paths = [hookenv.resource_get(resource) for resource in resources]
if any_file_changed(paths):
set_upgrade_needed()
def set_upgrade_needed():
set_state('kubernetes-worker.snaps.upgrade-needed')
config = hookenv.config()
previous_channel = config.previous('channel')
require_manual = config.get('require-manual-upgrade')
if previous_channel is None or not require_manual:
set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
# remove old states
remove_state('kubernetes-worker.components.installed')
# disable old services
services = ['kubelet', 'kube-proxy']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
service_stop(service)
# cleanup old files
files = [
"/lib/systemd/system/kubelet.service",
"/lib/systemd/system/kube-proxy.service",
"/etc/default/kube-default",
"/etc/default/kubelet",
"/etc/default/kube-proxy",
"/srv/kubernetes",
"/usr/local/bin/kubectl",
"/usr/local/bin/kubelet",
"/usr/local/bin/kube-proxy",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
# cleanup old flagmanagers
FlagManager('kubelet').destroy_all()
FlagManager('kube-proxy').destroy_all()
@when('config.changed.channel')
def channel_changed():
set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
msg = 'Needs manual upgrade, run the upgrade action'
hookenv.status_set('blocked', msg)
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
check_resources_for_upgrade_needed()
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kubelet snap')
snap.install('kubelet', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-proxy snap')
snap.install('kube-proxy', channel=channel, classic=True)
set_state('kubernetes-worker.snaps.installed')
set_state('kubernetes-worker.restart-needed')
remove_state('kubernetes-worker.snaps.upgrade-needed')
remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
''' When this unit is destroyed:
- delete the current node
- stop the worker services
'''
try:
if os.path.isfile(kubeconfig_path):
kubectl('delete', 'node', gethostname())
except CalledProcessError:
hookenv.log('Failed to unregister node.')
service_stop('snap.kubelet.daemon')
service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
''' Unpack the cni-plugins resource '''
charm_dir = os.getenv('CHARM_DIR')
# Get the resource via resource_get
try:
archive = hookenv.resource_get('cni')
except Exception:
message = 'Error fetching the cni resource.'
hookenv.log(message)
hookenv.status_set('blocked', message)
return
if not archive:
hookenv.log('Missing cni resource.')
hookenv.status_set('blocked', 'Missing cni resource.')
return
# Handle null resource publication, we check if filesize < 1mb
filesize = os.stat(archive).st_size
if filesize < 1000000:
hookenv.status_set('blocked', 'Incomplete cni resource.')
return
hookenv.status_set('maintenance', 'Unpacking cni resource.')
unpack_path = '{}/files/cni'.format(charm_dir)
os.makedirs(unpack_path, exist_ok=True)
cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
hookenv.log(cmd)
check_call(cmd)
apps = [
{'name': 'loopback', 'path': '/opt/cni/bin'}
]
for app in apps:
unpacked = '{}/{}'.format(unpack_path, app['name'])
app_path = os.path.join(app['path'], app['name'])
install = ['install', '-v', '-D', unpacked, app_path]
hookenv.log(install)
check_call(install)
# Used by the "registry" action. The action is run on a single worker, but
# the registry pod can end up on any worker, so we need this directory on
# all the workers.
os.makedirs('/srv/registry', exist_ok=True)
set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
cmd = ['kubelet', '--version']
version = check_output(cmd)
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
''' Notify to the user we are in a transient state and the application
is still converging. Potentially remotely, or we may be in a detached loop
wait state '''
# During deployment the worker has to start kubelet without cluster dns
# configured. If this is the first unit online in a service pool waiting
# to self host the dns pod, and configure itself to query the dns service
# declared in the kube-system namespace
hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
'''Update the status message with the current status of kubelet.'''
update_kubelet_status()
def update_kubelet_status():
''' There are different states that the kubelet can be in: waiting for dns,
waiting for cluster turnup, or ready to serve applications. Set the unit
status accordingly. '''
if _systemctl_is_active('snap.kubelet.daemon'):
hookenv.status_set('active', 'Kubernetes worker running.')
else:
# kubelet is not running; we're waiting on something else to converge
hookenv.status_set('waiting', 'Waiting for kubelet to start.')
@when('certificates.available')
def send_data(tls):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
hookenv.unit_private_ip(),
gethostname()
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
''' Watch for configuration changes and signal if we need to restart the
worker services '''
servers = get_kube_api_servers(kube_api)
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if (data_changed('kube-api-servers', servers) or
data_changed('kube-dns', dns) or
data_changed('cluster-cidr', cluster_cidr)):
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
'tls_client.ca.saved', 'tls_client.client.certificate.saved',
'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
'tls_client.server.key.saved',
'kube-control.dns.available', 'kube-control.auth.available',
'cni.available', 'kubernetes-worker.restart-needed')
def start_worker(kube_api, kube_control, auth_control, cni):
''' Start kubelet using the provided API and DNS info.'''
servers = get_kube_api_servers(kube_api)
# Note that the DNS server doesn't necessarily exist at this point. We know
# what its IP will eventually be, though, so we can go ahead and configure
# kubelet with that info. This ensures that early pods are configured with
# the correct DNS even though the server isn't ready yet.
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if cluster_cidr is None:
hookenv.log('Waiting for cluster cidr.')
return
creds = kube_control.get_auth_credentials()
data_changed('kube-control.creds', creds)
# set --allow-privileged flag for kubelet
set_privileged()
create_config(random.choice(servers), creds)
configure_worker_services(servers, dns, cluster_cidr)
set_state('kubernetes-worker.config.created')
restart_unit_services()
update_kubelet_status()
apply_node_labels()
remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set worker configuration on the CNI relation. This lets the CNI
subordinate know that we're the worker so it can respond accordingly. '''
cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
''' Ingress is a toggled state. When the ingress config option changes,
remove kubernetes-worker.ingress.available so it is re-evaluated. '''
remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
'''The Software Defined Network changed on the container so restart the
kubernetes services.'''
restart_unit_services()
update_kubelet_status()
remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
''' If configuration has ingress RC enabled, launch the ingress load
balancer and default http backend. Otherwise attempt deletion. '''
config = hookenv.config()
# If ingress is enabled, launch the ingress controller
if config.get('ingress'):
launch_default_ingress_controller()
else:
hookenv.log('Deleting the http backend and ingress.')
kubectl_manifest('delete',
'/root/cdk/addons/default-http-backend.yaml')
kubectl_manifest('delete',
'/root/cdk/addons/ingress-replication-controller.yaml') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
@when('kubernetes-worker.ingress.available')
def scale_ingress_controller():
''' Scale the number of ingress controller replicas to match the number of
nodes. '''
try:
output = kubectl('get', 'nodes', '-o', 'name')
count = len(output.splitlines())
kubectl('scale', '--replicas=%d' % count, 'rc/nginx-ingress-controller') # noqa
except CalledProcessError:
hookenv.log('Failed to scale ingress controllers. Will attempt again next update.') # noqa
@when('config.changed.labels', 'kubernetes-worker.config.created')
def apply_node_labels():
''' Parse the labels configuration option and apply the labels to the node.
'''
# scrub and try to format an array from the configuration option
config = hookenv.config()
user_labels = _parse_labels(config.get('labels'))
# For diffing sake, iterate the previous label set
if config.previous('labels'):
previous_labels = _parse_labels(config.previous('labels'))
hookenv.log('previous labels: {}'.format(previous_labels))
else:
# this handles first time run if there is no previous labels config
previous_labels = _parse_labels("")
# Calculate label removal
for label in previous_labels:
if label not in user_labels:
hookenv.log('Deleting node label {}'.format(label))
try:
_apply_node_label(label, delete=True)
except CalledProcessError:
hookenv.log('Error removing node label {}'.format(label))
# if the label is in user labels we do nothing here, it will get set
# during the atomic update below.
# Atomically set a label
for label in user_labels:
_apply_node_label(label, overwrite=True)
def arch():
'''Return the package architecture as a string (e.g. 'amd64').'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def create_config(server, creds):
'''Create a kubernetes configuration for the worker unit.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
# Create kubernetes configuration in the default location for ubuntu.
create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
token=creds['client_token'], user='ubuntu')
# Make the config dir readable by the ubuntu users so juju scp works.
cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
check_call(cmd)
# Create kubernetes configuration in the default location for root.
create_kubeconfig('/root/.kube/config', server, ca,
token=creds['client_token'], user='root')
# Create kubernetes configuration for kubelet, and kube-proxy services.
create_kubeconfig(kubeconfig_path, server, ca,
token=creds['kubelet_token'], user='kubelet')
def configure_worker_services(api_servers, dns, cluster_cidr):
''' Add remaining flags for the worker services and configure snaps to use
them '''
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
kubelet_opts = FlagManager('kubelet')
kubelet_opts.add('require-kubeconfig', 'true')
kubelet_opts.add('kubeconfig', kubeconfig_path)
kubelet_opts.add('network-plugin', 'cni')
kubelet_opts.add('v', '0')
kubelet_opts.add('address', '0.0.0.0')
kubelet_opts.add('port', '10250')
kubelet_opts.add('cluster-dns', dns['sdn-ip'])
kubelet_opts.add('cluster-domain', dns['domain'])
kubelet_opts.add('anonymous-auth', 'false')
kubelet_opts.add('client-ca-file', ca_cert_path)
kubelet_opts.add('tls-cert-file', server_cert_path)
kubelet_opts.add('tls-private-key-file', server_key_path)
kubelet_opts.add('logtostderr', 'true')
kube_proxy_opts = FlagManager('kube-proxy')
kube_proxy_opts.add('cluster-cidr', cluster_cidr)
kube_proxy_opts.add('kubeconfig', kubeconfig_path)
kube_proxy_opts.add('logtostderr', 'true')
kube_proxy_opts.add('v', '0')
kube_proxy_opts.add('master', random.choice(api_servers), strict=True)
if b'lxc' in check_output('virt-what', shell=True):
kube_proxy_opts.add('conntrack-max-per-core', '0')
cmd = ['snap', 'set', 'kubelet'] + kubelet_opts.to_s().split(' ')
check_call(cmd)
cmd = ['snap', 'set', 'kube-proxy'] + kube_proxy_opts.to_s().split(' ')
check_call(cmd)
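# A hedged sketch of the resulting call, assuming FlagManager.to_s() renders
# space-separated key=value pairs (the addresses below are hypothetical):
#   snap set kubelet kubeconfig=/root/cdk/kubeconfig network-plugin=cni \
#       cluster-dns=10.152.183.10 cluster-domain=cluster.local ...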
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
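# Example usage (a minimal sketch; the server URL and token are hypothetical):
#   create_kubeconfig('/root/.kube/config', 'https://10.0.0.1:6443',
#                     '/root/cdk/ca.crt', token='abc123', user='root')
# The resulting kubeconfig selects 'juju-context' against 'juju-cluster'.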
def launch_default_ingress_controller():
''' Launch the Kubernetes ingress controller & default backend (404) '''
context = {}
context['arch'] = arch()
addon_path = '/root/cdk/addons/{}'
# Render the default http backend (404) replicationcontroller manifest
manifest = addon_path.format('default-http-backend.yaml')
render('default-http-backend.yaml', manifest, context)
hookenv.log('Creating the default http backend.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create default-http-backend. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
# Render the ingress replication controller manifest
manifest = addon_path.format('ingress-replication-controller.yaml')
render('ingress-replication-controller.yaml', manifest, context)
hookenv.log('Creating the ingress replication controller.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create ingress controller. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
set_state('kubernetes-worker.ingress.available')
hookenv.open_port(80)
hookenv.open_port(443)
def restart_unit_services():
'''Restart worker services.'''
hookenv.log('Restarting kubelet and kube-proxy.')
services = ['kube-proxy', 'kubelet']
for service in services:
service_restart('snap.%s.daemon' % service)
def get_kube_api_servers(kube_api):
'''Return the list of kubernetes api server addresses (https://host:port)
for this relationship.'''
hosts = []
# Iterate over every service from the relation object.
for service in kube_api.services():
for unit in service['hosts']:
hosts.append('https://{0}:{1}'.format(unit['hostname'],
unit['port']))
return hosts
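# The returned list looks like (addresses are hypothetical, taken from the
# relation data):
#   ['https://172.31.0.10:6443', 'https://172.31.0.11:6443']
# Callers such as start_worker() pick a single entry with random.choice().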
def kubectl(*args):
''' Run a kubectl cli command with a config file. Returns stdout and throws
an error if the command fails. '''
command = ['kubectl', '--kubeconfig=' + kubeconfig_path] + list(args)
hookenv.log('Executing {}'.format(command))
return check_output(command)
def kubectl_success(*args):
''' Runs kubectl with the given args. Returns True if successful, False if
not. '''
try:
kubectl(*args)
return True
except CalledProcessError:
return False
def kubectl_manifest(operation, manifest):
''' Wrap kubectl commands that operate on manifest files.
:param operation: one of get, create, delete, replace
:param manifest: filepath to the manifest
'''
# Deletions are a special case
if operation == 'delete':
# Ensure we immediately remove requested resources with --now
return kubectl_success(operation, '-f', manifest, '--now')
else:
# Guard against an error re-creating the same manifest multiple times
if operation == 'create':
# If we already have the definition, it's probably safe to assume
# creation succeeded.
if kubectl_success('get', '-f', manifest):
hookenv.log('Skipping definition for {}'.format(manifest))
return True
# Execute the requested command that did not match any of the special
# cases above
return kubectl_success(operation, '-f', manifest)
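# Example usage (a sketch; returns True when the resource already exists or
# the operation succeeds):
#   kubectl_manifest('create', '/root/cdk/addons/default-http-backend.yaml')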
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def set_privileged():
"""Update the allow-privileged flag for kubelet.
"""
privileged = hookenv.config('allow-privileged')
if privileged == 'auto':
gpu_enabled = is_state('kubernetes-worker.gpu.enabled')
privileged = 'true' if gpu_enabled else 'false'
flag = 'allow-privileged'
hookenv.log('Setting {}={}'.format(flag, privileged))
kubelet_opts = FlagManager('kubelet')
kubelet_opts.add(flag, privileged)
if privileged == 'true':
set_state('kubernetes-worker.privileged')
else:
remove_state('kubernetes-worker.privileged')
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
set_state('kubernetes-worker.restart-needed')
remove_state('config.changed.allow-privileged')
@when('cuda.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
"""Enable GPU usage on this node.
"""
config = hookenv.config()
if config['allow-privileged'] == "false":
hookenv.status_set(
'active',
'GPUs available. Set allow-privileged="auto" to enable.'
)
return
hookenv.log('Enabling gpu mode')
try:
# Not sure why this is necessary, but if you don't run this, k8s will
# think that the node has 0 gpus (as shown by the output of
# `kubectl get nodes -o yaml`)
check_call(['nvidia-smi'])
except CalledProcessError as cpe:
hookenv.log('Unable to communicate with the NVIDIA driver.')
hookenv.log(cpe)
return
kubelet_opts = FlagManager('kubelet')
if get_version('kubelet') < (1, 6):
hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
kubelet_opts.add('experimental-nvidia-gpus', '1')
else:
hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
kubelet_opts.add('feature-gates', 'Accelerators=true')
# Apply node labels
_apply_node_label('gpu=true', overwrite=True)
_apply_node_label('cuda=true', overwrite=True)
set_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('kubernetes-worker.privileged')
@when_not('kubernetes-worker.restart-needed')
def disable_gpu():
"""Disable GPU usage on this node.
This handler fires when we're running in gpu mode, and then the operator
sets allow-privileged="false". Since we can no longer run privileged
containers, we need to disable gpu mode.
"""
hookenv.log('Disabling gpu mode')
kubelet_opts = FlagManager('kubelet')
if get_version('kubelet') < (1, 6):
kubelet_opts.destroy('experimental-nvidia-gpus')
else:
kubelet_opts.remove('feature-gates', 'Accelerators=true')
# Remove node labels
_apply_node_label('gpu', delete=True)
_apply_node_label('cuda', delete=True)
remove_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
"""Notify kubernetes-master that we're gpu-enabled.
"""
kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
"""Notify kubernetes-master that we're not gpu-enabled.
"""
kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
""" Request kubelet node authorization with a well formed kubelet user.
This also implies that we are requesting kube-proxy auth. """
# The kube-cotrol interface is created to support RBAC.
# At this point we might as well do the right thing and return the hostname
# even if it will only be used when we enable RBAC
nodeuser = 'system:node:{}'.format(gethostname())
kube_control.set_auth_request(nodeuser)
@when('kube-control.auth.available')
def catch_change_in_creds(kube_control):
"""Request a service restart in case credential updates were detected."""
creds = kube_control.get_auth_credentials()
if data_changed('kube-control.creds', creds):
set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator they need to add the kube-control relation.
If deploying via bundle this won't happen, but if the operator is upgrading
a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
hookenv.status_set(
'blocked',
'Relate {}:kube-control kubernetes-master:kube-control'.format(
hookenv.service_name()))
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
def _apply_node_label(label, delete=False, overwrite=False):
''' Invoke kubectl to apply node label changes '''
hostname = gethostname()
# TODO: Make this part of the kubectl calls instead of a special string
cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}'
if delete is True:
label_key = label.split('=')[0]
cmd = cmd_base.format(kubeconfig_path, hostname, label_key)
cmd = cmd + '-'
else:
cmd = cmd_base.format(kubeconfig_path, hostname, label)
if overwrite:
cmd = '{} --overwrite'.format(cmd)
check_call(split(cmd))
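# Illustrative commands this helper builds (hostname 'worker-0' is
# hypothetical):
#   _apply_node_label('gpu=true', overwrite=True)
#     -> kubectl --kubeconfig=<path> label node worker-0 gpu=true --overwrite
#   _apply_node_label('gpu', delete=True)
#     -> kubectl --kubeconfig=<path> label node worker-0 gpu-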
def _parse_labels(labels):
''' Parse labels from a key=value string separated by space.'''
label_array = labels.split(' ')
sanitized_labels = []
for item in label_array:
if '=' in item:
sanitized_labels.append(item)
else:
hookenv.log('Skipping malformed option: {}'.format(item))
return sanitized_labels
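# Example (a doctest-style sketch; the input string is hypothetical):
#   _parse_labels('gpu=true zone=us-east malformed')
#   -> ['gpu=true', 'zone=us-east']  # 'malformed' is logged and skipped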
|
yaxinlx/apiserver-builder
|
cmd/vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
|
Python
|
apache-2.0
| 31,640
|
[
"CDK"
] |
4fc7cd8549303214e382f9c3cf7e9cd0d5c7189dc39d3996666710d1214464f6
|
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Module to support the loading of a NetCDF file into an Iris cube.
See also: `netCDF4 python <http://code.google.com/p/netcdf4-python/>`_.
Also refer to document 'NetCDF Climate and Forecast (CF) Metadata Conventions',
Version 1.4, 27 February 2009.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
import collections
import os
import os.path
import string
import warnings
import biggus
import iris.proxy
iris.proxy.apply_proxy('netCDF4', globals())
import numpy as np
import numpy.ma as ma
from pyke import knowledge_engine
import iris.analysis
from iris.aux_factory import HybridHeightFactory, HybridPressureFactory, \
OceanSigmaZFactory, OceanSigmaFactory, OceanSFactory, OceanSg1Factory, \
OceanSg2Factory
import iris.coord_systems
import iris.coords
import iris.cube
import iris.exceptions
import iris.fileformats.cf
import iris.fileformats._pyke_rules
import iris.io
import iris.unit
import iris.util
# Show Pyke inference engine statistics.
DEBUG = False
# Pyke CF related file names.
_PYKE_RULE_BASE = 'fc_rules_cf'
_PYKE_FACT_BASE = 'facts_cf'
# Standard CML spatio-temporal axis names.
SPATIO_TEMPORAL_AXES = ['t', 'z', 'y', 'x']
# Pass through CF attributes:
# - comment
# - Conventions
# - flag_masks
# - flag_meanings
# - flag_values
# - history
# - institution
# - reference
# - source
# - title
# - positive
#
_CF_ATTRS = ['add_offset', 'ancillary_variables', 'axis', 'bounds', 'calendar',
'cell_measures', 'cell_methods', 'climatology', 'compress',
'coordinates', '_FillValue', 'formula_terms', 'grid_mapping',
'leap_month', 'leap_year', 'long_name', 'missing_value',
'month_lengths', 'scale_factor', 'standard_error_multiplier',
'standard_name', 'units', 'valid_max', 'valid_min', 'valid_range']
# CF attributes that should not be global.
_CF_DATA_ATTRS = ['flag_masks', 'flag_meanings', 'flag_values',
'instance_dimension', 'sample_dimension',
'standard_error_multiplier']
# CF attributes that should only be global.
_CF_GLOBAL_ATTRS = ['conventions', 'featureType', 'history', 'title']
# UKMO specific attributes that should not be global.
_UKMO_DATA_ATTRS = ['STASH', 'um_stash_source', 'ukmo__process_flags']
CF_CONVENTIONS_VERSION = 'CF-1.5'
_FactoryDefn = collections.namedtuple('_FactoryDefn', ('primary', 'std_name',
'formula_terms_format'))
_FACTORY_DEFNS = {
HybridHeightFactory: _FactoryDefn(
primary='delta',
std_name='atmosphere_hybrid_height_coordinate',
formula_terms_format='a: {delta} b: {sigma} orog: {orography}'),
HybridPressureFactory: _FactoryDefn(
primary='delta',
std_name='atmosphere_hybrid_sigma_pressure_coordinate',
formula_terms_format='ap: {delta} b: {sigma} '
'ps: {surface_air_pressure}'),
OceanSigmaZFactory: _FactoryDefn(
primary='zlev',
std_name='ocean_sigma_z_coordinate',
formula_terms_format='sigma: {sigma} eta: {eta} depth: {depth} '
'depth_c: {depth_c} nsigma: {nsigma} zlev: {zlev}'),
OceanSigmaFactory: _FactoryDefn(
primary='sigma',
std_name='ocean_sigma_coordinate',
formula_terms_format='sigma: {sigma} eta: {eta} depth: {depth}'),
OceanSFactory: _FactoryDefn(
primary='s',
std_name='ocean_s_coordinate',
formula_terms_format='s: {s} eta: {eta} depth: {depth} a: {a} b: {b} '
'depth_c: {depth_c}'),
OceanSg1Factory: _FactoryDefn(
primary='s',
std_name='ocean_s_coordinate_g1',
formula_terms_format='s: {s} c: {c} eta: {eta} depth: {depth} '
'depth_c: {depth_c}'),
OceanSg2Factory: _FactoryDefn(
primary='s',
std_name='ocean_s_coordinate_g2',
formula_terms_format='s: {s} c: {c} eta: {eta} depth: {depth} '
'depth_c: {depth_c}')
}
class CFNameCoordMap(object):
"""Provide a simple CF name to CF coordinate mapping."""
_Map = collections.namedtuple('_Map', ['name', 'coord'])
def __init__(self):
self._map = []
def append(self, name, coord):
"""
Append the given name and coordinate pair to the mapping.
Args:
* name:
CF name of the associated coordinate.
* coord:
The coordinate of the associated CF name.
Returns:
None.
"""
self._map.append(CFNameCoordMap._Map(name, coord))
@property
def names(self):
"""Return all the CF names."""
return [pair.name for pair in self._map]
@property
def coords(self):
"""Return all the coordinates."""
return [pair.coord for pair in self._map]
def name(self, coord):
"""
Return the CF name, given a coordinate.
Args:
* coord:
The coordinate of the associated CF name.
Returns:
The CF name.
"""
result = None
for pair in self._map:
if coord == pair.coord:
result = pair.name
break
if result is None:
msg = 'Coordinate is not mapped, {!r}'.format(coord)
raise KeyError(msg)
return result
def coord(self, name):
"""
Return the coordinate, given a CF name.
Args:
* name:
CF name of the associated coordinate.
Returns:
The coordinate.
"""
result = None
for pair in self._map:
if name == pair.name:
result = pair.coord
break
if result is None:
msg = 'Name is not mapped, {!r}'.format(name)
raise KeyError(msg)
return result
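# A minimal usage sketch (the name and coordinate below are hypothetical):
#   mapping = CFNameCoordMap()
#   mapping.append('time', time_coord)
#   mapping.name(time_coord)  # -> 'time'
#   mapping.coord('time')     # -> time_coord
# Both lookups raise KeyError when the name or coordinate is not mapped.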
def _pyke_kb_engine():
"""Return the PyKE knowledge engine for CF->cube conversion."""
pyke_dir = os.path.join(os.path.dirname(__file__), '_pyke_rules')
compile_dir = os.path.join(pyke_dir, 'compiled_krb')
engine = None
if os.path.exists(compile_dir):
tmpvar = [os.path.getmtime(os.path.join(compile_dir, fname)) for
fname in os.listdir(compile_dir) if not
fname.startswith('_')]
if tmpvar:
oldest_pyke_compile_file = min(tmpvar)
rule_age = os.path.getmtime(
os.path.join(pyke_dir, _PYKE_RULE_BASE + '.krb'))
if oldest_pyke_compile_file >= rule_age:
# Initialise the pyke inference engine.
engine = knowledge_engine.engine(
(None, 'iris.fileformats._pyke_rules.compiled_krb'))
if engine is None:
engine = knowledge_engine.engine(iris.fileformats._pyke_rules)
return engine
class NetCDFDataProxy(object):
"""A reference to the data payload of a single NetCDF file variable."""
__slots__ = ('shape', 'dtype', 'path', 'variable_name', 'fill_value')
def __init__(self, shape, dtype, path, variable_name, fill_value):
self.shape = shape
self.dtype = dtype
self.path = path
self.variable_name = variable_name
self.fill_value = fill_value
@property
def ndim(self):
return len(self.shape)
def __getitem__(self, keys):
dataset = netCDF4.Dataset(self.path)
try:
variable = dataset.variables[self.variable_name]
# Get the NetCDF variable data and slice.
data = variable[keys]
finally:
dataset.close()
return data
def __repr__(self):
fmt = '<{self.__class__.__name__} shape={self.shape}' \
' dtype={self.dtype!r} path={self.path!r}' \
' variable_name={self.variable_name!r}>'
return fmt.format(self=self)
def __getstate__(self):
return {attr: getattr(self, attr) for attr in self.__slots__}
def __setstate__(self, state):
for key, value in six.iteritems(state):
setattr(self, key, value)
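# A hedged sketch of deferred access (the path and variable name are
# hypothetical):
#   proxy = NetCDFDataProxy((12, 96, 73), np.dtype('f4'),
#                           '/data/air_temp.nc', 'air_temperature', -1e30)
#   chunk = proxy[0, :10, :10]  # opens, slices, and closes the file on demand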
def _assert_case_specific_facts(engine, cf, cf_group):
# Initialise pyke engine "provides" hooks.
engine.provides['coordinates'] = []
# Assert facts for CF coordinates.
for cf_name in six.iterkeys(cf_group.coordinates):
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'coordinate',
(cf_name,))
# Assert facts for CF auxiliary coordinates.
for cf_name in six.iterkeys(cf_group.auxiliary_coordinates):
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'auxiliary_coordinate',
(cf_name,))
# Assert facts for CF grid_mappings.
for cf_name in six.iterkeys(cf_group.grid_mappings):
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'grid_mapping',
(cf_name,))
# Assert facts for CF labels.
for cf_name in six.iterkeys(cf_group.labels):
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'label',
(cf_name,))
# Assert facts for CF formula terms associated with the cf_group
# of the CF data variable.
formula_root = set()
for cf_var in six.itervalues(cf.cf_group.formula_terms):
for cf_root, cf_term in six.iteritems(cf_var.cf_terms_by_root):
# Only assert this fact if the formula root variable is
# defined in the CF group of the CF data variable.
if cf_root in cf_group:
formula_root.add(cf_root)
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'formula_term',
(cf_var.cf_name, cf_root,
cf_term))
for cf_root in formula_root:
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'formula_root',
(cf_root,))
def _pyke_stats(engine, cf_name):
if DEBUG:
print('-' * 80)
print('CF Data Variable: %r' % cf_name)
engine.print_stats()
print('Rules Triggered:')
for rule in sorted(list(engine.rule_triggered)):
print('\t%s' % rule)
print('Case Specific Facts:')
kb_facts = engine.get_kb(_PYKE_FACT_BASE)
for key in six.iterkeys(kb_facts.entity_lists):
for arg in kb_facts.entity_lists[key].case_specific_facts:
print('\t%s%s' % (key, arg))
def _set_attributes(attributes, key, value):
"""Set attributes dictionary, converting unicode strings appropriately."""
if isinstance(value, six.text_type):
try:
attributes[str(key)] = str(value)
except UnicodeEncodeError:
attributes[str(key)] = value
else:
attributes[str(key)] = value
def _load_cube(engine, cf, cf_var, filename):
"""Create the cube associated with the CF-netCDF data variable."""
# Figure out what the eventual data type will be after any scale/offset
# transforms.
dummy_data = np.zeros(1, dtype=cf_var.dtype)
if hasattr(cf_var, 'scale_factor'):
dummy_data = cf_var.scale_factor * dummy_data
if hasattr(cf_var, 'add_offset'):
dummy_data = cf_var.add_offset + dummy_data
# Create cube with deferred data, but no metadata
fill_value = getattr(cf_var.cf_data, '_FillValue',
netCDF4.default_fillvals[cf_var.dtype.str[1:]])
proxy = NetCDFDataProxy(cf_var.shape, dummy_data.dtype,
filename, cf_var.cf_name, fill_value)
data = biggus.OrthoArrayAdapter(proxy)
cube = iris.cube.Cube(data)
# Reset the pyke inference engine.
engine.reset()
# Initialise pyke engine rule processing hooks.
engine.cf_var = cf_var
engine.cube = cube
engine.provides = {}
engine.requires = {}
engine.rule_triggered = set()
engine.filename = filename
# Assert any case-specific facts.
_assert_case_specific_facts(engine, cf, cf_var.cf_group)
# Run pyke inference engine with forward chaining rules.
engine.activate(_PYKE_RULE_BASE)
# Populate coordinate attributes with the untouched attributes from the
# associated CF-netCDF variable.
coordinates = engine.provides.get('coordinates', [])
attribute_predicate = lambda item: item[0] not in _CF_ATTRS
for coord, cf_var_name in coordinates:
tmpvar = filter(attribute_predicate,
cf.cf_group[cf_var_name].cf_attrs_unused())
for attr_name, attr_value in tmpvar:
_set_attributes(coord.attributes, attr_name, attr_value)
tmpvar = filter(attribute_predicate, cf_var.cf_attrs_unused())
# Attach untouched attributes of the associated CF-netCDF data variable to
# the cube.
for attr_name, attr_value in tmpvar:
_set_attributes(cube.attributes, attr_name, attr_value)
# Show pyke session statistics.
_pyke_stats(engine, cf_var.cf_name)
return cube
def _load_aux_factory(engine, cube):
"""
Convert any CF-netCDF dimensionless coordinate to an AuxCoordFactory.
"""
formula_type = engine.requires.get('formula_type')
if formula_type in ['atmosphere_hybrid_height_coordinate',
'atmosphere_hybrid_sigma_pressure_coordinate',
'ocean_sigma_z_coordinate', 'ocean_sigma_coordinate',
'ocean_s_coordinate', 'ocean_s_coordinate_g1',
'ocean_s_coordinate_g2']:
def coord_from_term(term):
# Convert term names to coordinates (via netCDF variable names).
name = engine.requires['formula_terms'][term]
for coord, cf_var_name in engine.provides['coordinates']:
if cf_var_name == name:
return coord
warnings.warn('Unable to find coordinate for variable '
'{!r}'.format(name))
if formula_type == 'atmosphere_hybrid_height_coordinate':
delta = coord_from_term('a')
sigma = coord_from_term('b')
orography = coord_from_term('orog')
factory = HybridHeightFactory(delta, sigma, orography)
elif formula_type == 'atmosphere_hybrid_sigma_pressure_coordinate':
# Hybrid pressure has two valid versions of its formula terms:
# "p0: var1 a: var2 b: var3 ps: var4" or
# "ap: var1 b: var2 ps: var3" where "ap = p0 * a"
try:
# Attempt to get the "ap" term.
delta = coord_from_term('ap')
except (KeyError, ValueError):
# The "ap" term is unavailable, so try getting terms "p0"
# and "a" terms in order to derive an "ap" equivalent term.
coord_p0 = coord_from_term('p0')
if coord_p0.shape != (1,):
msg = 'Expecting {!r} to be a scalar reference pressure ' \
'coordinate, got shape {!r}'.format(coord_p0.var_name,
coord_p0.shape)
raise ValueError(msg)
if coord_p0.has_bounds():
msg = 'Ignoring atmosphere hybrid sigma pressure scalar ' \
'coordinate {!r} bounds.'.format(coord_p0.name())
warnings.warn(msg)
coord_a = coord_from_term('a')
delta = coord_a * coord_p0.points[0]
delta.units = coord_a.units * coord_p0.units
delta.rename('vertical pressure')
delta.var_name = 'ap'
cube.add_aux_coord(delta, cube.coord_dims(coord_a))
sigma = coord_from_term('b')
surface_air_pressure = coord_from_term('ps')
factory = HybridPressureFactory(delta, sigma, surface_air_pressure)
elif formula_type == 'ocean_sigma_z_coordinate':
sigma = coord_from_term('sigma')
eta = coord_from_term('eta')
depth = coord_from_term('depth')
depth_c = coord_from_term('depth_c')
nsigma = coord_from_term('nsigma')
zlev = coord_from_term('zlev')
factory = OceanSigmaZFactory(sigma, eta, depth,
depth_c, nsigma, zlev)
elif formula_type == 'ocean_sigma_coordinate':
sigma = coord_from_term('sigma')
eta = coord_from_term('eta')
depth = coord_from_term('depth')
factory = OceanSigmaFactory(sigma, eta, depth)
elif formula_type == 'ocean_s_coordinate':
s = coord_from_term('s')
eta = coord_from_term('eta')
depth = coord_from_term('depth')
a = coord_from_term('a')
depth_c = coord_from_term('depth_c')
b = coord_from_term('b')
factory = OceanSFactory(s, eta, depth, a, b, depth_c)
elif formula_type == 'ocean_s_coordinate_g1':
s = coord_from_term('s')
c = coord_from_term('c')
eta = coord_from_term('eta')
depth = coord_from_term('depth')
depth_c = coord_from_term('depth_c')
factory = OceanSg1Factory(s, c, eta, depth,
depth_c)
elif formula_type == 'ocean_s_coordinate_g2':
s = coord_from_term('s')
c = coord_from_term('c')
eta = coord_from_term('eta')
depth = coord_from_term('depth')
depth_c = coord_from_term('depth_c')
factory = OceanSg2Factory(s, c, eta, depth,
depth_c)
cube.add_aux_factory(factory)
def load_cubes(filenames, callback=None):
"""
Loads cubes from a list of NetCDF filenames/URLs.
Args:
* filenames (string/list):
One or more NetCDF filenames/DAP URLs to load from.
Kwargs:
* callback (callable function):
Function which can be passed on to :func:`iris.io.run_callback`.
Returns:
Generator of loaded NetCDF :class:`iris.cube.Cube`.
"""
# Initialise the pyke inference engine.
engine = _pyke_kb_engine()
if isinstance(filenames, six.string_types):
filenames = [filenames]
for filename in filenames:
# Ingest the netCDF file.
cf = iris.fileformats.cf.CFReader(filename)
# Process each CF data variable.
data_variables = (list(cf.cf_group.data_variables.values()) +
list(cf.cf_group.promoted.values()))
for cf_var in data_variables:
cube = _load_cube(engine, cf, cf_var, filename)
# Process any associated formula terms and attach
# the corresponding AuxCoordFactory.
try:
_load_aux_factory(engine, cube)
except ValueError as e:
warnings.warn('{}'.format(e))
# Perform any user registered callback function.
cube = iris.io.run_callback(callback, cube, cf_var, filename)
# Callback mechanism may return None, which must not be yielded
if cube is None:
continue
yield cube
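# Typical use (the filename is hypothetical); load_cubes is a generator, so
# it must be iterated before any file is read:
#   for cube in load_cubes('/data/air_temp.nc'):
#       print(cube)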
class Saver(object):
"""A manager for saving netcdf files."""
def __init__(self, filename, netcdf_format):
"""
A manager for saving netcdf files.
Args:
* filename (string):
Name of the netCDF file to save the cube.
* netcdf_format (string):
Underlying netCDF file format, one of 'NETCDF4', 'NETCDF4_CLASSIC',
'NETCDF3_CLASSIC' or 'NETCDF3_64BIT'. Default is 'NETCDF4' format.
Returns:
None.
For example::
# Initialise Manager for saving
with Saver(filename, netcdf_format) as sman:
# Iterate through the cubelist.
for cube in cubes:
sman.write(cube)
"""
if netcdf_format not in ['NETCDF4', 'NETCDF4_CLASSIC',
'NETCDF3_CLASSIC', 'NETCDF3_64BIT']:
raise ValueError('Unknown netCDF file format, got %r' %
netcdf_format)
# All persistent variables
#: CF name mapping with iris coordinates
self._name_coord_map = CFNameCoordMap()
#: List of dimension coordinates added to the file
self._dim_coords = []
#: List of grid mappings added to the file
self._coord_systems = []
#: A dictionary, listing dimension names and corresponding length
self._existing_dim = {}
#: A dictionary, mapping formula terms to owner cf variable name
self._formula_terms_cache = {}
#: NetCDF dataset
try:
self._dataset = netCDF4.Dataset(filename, mode='w',
format=netcdf_format)
except RuntimeError:
dir_name = os.path.dirname(filename)
if not os.path.isdir(dir_name):
msg = 'No such file or directory: {}'.format(dir_name)
raise IOError(msg)
if not os.access(dir_name, os.R_OK | os.W_OK):
msg = 'Permission denied: {}'.format(filename)
raise IOError(msg)
else:
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Flush any buffered data to the CF-netCDF file before closing."""
self._dataset.sync()
self._dataset.close()
def write(self, cube, local_keys=None, unlimited_dimensions=None,
zlib=False, complevel=4, shuffle=True, fletcher32=False,
contiguous=False, chunksizes=None, endian='native',
least_significant_digit=None):
"""
Wrapper for saving cubes to a NetCDF file.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
Kwargs:
* local_keys (iterable of strings):
An iterable of cube attribute keys. Any cube attributes with
matching keys will become attributes on the data variable rather
than global attributes.
* unlimited_dimensions (iterable of strings and/or
:class:`iris.coords.Coord` objects):
Explicit list of coordinate names (or coordinate objects)
corresponding to coordinate dimensions of `cube` to save with the
NetCDF dimension variable length 'UNLIMITED'. By default, the
outermost (first) dimension for each cube is used. Only the
'NETCDF4' format supports multiple 'UNLIMITED' dimensions. To save
no unlimited dimensions, use `unlimited_dimensions=[]` (an empty
list).
* zlib (bool):
If `True`, the data will be compressed in the netCDF file using
gzip compression (default `False`).
* complevel (int):
An integer between 1 and 9 describing the level of compression
desired (default 4). Ignored if `zlib=False`.
* shuffle (bool):
If `True`, the HDF5 shuffle filter will be applied before
compressing the data (default `True`). This significantly improves
compression. Ignored if `zlib=False`.
* fletcher32 (bool):
If `True`, the Fletcher32 HDF5 checksum algorithm is activated to
detect errors. Default `False`.
* contiguous (bool):
If `True`, the variable data is stored contiguously on disk.
Default `False`. Setting to `True` for a variable with an unlimited
dimension will trigger an error.
* chunksizes (tuple of int):
Used to manually specify the HDF5 chunksizes for each dimension of
the variable. A detailed discussion of HDF chunking and I/O
performance is available here:
http://www.hdfgroup.org/HDF5/doc/H5.user/Chunking.html. Basically,
you want the chunk size for each dimension to match as closely as
possible the size of the data block that users will read from the
file. `chunksizes` cannot be set if `contiguous=True`.
* endian (string):
Used to control whether the data is stored in little or big endian
format on disk. Possible values are 'little', 'big' or 'native'
(default). The library will automatically handle endian conversions
when the data is read, but if the data is always going to be read
on a computer with the opposite format as the one used to create
the file, there may be some performance advantage to be gained by
setting the endian-ness.
* least_significant_digit (int):
If `least_significant_digit` is specified, variable data will be
truncated (quantized). In conjunction with `zlib=True` this
produces 'lossy', but significantly more efficient compression. For
example, if `least_significant_digit=1`, data will be quantized
using `numpy.around(scale*data)/scale`, where `scale = 2**bits`,
and `bits` is determined so that a precision of 0.1 is retained (in
this case `bits=4`). From
http://www.esrl.noaa.gov/psd/data/gridded/conventions/cdc_netcdf_standard.shtml:
"least_significant_digit -- power of ten of the smallest decimal
place in unpacked data that is a reliable value". Default is
`None`, or no quantization, or 'lossless' compression.
Returns:
None.
.. note::
The `zlib`, `complevel`, `shuffle`, `fletcher32`, `contiguous`,
`chunksizes` and `endian` keywords are silently ignored for netCDF
3 files that do not use HDF5.
.. deprecated:: 1.8.0
NetCDF default saving behaviour currently assigns the outermost
dimension as unlimited. This behaviour is to be deprecated, in
favour of no automatic assignment. To switch to the new behaviour,
set `iris.FUTURE.netcdf_no_unlimited` to True.
"""
if unlimited_dimensions is None:
if iris.FUTURE.netcdf_no_unlimited:
unlimited_dimensions = []
else:
_no_unlim_dep_warning()
cf_profile_available = (iris.site_configuration.get('cf_profile') not
in [None, False])
if cf_profile_available:
# Perform a CF profile of the cube. This may result in an exception
# being raised if mandatory requirements are not satisfied.
profile = iris.site_configuration['cf_profile'](cube)
# Get suitable dimension names.
dimension_names = self._get_dim_names(cube)
# Create the CF-netCDF data dimensions.
self._create_cf_dimensions(cube, dimension_names, unlimited_dimensions)
# Create the associated cube CF-netCDF data variable.
cf_var_cube = self._create_cf_data_variable(
cube, dimension_names, local_keys, zlib=zlib, complevel=complevel,
shuffle=shuffle, fletcher32=fletcher32, contiguous=contiguous,
chunksizes=chunksizes, endian=endian,
least_significant_digit=least_significant_digit)
# Add coordinate variables.
self._add_dim_coords(cube, dimension_names)
# Add the auxiliary coordinate variable names and associate the data
# variable to them
self._add_aux_coords(cube, cf_var_cube, dimension_names)
# Add the formula terms to the appropriate cf variables for each
# aux factory in the cube.
self._add_aux_factories(cube, cf_var_cube, dimension_names)
# Add data variable-only attribute names to local_keys.
if local_keys is None:
local_keys = set()
else:
local_keys = set(local_keys)
local_keys.update(_CF_DATA_ATTRS, _UKMO_DATA_ATTRS)
# Add global attributes taking into account local_keys.
global_attributes = {k: v for k, v in six.iteritems(cube.attributes)
if (k not in local_keys and
k.lower() != 'conventions')}
self.update_global_attributes(global_attributes)
if cf_profile_available:
cf_patch = iris.site_configuration.get('cf_patch')
if cf_patch is not None:
# Perform a CF patch of the dataset.
cf_patch(profile, self._dataset, cf_var_cube)
else:
msg = 'cf_profile is available but no {} defined.'.format(
'cf_patch')
warnings.warn(msg)
def update_global_attributes(self, attributes=None, **kwargs):
"""
Update the CF global attributes based on the provided
iterable/dictionary and/or keyword arguments.
Args:
* attributes (dict or iterable of key, value pairs):
CF global attributes to be updated.
"""
if attributes is not None:
# Handle sequence e.g. [('fruit', 'apple'), ...].
if not hasattr(attributes, 'keys'):
attributes = dict(attributes)
for attr_name in sorted(attributes):
self._dataset.setncattr(attr_name, attributes[attr_name])
for attr_name in sorted(kwargs):
self._dataset.setncattr(attr_name, kwargs[attr_name])
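# For example (attribute values are hypothetical):
#   sman.update_global_attributes({'institution': 'ACME'}, history='edited')
# writes both entries as global attributes on the underlying dataset.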
def _create_cf_dimensions(self, cube, dimension_names,
unlimited_dimensions=None):
"""
Create the CF-netCDF data dimensions.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` in which to lookup coordinates.
Kwargs:
* unlimited_dimensions (iterable of strings and/or
:class:`iris.coords.Coord` objects):
List of coordinates to make unlimited. By default, the
outermost dimension is made unlimited.
Returns:
None.
"""
unlimited_dim_names = []
if (unlimited_dimensions is None and
not iris.FUTURE.netcdf_no_unlimited):
if dimension_names:
unlimited_dim_names.append(dimension_names[0])
else:
for coord in unlimited_dimensions:
try:
coord = cube.coord(name_or_coord=coord, dim_coords=True)
except iris.exceptions.CoordinateNotFoundError:
# coordinate isn't used for this cube, but it might be
# used for a different one
pass
else:
dim_name = self._get_coord_variable_name(cube, coord)
unlimited_dim_names.append(dim_name)
for dim_name in dimension_names:
if dim_name not in self._dataset.dimensions:
if dim_name in unlimited_dim_names:
size = None
else:
size = self._existing_dim[dim_name]
self._dataset.createDimension(dim_name, size)
def _add_aux_coords(self, cube, cf_var_cube, dimension_names):
"""
Add aux. coordinate to the dataset and associate with the data variable
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
* cf_var_cube (:class:`netcdf.netcdf_variable`):
cf variable cube representation.
* dimension_names (list):
Names associated with the dimensions of the cube.
"""
auxiliary_coordinate_names = []
# Add CF-netCDF variables for the associated auxiliary coordinates.
for coord in sorted(cube.aux_coords, key=lambda coord: coord.name()):
# Create the associated coordinate CF-netCDF variable.
if coord not in self._name_coord_map.coords:
cf_name = self._create_cf_variable(cube, dimension_names,
coord)
self._name_coord_map.append(cf_name, coord)
else:
cf_name = self._name_coord_map.name(coord)
if cf_name is not None:
auxiliary_coordinate_names.append(cf_name)
# Add CF-netCDF auxiliary coordinate variable references to the
# CF-netCDF data variable.
if auxiliary_coordinate_names:
cf_var_cube.coordinates = ' '.join(
sorted(auxiliary_coordinate_names))
def _add_dim_coords(self, cube, dimension_names):
"""
Add coordinate variables to NetCDF dataset.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
* dimension_names (list):
Names associated with the dimensions of the cube.
"""
# Ensure we create the netCDF coordinate variables first.
for coord in cube.dim_coords:
# Create the associated coordinate CF-netCDF variable.
if coord not in self._name_coord_map.coords:
cf_name = self._create_cf_variable(cube, dimension_names,
coord)
self._name_coord_map.append(cf_name, coord)
def _add_aux_factories(self, cube, cf_var_cube, dimension_names):
"""
Modifies the variables of the NetCDF dataset to represent
the presence of dimensionless vertical coordinates based on
the aux factories of the cube (if any).
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
* cf_var_cube (:class:`netcdf.netcdf_variable`):
CF variable cube representation.
* dimension_names (list):
Names associated with the dimensions of the cube.
"""
primaries = []
for factory in cube.aux_factories:
factory_defn = _FACTORY_DEFNS.get(type(factory), None)
if factory_defn is None:
msg = 'Unable to determine formula terms ' \
'for AuxFactory: {!r}'.format(factory)
warnings.warn(msg)
else:
# Override `standard_name`, `long_name`, and `axis` of the
# primary coord that signals the presence of a dimensionless
# vertical coord, then set the `formula_terms` attribute.
primary_coord = factory.dependencies[factory_defn.primary]
if primary_coord in primaries:
msg = 'Cube {!r} has multiple aux factories that share ' \
'a common primary coordinate {!r}. Unable to save ' \
'to netCDF as having multiple formula terms on a ' \
'single coordinate is not supported.'
raise ValueError(msg.format(cube, primary_coord.name()))
primaries.append(primary_coord)
cf_name = self._name_coord_map.name(primary_coord)
cf_var = self._dataset.variables[cf_name]
names = {key: self._name_coord_map.name(coord) for
key, coord in six.iteritems(factory.dependencies)}
formula_terms = factory_defn.formula_terms_format.format(
**names)
std_name = factory_defn.std_name
if hasattr(cf_var, 'formula_terms'):
if cf_var.formula_terms != formula_terms or \
cf_var.standard_name != std_name:
# TODO: We need to resolve this corner-case where
# the dimensionless vertical coordinate containing the
# formula_terms is a dimension coordinate of the
# associated cube and a new alternatively named
# dimensionless vertical coordinate is required with
# new formula_terms and a renamed dimension.
if cf_name in dimension_names:
msg = 'Unable to create dimensionless vertical ' \
'coordinate.'
raise ValueError(msg)
key = (cf_name, std_name, formula_terms)
name = self._formula_terms_cache.get(key)
if name is None:
# Create a new variable
name = self._create_cf_variable(cube,
dimension_names,
primary_coord)
cf_var = self._dataset.variables[name]
cf_var.standard_name = std_name
cf_var.axis = 'Z'
# Update the formula terms.
ft = formula_terms.split()
ft = [name if t == cf_name else t for t in ft]
cf_var.formula_terms = ' '.join(ft)
# Update the cache.
self._formula_terms_cache[key] = name
# Update the associated cube variable.
coords = cf_var_cube.coordinates.split()
coords = [name if c == cf_name else c for c in coords]
cf_var_cube.coordinates = ' '.join(coords)
else:
cf_var.standard_name = std_name
cf_var.axis = 'Z'
cf_var.formula_terms = formula_terms
def _get_dim_names(self, cube):
"""
Determine suitable CF-netCDF data dimension names.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
Returns:
List of dimension names with length equal the number of dimensions
in the cube.
"""
dimension_names = []
for dim in range(cube.ndim):
coords = cube.coords(dimensions=dim, dim_coords=True)
if coords:
coord = coords[0]
dim_name = self._get_coord_variable_name(cube, coord)
# Add only dimensions that have not already been added.
if coord not in self._dim_coords:
# Determine unique dimension name
while (dim_name in self._existing_dim or
dim_name in self._name_coord_map.names):
dim_name = self._increment_name(dim_name)
# Update names added, current cube dim names used and
# unique coordinates added.
self._existing_dim[dim_name] = coord.shape[0]
dimension_names.append(dim_name)
self._dim_coords.append(coord)
else:
# Return the dim_name associated with the existing
# coordinate.
dim_name = self._name_coord_map.name(coord)
dimension_names.append(dim_name)
else:
# No CF-netCDF coordinates describe this data dimension.
dim_name = 'dim%d' % dim
if dim_name in self._existing_dim:
# Increment name if conflicted with one already existing.
if self._existing_dim[dim_name] != cube.shape[dim]:
while (dim_name in self._existing_dim and
self._existing_dim[dim_name] !=
cube.shape[dim] or
dim_name in self._name_coord_map.names):
dim_name = self._increment_name(dim_name)
# Update dictionary with new entry
self._existing_dim[dim_name] = cube.shape[dim]
else:
# Update dictionary with new entry
self._existing_dim[dim_name] = cube.shape[dim]
dimension_names.append(dim_name)
return dimension_names
def _cf_coord_identity(self, coord):
"""
Determine suitable units from a given coordinate.
Args:
* coord (:class:`iris.coords.Coord`):
A coordinate of a cube.
Returns:
The (standard_name, long_name, unit) of the given
:class:`iris.coords.Coord` instance.
"""
units = str(coord.units)
# TODO: Use #61 to get the units.
if isinstance(coord.coord_system, iris.coord_systems.GeogCS):
if "latitude" in coord.standard_name:
units = 'degrees_north'
elif "longitude" in coord.standard_name:
units = 'degrees_east'
elif isinstance(coord.coord_system, iris.coord_systems.RotatedGeogCS):
units = 'degrees'
elif isinstance(coord.coord_system,
iris.coord_systems.TransverseMercator):
units = 'm'
return coord.standard_name, coord.long_name, units
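# Illustrative result: for a latitude coordinate on a GeogCS coordinate
# system this returns ('latitude', None, 'degrees_north'); otherwise the
# units fall back to str(coord.units).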
def _ensure_valid_dtype(self, values, src_name, src_object):
# NetCDF3 does not support int64 or unsigned ints, so we check
# if we can store them as int32 instead.
if ((np.issubdtype(values.dtype, np.int64) or
np.issubdtype(values.dtype, np.unsignedinteger)) and
self._dataset.file_format in ('NETCDF3_CLASSIC',
'NETCDF3_64BIT')):
# Cast to an integer type supported by netCDF3.
if not np.can_cast(values.max(), np.int32) or \
not np.can_cast(values.min(), np.int32):
msg = 'The data type of {} {!r} is not supported by {} and' \
' its values cannot be safely cast to a supported' \
' integer type.'
msg = msg.format(src_name, src_object,
self._dataset.file_format)
raise ValueError(msg)
values = values.astype(np.int32)
return values
def _create_cf_bounds(self, coord, cf_var, cf_name):
"""
Create the associated CF-netCDF bounds variable.
Args:
* coord (:class:`iris.coords.Coord`):
A coordinate of a cube.
* cf_var:
CF-netCDF variable
* cf_name (string):
name of the CF-NetCDF variable.
Returns:
None
"""
if coord.has_bounds():
# Get the values in a form which is valid for the file format.
bounds = self._ensure_valid_dtype(coord.bounds,
'the bounds of coordinate',
coord)
n_bounds = bounds.shape[-1]
if n_bounds == 2:
bounds_dimension_name = 'bnds'
else:
bounds_dimension_name = 'bnds_%s' % n_bounds
if bounds_dimension_name not in self._dataset.dimensions:
# Create the bounds dimension with the appropriate extent.
self._dataset.createDimension(bounds_dimension_name, n_bounds)
cf_var.bounds = cf_name + '_bnds'
cf_var_bounds = self._dataset.createVariable(
cf_var.bounds, bounds.dtype.newbyteorder('='),
cf_var.dimensions + (bounds_dimension_name,))
cf_var_bounds[:] = bounds
def _get_cube_variable_name(self, cube):
"""
Returns a CF-netCDF variable name for the given cube.
Args:
* cube (class:`iris.cube.Cube`):
An instance of a cube for which a CF-netCDF variable
name is required.
Returns:
A CF-netCDF variable name as a string.
"""
if cube.var_name is not None:
cf_name = cube.var_name
else:
# Convert to lower case and replace whitespace by underscores.
cf_name = '_'.join(cube.name().lower().split())
return cf_name
def _get_coord_variable_name(self, cube, coord):
"""
Returns a CF-netCDF variable name for the given coordinate.
Args:
* cube (:class:`iris.cube.Cube`):
The cube that contains the given coordinate.
* coord (:class:`iris.coords.Coord`):
An instance of a coordinate for which a CF-netCDF variable
name is required.
Returns:
A CF-netCDF variable name as a string.
"""
if coord.var_name is not None:
cf_name = coord.var_name
else:
name = coord.standard_name or coord.long_name
if not name or set(name).intersection(string.whitespace):
# Auto-generate name based on associated dimensions.
name = ''
for dim in cube.coord_dims(coord):
name += 'dim{}'.format(dim)
# Handle scalar coordinate (dims == ()).
if not name:
name = 'unknown_scalar'
# Convert to lower case and replace whitespace by underscores.
cf_name = '_'.join(name.lower().split())
return cf_name
def _create_cf_variable(self, cube, dimension_names, coord):
"""
Create the associated CF-netCDF variable in the netCDF dataset for the
given coordinate. If required, also create the CF-netCDF bounds
variable and associated dimension.
Args:
* cube (:class:`iris.cube.Cube`):
The associated cube being saved to CF-netCDF file.
* dimension_names (list):
Names for each dimension of the cube.
* coord (:class:`iris.coords.Coord`):
The coordinate to be saved to CF-netCDF file.
Returns:
The string name of the associated CF-netCDF variable saved.
"""
cf_name = self._get_coord_variable_name(cube, coord)
while cf_name in self._dataset.variables:
cf_name = self._increment_name(cf_name)
# Derive the data dimension names for the coordinate.
cf_dimensions = [dimension_names[dim] for dim in
cube.coord_dims(coord)]
if np.issubdtype(coord.points.dtype, np.str):
string_dimension_depth = coord.points.dtype.itemsize
string_dimension_name = 'string%d' % string_dimension_depth
# Determine whether to create the string length dimension.
if string_dimension_name not in self._dataset.dimensions:
self._dataset.createDimension(string_dimension_name,
string_dimension_depth)
# Add the string length dimension to dimension names.
cf_dimensions.append(string_dimension_name)
# Create the label coordinate variable.
cf_var = self._dataset.createVariable(cf_name, '|S1',
cf_dimensions)
# Add the payload to the label coordinate variable.
if len(cf_dimensions) == 1:
cf_var[:] = list('%- *s' % (string_dimension_depth,
coord.points[0]))
else:
for index in np.ndindex(coord.points.shape):
index_slice = tuple(list(index) + [slice(None, None)])
cf_var[index_slice] = list('%- *s' %
(string_dimension_depth,
coord.points[index]))
else:
# Identify the collection of coordinates that represent CF-netCDF
# coordinate variables.
cf_coordinates = cube.dim_coords
if coord in cf_coordinates:
# By definition of a CF-netCDF coordinate variable this
# coordinate must be 1-D and the name of the CF-netCDF variable
# must be the same as its dimension name.
cf_name = cf_dimensions[0]
# Get the values in a form which is valid for the file format.
points = self._ensure_valid_dtype(coord.points, 'coordinate',
coord)
# Create the CF-netCDF variable.
cf_var = self._dataset.createVariable(
cf_name, points.dtype.newbyteorder('='), cf_dimensions)
# Add the axis attribute for spatio-temporal CF-netCDF coordinates.
if coord in cf_coordinates:
axis = iris.util.guess_coord_axis(coord)
if axis is not None and axis.lower() in SPATIO_TEMPORAL_AXES:
cf_var.axis = axis.upper()
# Add the data to the CF-netCDF variable.
cf_var[:] = points
# Create the associated CF-netCDF bounds variable.
self._create_cf_bounds(coord, cf_var, cf_name)
# Deal with CF-netCDF units and standard name.
standard_name, long_name, units = self._cf_coord_identity(coord)
if units != 'unknown':
cf_var.units = units
if standard_name is not None:
cf_var.standard_name = standard_name
if long_name is not None:
cf_var.long_name = long_name
# Add the CF-netCDF calendar attribute.
if coord.units.calendar:
cf_var.calendar = coord.units.calendar
# Add any other custom coordinate attributes.
for name in sorted(coord.attributes):
value = coord.attributes[name]
if name == 'STASH':
# Adopting provisional Metadata Conventions for representing MO
# Scientific Data encoded in NetCDF Format.
name = 'um_stash_source'
value = str(value)
# Don't clobber existing attributes.
if not hasattr(cf_var, name):
setattr(cf_var, name, value)
return cf_name
def _create_cf_cell_methods(self, cube, dimension_names):
"""
Create CF-netCDF string representation of a cube cell methods.
Args:
* cube (:class:`iris.cube.Cube`) or cubelist
(:class:`iris.cube.CubeList`):
A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or list of
cubes to be saved to a netCDF file.
* dimension_names (list):
Names associated with the dimensions of the cube.
Returns:
CF-netCDF string representation of a cube cell methods.
"""
cell_methods = []
# Identify the collection of coordinates that represent CF-netCDF
# coordinate variables.
cf_coordinates = cube.dim_coords
for cm in cube.cell_methods:
names = ''
for name in cm.coord_names:
coord = cube.coords(name)
if coord:
coord = coord[0]
if coord in cf_coordinates:
name = dimension_names[cube.coord_dims(coord)[0]]
names += '%s: ' % name
interval = ' '.join(['interval: %s' % interval for interval in
cm.intervals or []])
comment = ' '.join(['comment: %s' % comment for comment in
cm.comments or []])
extra = ' '.join([interval, comment]).strip()
if extra:
extra = ' (%s)' % extra
cell_methods.append(names + cm.method + extra)
return ' '.join(cell_methods)
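# Illustrative note (added; not in the original source): for a cube with a
# single cell method of 'mean' over the 'time' dimension coordinate and a
# recorded interval, the string assembled above looks like:
#
#     time: mean (interval: 1 hour)
#
# Multiple cell methods are simply joined with single spaces.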
def _create_cf_grid_mapping(self, cube, cf_var_cube):
"""
Create CF-netCDF grid mapping variable and associated CF-netCDF
data variable grid mapping attribute.
Args:
* cube (:class:`iris.cube.Cube`) or cubelist
(:class:`iris.cube.CubeList`):
A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or list of
cubes to be saved to a netCDF file.
* cf_var_cube (:class:`netcdf.netcdf_variable`):
cf variable cube representation.
Returns:
None
"""
cs = cube.coord_system('CoordSystem')
if cs is not None:
# Grid var not yet created?
if cs not in self._coord_systems:
while cs.grid_mapping_name in self._dataset.variables:
cs.grid_mapping_name = (
self._increment_name(cs.grid_mapping_name))
cf_var_grid = self._dataset.createVariable(
cs.grid_mapping_name, np.int32)
cf_var_grid.grid_mapping_name = cs.grid_mapping_name
def add_ellipsoid(ellipsoid):
cf_var_grid.longitude_of_prime_meridian = (
ellipsoid.longitude_of_prime_meridian)
semi_major = ellipsoid.semi_major_axis
semi_minor = ellipsoid.semi_minor_axis
if semi_minor == semi_major:
cf_var_grid.earth_radius = semi_major
else:
cf_var_grid.semi_major_axis = semi_major
cf_var_grid.semi_minor_axis = semi_minor
# latlon
if isinstance(cs, iris.coord_systems.GeogCS):
add_ellipsoid(cs)
# rotated latlon
elif isinstance(cs, iris.coord_systems.RotatedGeogCS):
if cs.ellipsoid:
add_ellipsoid(cs.ellipsoid)
cf_var_grid.grid_north_pole_latitude = (
cs.grid_north_pole_latitude)
cf_var_grid.grid_north_pole_longitude = (
cs.grid_north_pole_longitude)
cf_var_grid.north_pole_grid_longitude = (
cs.north_pole_grid_longitude)
# tmerc
elif isinstance(cs, iris.coord_systems.TransverseMercator):
if cs.ellipsoid:
add_ellipsoid(cs.ellipsoid)
cf_var_grid.longitude_of_central_meridian = (
cs.longitude_of_central_meridian)
cf_var_grid.latitude_of_projection_origin = (
cs.latitude_of_projection_origin)
cf_var_grid.false_easting = cs.false_easting
cf_var_grid.false_northing = cs.false_northing
cf_var_grid.scale_factor_at_central_meridian = (
cs.scale_factor_at_central_meridian)
# osgb (a specific tmerc)
elif isinstance(cs, iris.coord_systems.OSGB):
warnings.warn('OSGB coordinate system not yet handled')
# other
else:
warnings.warn('Unable to represent the horizontal '
'coordinate system. The coordinate system '
'type %r is not yet implemented.' % type(cs))
self._coord_systems.append(cs)
# Refer to grid var
cf_var_cube.grid_mapping = cs.grid_mapping_name
def _create_cf_data_variable(self, cube, dimension_names, local_keys=None,
**kwargs):
"""
Create CF-netCDF data variable for the cube and any associated grid
mapping.
Args:
* cube (:class:`iris.cube.Cube`):
The associated cube being saved to CF-netCDF file.
* dimension_names (list):
String names for each dimension of the cube.
Kwargs:
* local_keys (iterable of strings):
An iterable of cube attribute keys. Any cube attributes
with matching keys will become attributes on the data variable.
All other keywords are passed through to the dataset's `createVariable`
method.
Returns:
The newly created CF-netCDF data variable.
"""
cf_name = self._get_cube_variable_name(cube)
while cf_name in self._dataset.variables:
cf_name = self._increment_name(cf_name)
# if netcdf3 avoid streaming due to dtype handling
if (not cube.has_lazy_data()
or self._dataset.file_format in ('NETCDF3_CLASSIC',
'NETCDF3_64BIT')):
# Determine whether there is a cube MDI value.
fill_value = None
if isinstance(cube.data, ma.core.MaskedArray):
fill_value = cube.data.fill_value
# Get the values in a form which is valid for the file format.
data = self._ensure_valid_dtype(cube.data, 'cube', cube)
# Create the cube CF-netCDF data variable with data payload.
cf_var = self._dataset.createVariable(
cf_name, data.dtype.newbyteorder('='), dimension_names,
fill_value=fill_value, **kwargs)
cf_var[:] = data
else:
# Create the cube CF-netCDF data variable.
# Explicitly assign the fill_value, which will be the type default
# in the case of an unmasked array.
cf_var = self._dataset.createVariable(
cf_name, cube.lazy_data().dtype.newbyteorder('='),
dimension_names, fill_value=cube.lazy_data().fill_value,
**kwargs)
# stream the data
biggus.save([cube.lazy_data()], [cf_var], masked=True)
if cube.standard_name:
cf_var.standard_name = cube.standard_name
if cube.long_name:
cf_var.long_name = cube.long_name
if cube.units != 'unknown':
cf_var.units = str(cube.units)
# Add data variable-only attribute names to local_keys.
if local_keys is None:
local_keys = set()
else:
local_keys = set(local_keys)
local_keys.update(_CF_DATA_ATTRS, _UKMO_DATA_ATTRS)
# Add any cube attributes whose keys are in local_keys as
# CF-netCDF data variable attributes.
attr_names = set(cube.attributes).intersection(local_keys)
for attr_name in sorted(attr_names):
# Do not output 'conventions' attribute.
if attr_name.lower() == 'conventions':
continue
value = cube.attributes[attr_name]
if attr_name == 'STASH':
# Adopting provisional Metadata Conventions for representing MO
# Scientific Data encoded in NetCDF Format.
attr_name = 'um_stash_source'
value = str(value)
if attr_name == "ukmo__process_flags":
value = " ".join([x.replace(" ", "_") for x in value])
if attr_name in _CF_GLOBAL_ATTRS:
msg = '{attr_name!r} is being added as CF data variable ' \
'attribute, but {attr_name!r} should only be a CF ' \
'global attribute.'.format(attr_name=attr_name)
warnings.warn(msg)
setattr(cf_var, attr_name, value)
# Create the CF-netCDF data variable cell method attribute.
cell_methods = self._create_cf_cell_methods(cube, dimension_names)
if cell_methods:
cf_var.cell_methods = cell_methods
# Create the CF-netCDF grid mapping.
self._create_cf_grid_mapping(cube, cf_var)
return cf_var
def _increment_name(self, varname):
"""
Increment the numeric suffix of a string name, or begin one.
Used to avoid conflicts between variable names: the name is given an
incrementing numeric suffix to distinguish it from others.
Args:
* varname (string):
Variable name to increment.
Returns:
Incremented varname.
"""
num = 0
try:
name, endnum = varname.rsplit('_', 1)
if endnum.isdigit():
num = int(endnum) + 1
varname = name
except ValueError:
pass
return '{}_{}'.format(varname, num)
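# Illustration (added comment): this helper appends or bumps a numeric
# suffix, e.g.
#
#     self._increment_name('temp')    # -> 'temp_0'
#     self._increment_name('temp_0')  # -> 'temp_1'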
def save(cube, filename, netcdf_format='NETCDF4', local_keys=None,
unlimited_dimensions=None, zlib=False, complevel=4, shuffle=True,
fletcher32=False, contiguous=False, chunksizes=None, endian='native',
least_significant_digit=None):
"""
Save cube(s) to a netCDF file, given the cube and the filename.
* Iris will write CF 1.5 compliant NetCDF files.
* The attributes dictionaries on each cube in the saved cube list
will be compared and common attributes saved as NetCDF global
attributes where appropriate.
* Keyword arguments specifying how to save the data are applied
to each cube. To use different settings for different cubes, use
the NetCDF Context manager (:class:`~Saver`) directly.
* The save process will stream the data payload to the file using biggus,
enabling large data payloads to be saved and maintaining the 'lazy'
status of the cube's data payload, unless the netcdf_format is explicitly
specified to be 'NETCDF3_CLASSIC' or 'NETCDF3_64BIT'.
Args:
* cube (:class:`iris.cube.Cube` or :class:`iris.cube.CubeList`):
A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or other
iterable of cubes to be saved to a netCDF file.
* filename (string):
Name of the netCDF file to save the cube(s).
Kwargs:
* netcdf_format (string):
Underlying netCDF file format, one of 'NETCDF4', 'NETCDF4_CLASSIC',
'NETCDF3_CLASSIC' or 'NETCDF3_64BIT'. Default is 'NETCDF4' format.
* local_keys (iterable of strings):
An iterable of cube attribute keys. Any cube attributes with
matching keys will become attributes on the data variable rather
than global attributes.
* unlimited_dimensions (iterable of strings and/or
:class:`iris.coords.Coord` objects):
Explicit list of coordinate names (or coordinate objects) corresponding
to coordinate dimensions of `cube` to save with the NetCDF dimension
variable length 'UNLIMITED'. By default, the outermost (first)
dimension for each cube is used. Only the 'NETCDF4' format supports
multiple 'UNLIMITED' dimensions. To save no unlimited dimensions, use
`unlimited_dimensions=[]` (an empty list).
* zlib (bool):
If `True`, the data will be compressed in the netCDF file using gzip
compression (default `False`).
* complevel (int):
An integer between 1 and 9 describing the level of compression desired
(default 4). Ignored if `zlib=False`.
* shuffle (bool):
If `True`, the HDF5 shuffle filter will be applied before compressing
the data (default `True`). This significantly improves compression.
Ignored if `zlib=False`.
* fletcher32 (bool):
If `True`, the Fletcher32 HDF5 checksum algorithm is activated to
detect errors. Default `False`.
* contiguous (bool):
If `True`, the variable data is stored contiguously on disk. Default
`False`. Setting to `True` for a variable with an unlimited dimension
will trigger an error.
* chunksizes (tuple of int):
Used to manually specify the HDF5 chunksizes for each dimension of the
variable. A detailed discussion of HDF chunking and I/O performance is
available here: http://www.hdfgroup.org/HDF5/doc/H5.user/Chunking.html.
Basically, you want the chunk size for each dimension to match as
closely as possible the size of the data block that users will read
from the file. `chunksizes` cannot be set if `contiguous=True`.
* endian (string):
Used to control whether the data is stored in little or big endian
format on disk. Possible values are 'little', 'big' or 'native'
(default). The library will automatically handle endian conversions
when the data is read, but if the data is always going to be read on a
computer with the opposite format as the one used to create the file,
there may be some performance advantage to be gained by setting the
endian-ness.
* least_significant_digit (int):
If `least_significant_digit` is specified, variable data will be
truncated (quantized). In conjunction with `zlib=True` this produces
'lossy', but significantly more efficient compression. For example, if
`least_significant_digit=1`, data will be quantized using
`numpy.around(scale*data)/scale`, where `scale = 2**bits`, and `bits`
is determined so that a precision of 0.1 is retained (in this case
`bits=4`). From
http://www.esrl.noaa.gov/psd/data/gridded/conventions/cdc_netcdf_standard.shtml:
"least_significant_digit -- power of ten of the smallest decimal place
in unpacked data that is a reliable value". Default is `None`, meaning
no quantization ('lossless' compression).
Returns:
None.
.. note::
The `zlib`, `complevel`, `shuffle`, `fletcher32`, `contiguous`,
`chunksizes` and `endian` keywords are silently ignored for netCDF 3
files that do not use HDF5.
.. seealso::
NetCDF Context manager (:class:`~Saver`).
.. deprecated:: 1.8.0
NetCDF default saving behaviour currently assigns the outermost
dimensions to unlimited. This behaviour is to be deprecated, in
favour of no automatic assignment. To switch to the new behaviour,
set `iris.FUTURE.netcdf_no_unlimited` to True.
"""
if unlimited_dimensions is None:
if iris.FUTURE.netcdf_no_unlimited:
unlimited_dimensions = []
else:
_no_unlim_dep_warning()
if isinstance(cube, iris.cube.Cube):
cubes = iris.cube.CubeList()
cubes.append(cube)
else:
cubes = cube
if local_keys is None:
local_keys = set()
else:
local_keys = set(local_keys)
# Determine the attribute keys that are common across all cubes and
# thereby extend the collection of local_keys for attributes
# that should be attributes on data variables.
attributes = cubes[0].attributes
common_keys = set(attributes)
for cube in cubes[1:]:
keys = set(cube.attributes)
local_keys.update(keys.symmetric_difference(common_keys))
common_keys.intersection_update(keys)
different_value_keys = []
for key in common_keys:
if np.any(attributes[key] != cube.attributes[key]):
different_value_keys.append(key)
common_keys.difference_update(different_value_keys)
local_keys.update(different_value_keys)
# Initialise Manager for saving
with Saver(filename, netcdf_format) as sman:
# Iterate through the cubelist.
for cube in cubes:
sman.write(cube, local_keys, unlimited_dimensions, zlib, complevel,
shuffle, fletcher32, contiguous, chunksizes, endian,
least_significant_digit)
conventions = CF_CONVENTIONS_VERSION
# Perform a CF patch of the conventions attribute.
cf_profile_available = (iris.site_configuration.get('cf_profile') not
in [None, False])
if cf_profile_available:
conventions_patch = iris.site_configuration.get(
'cf_patch_conventions')
if conventions_patch is not None:
conventions = conventions_patch(conventions)
else:
msg = 'cf_profile is available but no {} defined.'.format(
'cf_patch_conventions')
warnings.warn(msg)
# Add conventions attribute.
sman.update_global_attributes(Conventions=conventions)
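# A minimal usage sketch (added for illustration; the file names and cube are
# assumptions, not part of the original module):
#
#     import iris
#     cube = iris.load_cube('input.pp')   # hypothetical source file
#     iris.save(cube, 'output.nc', netcdf_format='NETCDF4',
#               zlib=True, complevel=4, unlimited_dimensions=[])
#
# Adding least_significant_digit=1 alongside zlib=True would additionally
# quantize the data to roughly 0.1 precision before compression, as
# described in the docstring above.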
def _no_unlim_dep_warning():
msg = ('NetCDF default saving behaviour currently assigns the '
'outermost dimensions to unlimited. This behaviour is to be '
'deprecated, in favour of no automatic assignment. To switch '
'to the new behaviour, set iris.FUTURE.netcdf_no_unlimited to '
'True.')
warnings.warn(msg)
| ghislainp/iris | lib/iris/fileformats/netcdf.py | Python | gpl-3.0 | 69,416 | ["NetCDF"] | bf06403d42f34a27bab8d5abfab7cfe31157c84dc82590b947e5e6e2b7d6b6a3 |
# This file is part of the Fluggo Media Library for high-quality
# video and audio processing.
#
# Copyright 2010 Brian J. Crowell <brian@fluggo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .scene import *
from ..ruler import TimeRuler
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from fluggo.media import timecode, process
import fractions
class RulerView(QWidget):
'''This is a view combined with a separate ruler. It's a workaround for
a bug in whatever version of Qt is running on Ubuntu 10.04. Qt ignores the
viewport margins for determining where a drag operation has hit the scene;
it works just fine for normal mouse ops.'''
def __init__(self, uimgr, space):
QWidget.__init__(self)
self.vbox = QVBoxLayout(self)
self.vbox.setStretch(1, 1)
self.vbox.setSpacing(0)
width = self.style().pixelMetric(QStyle.PM_DefaultFrameWidth)
self.vbox.setContentsMargins(width, 0, width, 0)
self.ruler = TimeRuler(self, timecode=timecode.NtscDropFrame())
self.view = View(uimgr, space, ruler=self.ruler)
self.view.setFrameShape(QFrame.NoFrame)
self.vbox.addWidget(self.ruler)
self.vbox.addWidget(self.view)
self.setLayout(self.vbox)
def __getattr__(self, name):
# Pass on to the view
return getattr(self.view, name)
class View(QGraphicsView):
black_pen = QPen(QColor.fromRgbF(0.0, 0.0, 0.0))
white_pen = QPen(QColor.fromRgbF(1.0, 1.0, 1.0))
handle_width = 10.0
snap_marker_color = QColor.fromRgbF(0.0, 1.0, 0.0)
snap_marker_width = 5.0
snap_distance = 8.0
max_zoom_x = 100000.0
min_zoom_x = 0.01
def __init__(self, uimgr, space, ruler=None):
QGraphicsView.__init__(self)
self.setAlignment(Qt.AlignLeft | Qt.AlignTop)
self.setViewportUpdateMode(self.FullViewportUpdate)
self.setResizeAnchor(self.AnchorUnderMouse)
self.setTransformationAnchor(self.AnchorUnderMouse)
self.ruler = ruler
if not self.ruler:
self.setViewportMargins(0, 30, 0, 0)
self.ruler = TimeRuler(self, timecode=timecode.NtscDropFrame())
self.ruler.move(self.frameWidth(), self.frameWidth())
self.playback_timer = None
self.uimgr = uimgr
self.uimgr.clock_state_changed.connect(self._clock_changed)
self.clock_frame = 0
self.undo_stack = QUndoStack(self)
self.white = False
self.frame = 0
self.blink_timer = self.startTimer(1000)
self.ruler.current_frame_changed.connect(self.handle_ruler_current_frame_changed)
self.setScene(Scene(space, uimgr.asset_list, self.undo_stack))
self._reset_ruler_scroll()
self.set_current_frame(0)
self.scene().sceneRectChanged.connect(self.handle_scene_rect_changed)
self.scene().marker_added.connect(self._handle_marker_changed)
self.scene().marker_removed.connect(self._handle_marker_changed)
self.scale(4 * 24, 1)
self.canvas_group = QActionGroup(self)
self.canvas_bring_forward_action = QAction('Bring Forward', self.canvas_group,
statusTip='Bring the current item(s) forward', triggered=self.canvas_bring_forward,
icon=self.style().standardIcon(QStyle.SP_ArrowUp))
self.canvas_send_backward_action = QAction('Send Backward', self.canvas_group,
statusTip='Send the current item(s) backward', triggered=self.canvas_send_backward,
icon=self.style().standardIcon(QStyle.SP_ArrowDown))
self.top_toolbar = QToolBar(self)
for action in self.canvas_group.actions():
self.top_toolbar.addAction(action)
def get_toolbars(self):
return [self.top_toolbar]
def _clock_changed(self, speed, time, data):
if speed.numerator and self.playback_timer is None:
self.playback_timer = self.startTimer(20)
elif not speed.numerator and self.playback_timer is not None:
self.killTimer(self.playback_timer)
self.playback_timer = None
self._update_clock_frame(time)
def selected_model_items(self):
return self.scene().selected_model_items()
def load_selection(self, items):
return self.scene().load_selection(items)
def scale(self, sx, sy):
self.scale_x = fractions.Fraction(sx)
self.scale_y = fractions.Fraction(sy)
self.ruler.set_scale(sx / self.scene().frame_rate)
self.setTransform(QTransform.fromScale(float(sx), float(sy)))
self._reset_ruler_scroll()
self.scene().update_view_decorations(self)
def set_current_frame(self, frame):
'''
view.set_current_frame(frame)
Moves the view's current frame marker.
'''
self._invalidate_marker(self.frame)
self.frame = frame
self._invalidate_marker(frame)
self.ruler.set_current_frame(frame)
self.uimgr.seek(process.get_frame_time(self.scene().frame_rate, int(frame)))
def _update_clock_frame(self, time=None):
if not time:
time = self.uimgr.get_presentation_time()
frame = process.get_time_frame(self.scene().frame_rate, time)
self._set_clock_frame(frame)
def _set_clock_frame(self, frame):
'''
view._set_clock_frame(frame)
Moves the view's current clock frame marker.
'''
self._invalidate_marker(self.clock_frame)
self.clock_frame = frame
self._invalidate_marker(frame)
def resizeEvent(self, event):
self.ruler.resize(self.width() - self.frameWidth(), 30)
def wheelEvent(self, event):
if event.delta() > 0:
factor = 2 ** (event.delta() / 120)
if self.scale_x * factor > self.max_zoom_x:
return
self.scale(self.scale_x * factor, self.scale_y)
else:
factor = 2 ** (-event.delta() / 120)
if self.scale_x / factor < self.min_zoom_x:
return
self.scale(self.scale_x / factor, self.scale_y)
def handle_scene_rect_changed(self, rect):
self._reset_ruler_scroll()
def handle_ruler_current_frame_changed(self, frame):
self.set_current_frame(frame)
def updateSceneRect(self, rect):
QGraphicsView.updateSceneRect(self, rect)
self._reset_ruler_scroll()
def scrollContentsBy(self, dx, dy):
QGraphicsView.scrollContentsBy(self, dx, dy)
if dx and self.scene():
self._reset_ruler_scroll()
def _reset_ruler_scroll(self):
left = self.mapToScene(0, 0).x() * float(self.scene().frame_rate)
self.ruler.set_left_frame(left)
def _invalidate_marker(self, frame):
# BJC: No, for some reason, invalidateScene() did not work here
top = self.mapFromScene(frame / float(self.scene().frame_rate), self.scene().scene_top)
bottom = self.mapFromScene(frame / float(self.scene().frame_rate), self.scene().scene_bottom)
top = self.mapToScene(top.x() - 1, top.y())
bottom = self.mapToScene(bottom.x() + 1, bottom.y())
self.updateScene([QRectF(top, bottom)])
def timerEvent(self, event):
if event.timerId() == self.blink_timer:
self.white = not self.white
self._invalidate_marker(self.frame)
elif event.timerId() == self.playback_timer:
self._update_clock_frame()
def drawForeground(self, painter, rect):
'''
Draws the marker in the foreground.
'''
QGraphicsView.drawForeground(self, painter, rect)
# Clock frame line
x = self.clock_frame / float(self.scene().frame_rate)
painter.setPen(self.black_pen)
painter.drawLine(QPointF(x, rect.y()), QPointF(x, rect.y() + rect.height()))
# Current frame line, which blinks
x = self.frame / float(self.scene().frame_rate)
painter.setPen(self.white_pen if self.white else self.black_pen)
painter.drawLine(QPointF(x, rect.y()), QPointF(x, rect.y() + rect.height()))
for marker in self.scene().markers:
marker.paint(self, painter, rect)
def _handle_marker_changed(self, marker):
rect = self.viewportTransform().inverted()[0].mapRect(marker.bounding_rect(self))
self.updateScene([rect])
def find_snap_items_horizontal(self, item, time):
'''
Find the nearest horizontal snap point for the given item and time. (The
item is only used to avoid finding it as its own snap point.)
'''
top = self.mapFromScene(time, self.scene().scene_top)
bottom = self.mapFromScene(time, self.scene().scene_bottom)
items = self.items(QRect(top.x() - self.snap_distance, top.y(), self.snap_distance * 2, bottom.y() - top.y()), Qt.IntersectsItemBoundingRect)
# TODO: Find something more generic than video items
items = [a for a in items if isinstance(a, ClipItem) and a is not item]
# Transform the snap_distance into time units
distance = self.viewportTransform().inverted()[0].mapRect(QRectF(0.0, 0.0, self.snap_distance, 1.0)).width()
x = None
#if distance < 1.0:
# distance = 1.0
for item in items:
if abs(item.item.x / item.units_per_second - time) < distance:
x = item.item.x / item.units_per_second
distance = abs(x - time)
if abs((item.item.x + item.item.length) / item.units_per_second - time) < distance:
x = (item.item.x + item.item.length) / item.units_per_second
distance = abs(x - time)
return x
def canvas_bring_forward(self):
items = list(self.view.selected_model_items())
command = None
if len(items) == 0:
return
if len(items) == 1:
# Gosh I hope the active stack is the right one
command = model.BringItemForwardCommand(items[0])
self.undo_group.activeStack().push(command)
self.view.load_selection(items)
return
command = CompoundCommand('Bring items forward',
[model.BringItemForwardCommand(item) for item in items])
self.undo_group.activeStack().push(command)
self.view.load_selection(items)
def canvas_send_backward(self):
items = list(self.view.selected_model_items())
command = None
if len(items) == 0:
return
if len(items) == 1:
# Gosh I hope the active stack is the right one
command = model.SendItemBackCommand(items[0])
self.undo_group.activeStack().push(command)
self.view.load_selection(items)
return
command = CompoundCommand('Send items back',
[model.SendItemBackCommand(item) for item in items])
self.undo_group.activeStack().push(command)
self.view.load_selection(items)
| fluggo/Canvas | fluggo/editor/ui/canvas/view.py | Python | gpl-3.0 | 11,571 | ["Brian"] | f64c3dcf87446568cd76992c9777f8811daf395655c7e1c033942b24c8ec0dea |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provide view augmentations for the dataset."""
import copy
import functools
from typing import Optional, Tuple
import tensorflow as tf
from brave.datasets import datasets
DEFAULT_RANDOM_CONVOLVE_MAX_K = 11
def normalize_video(view: datasets.View) -> datasets.View:
"""Return view with video normalized to range [0, 1]."""
result = copy.copy(view)
result.video = view.video * (1.0 / 255.0)
return result
def random_color_augment_video(view: datasets.View, *,
prob_color_augment: float,
prob_color_drop: float) -> datasets.View:
"""Apply random color augmentations to the video in a view."""
video = _color_default_augm(
view.video,
zero_centering_image=False,
prob_color_augment=prob_color_augment,
prob_color_drop=prob_color_drop)
result = copy.copy(view)
result.video = video
return result
def random_gaussian_blur_video(
view: datasets.View, *, kernel_size: int,
sigma_range: Tuple[float, float]) -> datasets.View:
"""Apply a gaussian blur with a random sigma value in the range sigma_range.
Args:
view: The input view to augment.
kernel_size: The kernel size of the blur kernel.
sigma_range: A random value in this range is chosen as the sigma value for
the gaussian blur.
Returns:
A new view where the video has a gaussian blur applied.
"""
sigma = tf.random.uniform((),
sigma_range[0],
sigma_range[1],
dtype=tf.float32)
def blur(img):
return _gaussian_blur(img, kernel_size=kernel_size, sigma=sigma)
result = copy.copy(view)
result.video = tf.map_fn(blur, view.video, fn_output_signature=tf.float32)
return result
def random_horizontal_flip_video(view: datasets.View) -> datasets.View:
"""Randomly flip all frames within a video."""
flip = tf.random.uniform((), minval=0, maxval=2, dtype=tf.int32)
video = tf.cond(
pred=tf.equal(flip, 1),
true_fn=lambda: tf.image.flip_left_right(view.video),
false_fn=lambda: view.video)
result = copy.copy(view)
result.video = video
return result
def random_convolve_video(view: datasets.View,
*,
max_k=DEFAULT_RANDOM_CONVOLVE_MAX_K) -> datasets.View:
"""Apply a random convolution to the input view's video."""
video = _random_convolve(view.video, max_k=max_k)
result = copy.copy(view)
result.video = video
return result
def _gaussian_blur(image: tf.Tensor,
kernel_size: int,
sigma: float,
padding='SAME'):
"""Blurs the given image with separable convolution.
Args:
image: Tensor of shape [height, width, channels] and dtype float to blur.
kernel_size: Integer Tensor for the size of the blur kernel. This should
be an odd number; if it is even, the actual kernel size used will be
kernel_size + 1.
sigma: Sigma value for gaussian operator.
padding: Padding to use for the convolution. Typically 'SAME' or 'VALID'.
Returns:
A Tensor representing the blurred image.
"""
radius = tf.cast(kernel_size // 2, tf.int32)
kernel_size = radius * 2 + 1
x = tf.cast(tf.range(-radius, radius + 1), tf.float32)
blur_filter = tf.exp(-tf.pow(x, 2.0) /
(2.0 * tf.pow(tf.cast(sigma, tf.float32), 2.0)))
blur_filter /= tf.reduce_sum(blur_filter)
# One vertical and one horizontal filter.
blur_v = tf.reshape(blur_filter, [kernel_size, 1, 1, 1])
blur_h = tf.reshape(blur_filter, [1, kernel_size, 1, 1])
num_channels = tf.shape(image)[-1]
blur_h = tf.tile(blur_h, [1, 1, num_channels, 1])
blur_v = tf.tile(blur_v, [1, 1, num_channels, 1])
expand_batch_dim = image.shape.ndims == 3
if expand_batch_dim:
# Tensorflow requires batched input to convolutions, which we can fake with
# an extra dimension.
image = tf.expand_dims(image, axis=0)
blurred = tf.nn.depthwise_conv2d(
image, blur_h, strides=[1, 1, 1, 1], padding=padding)
blurred = tf.nn.depthwise_conv2d(
blurred, blur_v, strides=[1, 1, 1, 1], padding=padding)
if expand_batch_dim:
blurred = tf.squeeze(blurred, axis=0)
return blurred
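# Note (added comment): the 1-D kernel built above is the sampled Gaussian
#     w[i] = exp(-i**2 / (2 * sigma**2)),  i in [-radius, radius],
# normalized to sum to 1. Applying it first horizontally and then vertically
# via depthwise convolutions is equivalent to, and cheaper than, a full 2-D
# Gaussian blur.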
def _color_default_augm(frames: tf.Tensor,
zero_centering_image: bool = False,
prob_color_augment: float = 0.8,
prob_color_drop: float = 0.0,
seed: Optional[int] = None):
"""Standard color augmentation for videos.
Args:
frames: A float32 tensor of shape [timesteps, input_h, input_w, channels].
zero_centering_image: If `True`, results are in [-1, 1]; if `False`,
results are in [0, 1].
prob_color_augment: Probability of applying color augmentation.
prob_color_drop: Probability of dropping the colors to grayscale.
seed: A seed to use for the random sampling.
Returns:
A tensor of the same shape as the input, with colors possibly altered.
"""
def color_augment(video: tf.Tensor) -> tf.Tensor:
"""Do standard color augmentations."""
# Note the same augmentation will be applied to all frames of the video.
if zero_centering_image:
video = 0.5 * (video + 1.0)
video = tf.image.random_brightness(video, max_delta=32. / 255.)
video = tf.image.random_saturation(video, lower=0.6, upper=1.4)
video = tf.image.random_contrast(video, lower=0.6, upper=1.4)
video = tf.image.random_hue(video, max_delta=0.2)
video = tf.clip_by_value(video, 0.0, 1.0)
if zero_centering_image:
video = 2 * (video - 0.5)
return video
def color_drop(video: tf.Tensor) -> tf.Tensor:
"""Do color drop."""
video = tf.image.rgb_to_grayscale(video)
video = tf.tile(video, [1, 1, 1, 3])
return video
should_color_augment = tf.random.uniform([],
minval=0,
maxval=1,
dtype=tf.float32,
seed=seed)
frames = tf.cond(
pred=tf.less(should_color_augment, tf.cast(prob_color_augment,
tf.float32)),
true_fn=lambda: color_augment(frames),
false_fn=lambda: frames)
should_color_drop = tf.random.uniform([],
minval=0,
maxval=1,
dtype=tf.float32,
seed=seed)
frames = tf.cond(
pred=tf.less(should_color_drop, tf.cast(prob_color_drop, tf.float32)),
true_fn=lambda: color_drop(frames),
false_fn=lambda: frames)
return frames
def _random_convolve(x: tf.Tensor, max_k: int, init='he') -> tf.Tensor:
"""Applies a random convolution of random odd kernel size <= max_k."""
if init == 'he':
he_normal_init = tf.initializers.he_normal
w_init = he_normal_init()
else:
raise NotImplementedError(f'Unknown init: {init} for RandConv.')
_, _, _, ch = x.get_shape().as_list()
# Prepare the switch case operation, depending on the dynamically sampled k.
values_k = range(1, max_k + 1, 2)
nb_values_k = len(values_k)
random_conv_fns = {}
def apply_conv2d_fn(x, k, ch, w_init):
k_h, k_w, k_ic, k_oc = k, k, ch, ch
w_shape = [k_h, k_w, k_ic, k_oc]
strides = 1
w = w_init(w_shape)
return tf.nn.conv2d(x, w, strides, 'SAME', name='random_conv')
for ind_k in range(nb_values_k):
k = 2 * ind_k + 1
apply_conv_k_fn = functools.partial(apply_conv2d_fn, x, k, ch, w_init)
random_conv_fns[ind_k] = apply_conv_k_fn
# Sample k uniformly in 1:max_k:2.
ind_k = tf.cast(tf.floor(tf.random.uniform([], maxval=nb_values_k)), tf.int32)
x = tf.switch_case(ind_k, random_conv_fns, name='sample_random_conv')
return x
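# A minimal usage sketch (added for illustration; assumes a datasets.View
# whose .video is a float32 tensor of shape [T, H, W, 3] with values in
# [0, 255]):
#
#     view = ...  # some datasets.View from the data pipeline
#     view = normalize_video(view)  # scale to [0, 1]
#     view = random_color_augment_video(
#         view, prob_color_augment=0.8, prob_color_drop=0.2)
#     view = random_gaussian_blur_video(
#         view, kernel_size=11, sigma_range=(0.1, 2.0))
#     view = random_horizontal_flip_video(view)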
| deepmind/brave | brave/datasets/augmentations.py | Python | apache-2.0 | 8,591 | ["Gaussian"] | 9dbda54fa56d0542ccaa4927809984436221c240116a86203e736dff961d4a76 |
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import zeroinstall
import os
from zeroinstall.support import tasks
from zeroinstall.injector.model import Interface, Feed, stable, testing, developer, stability_levels
from zeroinstall.injector import writer, namespaces, gpg
from zeroinstall.gtkui import help_box
import gtk
from logging import warn
from dialog import DialogResponse, Template
from impl_list import ImplementationList
import time
import dialog
_dialogs = {} # Interface -> Properties
def enumerate(items):
x = 0
for i in items:
yield x, i
x += 1
def format_para(para):
lines = [l.strip() for l in para.split('\n')]
return ' '.join(lines)
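# Illustration (added comment): format_para collapses a hard-wrapped
# paragraph into a single line, e.g.
#
#     format_para('first line\n  second line')  # -> 'first line second line'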
def have_source_for(policy, interface):
iface_cache = policy.config.iface_cache
# Note: we don't want to actually fetch the source interfaces at
# this point, so we check whether:
# - We have a feed of type 'src' (not fetched), or
# - We have a source implementation in a regular feed
for f in iface_cache.get_feed_imports(interface):
if f.machine == 'src':
return True
# Don't have any src feeds. Do we have a source implementation
# as part of a regular feed?
for x in iface_cache.get_implementations(interface):
if x.machine == 'src':
return True
return False
class Description:
def __init__(self, widgets):
description = widgets.get_widget('description')
description.connect('button-press-event', self.button_press)
self.buffer = description.get_buffer()
self.heading_style = self.buffer.create_tag(underline = True, scale = 1.2)
self.link_style = self.buffer.create_tag(underline = True, foreground = 'blue')
description.set_size_request(-1, 100)
def button_press(self, tv, bev):
if bev.type == gtk.gdk.BUTTON_PRESS and bev.button == 1:
x, y = tv.window_to_buffer_coords(tv.get_window_type(bev.window),
int(bev.x), int(bev.y))
itr = tv.get_iter_at_location(x, y)
if itr and self.link_style in itr.get_tags():
if not itr.begins_tag(self.link_style):
itr.backward_to_tag_toggle(self.link_style)
end = itr.copy()
end.forward_to_tag_toggle(self.link_style)
target = itr.get_text(end).strip()
import browser
browser.open_in_browser(target)
def strtime(self, secs):
try:
from locale import nl_langinfo, D_T_FMT
return time.strftime(nl_langinfo(D_T_FMT), time.localtime(secs))
except (ImportError, ValueError):
return time.ctime(secs)
def set_details(self, iface_cache, feed):
buffer = self.buffer
heading_style = self.heading_style
buffer.delete(buffer.get_start_iter(), buffer.get_end_iter())
iter = buffer.get_start_iter()
if feed is None:
buffer.insert(iter, 'Not yet downloaded.')
return
if isinstance(feed, Exception):
buffer.insert(iter, unicode(feed))
return
buffer.insert_with_tags(iter,
'%s ' % feed.get_name(), heading_style)
buffer.insert(iter, '(%s)' % feed.summary)
buffer.insert(iter, '\n%s\n' % feed.url)
# (converts to local time)
if feed.last_modified:
buffer.insert(iter, '\n' + _('Last upstream change: %s') % self.strtime(feed.last_modified))
if feed.last_checked:
buffer.insert(iter, '\n' + _('Last checked: %s') % self.strtime(feed.last_checked))
last_check_attempt = iface_cache.get_last_check_attempt(feed.url)
if last_check_attempt:
if feed.last_checked and feed.last_checked >= last_check_attempt:
pass # Don't bother reporting successful attempts
else:
buffer.insert(iter, '\n' + _('Last check attempt: %s (failed or in progress)') %
self.strtime(last_check_attempt))
buffer.insert_with_tags(iter, '\n\n' + _('Description') + '\n', heading_style)
paragraphs = [format_para(p) for p in (feed.description or "-").split('\n\n')]
buffer.insert(iter, '\n\n'.join(paragraphs))
buffer.insert(iter, '\n')
need_gap = True
for x in feed.get_metadata(namespaces.XMLNS_IFACE, 'homepage'):
if need_gap:
buffer.insert(iter, '\n')
need_gap = False
buffer.insert(iter, _('Homepage: '))
buffer.insert_with_tags(iter, '%s\n' % x.content, self.link_style)
buffer.insert_with_tags(iter, '\n' + _('Signatures') + '\n', heading_style)
sigs = iface_cache.get_cached_signatures(feed.url)
if sigs:
for sig in sigs:
if isinstance(sig, gpg.ValidSig):
name = _('<unknown>')
details = sig.get_details()
for item in details:
if item[0] == 'uid' and len(item) > 9:
name = item[9]
break
buffer.insert_with_tags(iter, _('Valid signature by "%(name)s"\n- Dated: %(sig_date)s\n- Fingerprint: %(sig_fingerprint)s\n') %
{'name': name, 'sig_date': time.strftime('%c', time.localtime(sig.get_timestamp())), 'sig_fingerprint': sig.fingerprint})
if not sig.is_trusted():
if os.path.isabs(feed.url):
buffer.insert_with_tags(iter, _('WARNING: This key is not in the trusted list') + '\n')
else:
buffer.insert_with_tags(iter, _('WARNING: This key is not in the trusted list (either you removed it, or '
'you trust one of the other signatures)') + '\n')
else:
buffer.insert_with_tags(iter, '%s\n' % sig)
else:
buffer.insert_with_tags(iter, _('No signature information (old style feed or out-of-date cache)') + '\n')
class Feeds:
URI = 0
ARCH = 1
USED = 2
def __init__(self, policy, interface, widgets):
self.policy = policy
self.interface = interface
self.model = gtk.ListStore(str, str, bool)
self.description = Description(widgets)
self.lines = self.build_model()
for line in self.lines:
self.model.append(line)
add_remote_feed_button = widgets.get_widget('add_remote_feed')
add_remote_feed_button.connect('clicked', lambda b: add_remote_feed(policy, widgets.get_widget(), interface))
add_local_feed_button = widgets.get_widget('add_local_feed')
add_local_feed_button.connect('clicked', lambda b: add_local_feed(policy, interface))
self.remove_feed_button = widgets.get_widget('remove_feed')
def remove_feed(button):
model, iter = self.tv.get_selection().get_selected()
feed_uri = model[iter][Feeds.URI]
for x in interface.extra_feeds:
if x.uri == feed_uri:
if x.user_override:
interface.extra_feeds.remove(x)
writer.save_interface(interface)
import main
main.recalculate()
return
else:
dialog.alert(self.get_toplevel(),
_("Can't remove '%s' as you didn't add it.") % feed_uri)
return
raise Exception(_("Missing feed '%s'!") % feed_uri)
self.remove_feed_button.connect('clicked', remove_feed)
self.tv = widgets.get_widget('feeds_list')
self.tv.set_model(self.model)
text = gtk.CellRendererText()
self.tv.append_column(gtk.TreeViewColumn(_('Source'), text, text = Feeds.URI, sensitive = Feeds.USED))
self.tv.append_column(gtk.TreeViewColumn(_('Arch'), text, text = Feeds.ARCH, sensitive = Feeds.USED))
sel = self.tv.get_selection()
sel.set_mode(gtk.SELECTION_BROWSE)
sel.connect('changed', self.sel_changed)
sel.select_path((0,))
def build_model(self):
iface_cache = self.policy.config.iface_cache
usable_feeds = frozenset(self.policy.usable_feeds(self.interface))
unusable_feeds = frozenset(iface_cache.get_feed_imports(self.interface)) - usable_feeds
out = [[self.interface.uri, None, True]]
for feed in usable_feeds:
out.append([feed.uri, feed.arch, True])
for feed in unusable_feeds:
out.append([feed.uri, feed.arch, False])
return out
def sel_changed(self, sel):
iface_cache = self.policy.config.iface_cache
model, miter = sel.get_selected()
if not miter: return # build in progress
feed_url = model[miter][Feeds.URI]
# Only enable removing user_override feeds
enable_remove = False
for x in self.interface.extra_feeds:
if x.uri == feed_url:
if x.user_override:
enable_remove = True
self.remove_feed_button.set_sensitive( enable_remove )
try:
self.description.set_details(iface_cache, iface_cache.get_feed(feed_url))
except zeroinstall.SafeException, ex:
self.description.set_details(iface_cache, ex)
def updated(self):
new_lines = self.build_model()
if new_lines != self.lines:
self.lines = new_lines
self.model.clear()
for line in self.lines:
self.model.append(line)
self.tv.get_selection().select_path((0,))
else:
self.sel_changed(self.tv.get_selection())
class Properties:
interface = None
use_list = None
window = None
policy = None
def __init__(self, policy, interface, compile, show_versions = False):
self.policy = policy
widgets = Template('interface_properties')
self.interface = interface
window = widgets.get_widget('interface_properties')
self.window = window
window.set_title(_('Properties for %s') % interface.get_name())
window.set_default_size(-1, gtk.gdk.screen_height() / 3)
self.compile_button = widgets.get_widget('compile')
self.compile_button.connect('clicked', lambda b: compile(interface))
window.set_default_response(gtk.RESPONSE_CANCEL)
def response(dialog, resp):
if resp == gtk.RESPONSE_CANCEL:
window.destroy()
elif resp == gtk.RESPONSE_HELP:
properties_help.display()
window.connect('response', response)
notebook = widgets.get_widget('interface_notebook')
assert notebook
feeds = Feeds(policy, interface, widgets)
stability = widgets.get_widget('preferred_stability')
stability.set_active(0)
if interface.stability_policy:
i = [stable, testing, developer].index(interface.stability_policy)
i += 1
if i == 0:
warn(_("Unknown stability policy %s"), interface.stability_policy)
else:
i = 0
stability.set_active(i)
def set_stability_policy(combo, stability = stability): # (pygtk bug?)
i = stability.get_active()
if i == 0:
new_stability = None
else:
name = ['stable', 'testing', 'developer'][i-1]
new_stability = stability_levels[name]
interface.set_stability_policy(new_stability)
writer.save_interface(interface)
import main
main.recalculate()
stability.connect('changed', set_stability_policy)
self.use_list = ImplementationList(policy, interface, widgets)
self.update_list()
feeds.tv.grab_focus()
def updated():
self.update_list()
feeds.updated()
self.shade_compile()
window.connect('destroy', lambda s: policy.watchers.remove(updated))
policy.watchers.append(updated)
self.shade_compile()
if show_versions:
notebook.next_page()
def show(self):
self.window.show()
def destroy(self):
self.window.destroy()
def shade_compile(self):
self.compile_button.set_sensitive(have_source_for(self.policy, self.interface))
def update_list(self):
ranked_items = self.policy.solver.details.get(self.interface, None)
if ranked_items is None:
# The Solver didn't get this far, but we should still display them!
ranked_items = [(impl, _("(solve aborted before here)"))
for impl in self.interface.implementations.values()]
# Always sort by version
ranked_items.sort()
self.use_list.set_items(ranked_items)
@tasks.async
def add_remote_feed(policy, parent, interface):
try:
iface_cache = policy.config.iface_cache
d = gtk.MessageDialog(parent, 0, gtk.MESSAGE_QUESTION, gtk.BUTTONS_CANCEL,
_('Enter the URL of the new source of implementations of this interface:'))
d.add_button(gtk.STOCK_ADD, gtk.RESPONSE_OK)
d.set_default_response(gtk.RESPONSE_OK)
entry = gtk.Entry()
align = gtk.VBox(False, 0)
align.set_border_width(4)
align.add(entry)
d.vbox.pack_start(align)
entry.set_activates_default(True)
entry.set_text('')
d.vbox.show_all()
error_label = gtk.Label('')
error_label.set_padding(4, 4)
align.pack_start(error_label)
d.show()
def error(message):
if message:
error_label.set_text(message)
error_label.show()
else:
error_label.hide()
while True:
got_response = DialogResponse(d)
yield got_response
tasks.check(got_response)
resp = got_response.response
error(None)
if resp == gtk.RESPONSE_OK:
try:
url = entry.get_text()
if not url:
raise zeroinstall.SafeException(_('Enter a URL'))
fetch = policy.fetcher.download_and_import_feed(url, iface_cache)
if fetch:
d.set_sensitive(False)
yield fetch
d.set_sensitive(True)
tasks.check(fetch)
iface = iface_cache.get_interface(url)
d.set_sensitive(True)
if not iface.name:
error(_('Failed to read interface'))
return
if not iface.feed_for:
error(_("Feed '%(feed)s' is not a feed for '%(feed_for)s'.") % {'feed': iface.get_name(), 'feed_for': interface.get_name()})
elif interface.uri not in iface.feed_for:
error(_("This is not a feed for '%(uri)s'.\nOnly for:\n%(feed_for)s") %
{'uri': interface.uri, 'feed_for': '\n'.join(iface.feed_for)})
elif iface.uri in [f.uri for f in interface.extra_feeds]:
error(_("Feed from '%s' has already been added!") % iface.uri)
else:
interface.extra_feeds.append(Feed(iface.uri, arch = None, user_override = True))
writer.save_interface(interface)
d.destroy()
import main
main.recalculate()
except zeroinstall.SafeException, ex:
error(str(ex))
else:
d.destroy()
return
except Exception, ex:
import traceback
traceback.print_exc()
policy.handler.report_error(ex)
def add_local_feed(policy, interface):
chooser = gtk.FileChooserDialog(_('Select XML feed file'), action=gtk.FILE_CHOOSER_ACTION_OPEN, buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
def ok(feed):
from zeroinstall.injector import reader
try:
feed_targets = policy.get_feed_targets(feed)
if interface not in feed_targets:
raise Exception(_("Not a valid feed for '%(uri)s'; this is a feed for:\n%(feed_for)s") %
{'uri': interface.uri,
'feed_for': '\n'.join([f.uri for f in feed_targets])})
if feed in [f.uri for f in interface.extra_feeds]:
dialog.alert(None, _('This feed is already registered.'))
else:
interface.extra_feeds.append(Feed(feed, user_override = True, arch = None))
writer.save_interface(interface)
chooser.destroy()
reader.update_from_cache(interface)
import main
main.recalculate()
except Exception, ex:
dialog.alert(None, _("Error in feed file '%(feed)s':\n\n%(exception)s") % {'feed': feed, 'exception': str(ex)})
def check_response(widget, response):
if response == gtk.RESPONSE_OK:
ok(widget.get_filename())
elif response == gtk.RESPONSE_CANCEL:
widget.destroy()
chooser.connect('response', check_response)
chooser.show()
def edit(policy, interface, compile, show_versions = False):
assert isinstance(interface, Interface)
if interface in _dialogs:
_dialogs[interface].destroy()
_dialogs[interface] = Properties(policy, interface, compile, show_versions = show_versions)
_dialogs[interface].show()
properties_help = help_box.HelpBox(_("Injector Properties Help"),
(_('Interface properties'), '\n' +
_("""This window displays information about an interface. There are two tabs at the top: \
Feeds shows the places where the injector looks for implementations of the interface, while \
Versions shows the list of implementations found (from all feeds) in order of preference.""")),
(_('The Feeds tab'), '\n' +
_("""At the top is a list of feeds. By default, the injector uses the full name of the interface \
as the default feed location (so if you ask it to run the program "http://foo/bar.xml" then it will \
by default get the list of versions by downloading "http://foo/bar.xml".
You can add and remove feeds using the buttons on the right. The main feed may also add \
some extra feeds itself. If you've checked out a developer version of a program, you can use \
the 'Add Local Feed...' button to let the injector know about it, for example.
Below the list of feeds is a box describing the selected one:
- At the top is its short name.
- Below that is the address (a URL or filename).
- 'Last upstream change' shows the version of the cached copy of the interface file.
- 'Last checked' is the last time a fresh copy of the upstream interface file was \
downloaded.
- Then there is a longer description of the interface.""")),
(_('The Versions tab'), '\n' +
_("""This tab shows a list of all known implementations of the interface, from all the feeds. \
The columns have the following meanings:
Version gives the version number. High-numbered versions are considered to be \
better than low-numbered ones.
Released gives the date this entry was added to the feed.
Stability is 'stable' if the implementation is believed to be stable, 'buggy' if \
it is known to contain serious bugs, and 'testing' if its stability is not yet \
known. This information is normally supplied and updated by the author of the \
software, but you can override their rating by right-clicking here (overridden \
values are shown in upper-case). You can also use the special level 'preferred'.
Fetch indicates how much data needs to be downloaded to get this version if you don't \
have it. If the implementation has already been downloaded to your computer, \
it will say (cached). (local) means that you installed this version manually and \
told Zero Install about it by adding a feed. (package) means that this version \
is provided by your distribution's package manager, not by Zero Install. \
In off-line mode, only cached implementations are considered for use.
Arch indicates what kind of computer system the implementation is for, or 'any' \
if it works with all types of system.""") + '\n'),
(_('Sort order'), '\n' +
_("""The implementations are ordered by version number (highest first), with the \
currently selected one in bold. This is the "best" usable version.
Unusable ones are those for incompatible \
architectures, those marked as 'buggy' or 'insecure', versions explicitly marked as incompatible with \
another interface you are using and, in off-line mode, uncached implementations. Unusable \
implementations are shown crossed out.
For the usable implementations, the order is as follows:
- Preferred implementations come first.
- Then, if network use is set to 'Minimal', cached implementations come before \
non-cached.
- Then, implementations at or above the selected stability level come before all others.
- Then, higher-numbered versions come before low-numbered ones.
- Then cached come before non-cached (for 'Full' network use mode).""") + '\n'),
(_('Compiling'), '\n' +
_("""If there is no binary available for your system then you may be able to compile one from \
source by clicking on the Compile button. If no source is available, the Compile button will \
be shown shaded.""") + '\n'))
| pombredanne/zero-install | zeroinstall/0launch-gui/properties.py | Python | lgpl-2.1 | 18,674 | ["VisIt"] | 5a7eea2406467b02da416444763fa734a8ad2aa04b41a3d57d45403af954e207 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements equivalents of the basic ComputedEntry objects, which
is the basic entity that can be used to perform many analyses. ComputedEntries
contain calculated information, typically from VASP or other electronic
structure codes. For example, ComputedEntries can be used as inputs for phase
diagram analysis.
"""
import abc
import json
import os
import warnings
from itertools import combinations
from typing import List
import numpy as np
from monty.json import MontyDecoder, MontyEncoder, MSONable
from scipy.interpolate import interp1d
from uncertainties import ufloat
from pymatgen.core.composition import Composition
from pymatgen.core.structure import Structure
from pymatgen.entries import Entry
__author__ = "Ryan Kingsbury, Matt McDermott, Shyue Ping Ong, Anubhav Jain"
__copyright__ = "Copyright 2011-2020, The Materials Project"
__version__ = "1.1"
__date__ = "April 2020"
with open(os.path.join(os.path.dirname(__file__), "data/g_els.json")) as f:
G_ELEMS = json.load(f)
with open(os.path.join(os.path.dirname(__file__), "data/nist_gas_gf.json")) as f:
G_GASES = json.load(f)
class EnergyAdjustment(MSONable):
"""
Lightweight class to contain information about an energy adjustment or
energy correction.
"""
def __init__(
self,
value,
uncertainty=np.nan,
name="Manual adjustment",
cls=None,
description="",
):
"""
Args:
value: float, value of the energy adjustment in eV
uncertainty: float, uncertainty of the energy adjustment in eV. Default: np.nan
name: str, human-readable name of the energy adjustment.
(Default: Manual adjustment)
cls: dict, Serialized Compatibility class used to generate the energy adjustment. (Default: None)
description: str, human-readable explanation of the energy adjustment.
"""
self.name = name
self.cls = cls if cls else {}
self.description = description
self._value = value
self._uncertainty = uncertainty
@property
def value(self):
"""
Return the value of the energy correction in eV.
"""
return self._value
@property
def uncertainty(self):
"""
Return the uncertainty in the value of the energy adjustment in eV
"""
return self._uncertainty
@abc.abstractmethod
def normalize(self, factor):
"""
Scale the value of the current energy adjustment by factor in-place.
This method is utilized in ComputedEntry.normalize() to scale the energies to a formula unit basis
(e.g. E_Fe6O9 = 3 x E_Fe2O3).
"""
@property
@abc.abstractmethod
def explain(self):
"""
Return an explanation of how the energy adjustment is calculated.
"""
def __repr__(self):
output = [
"{}:".format(self.__class__.__name__),
" Name: {}".format(self.name),
" Value: {:.3f} eV".format(self.value),
" Uncertainty: {:.3f} eV".format(self.uncertainty),
" Description: {}".format(self.explain),
" Generated by: {}".format(self.cls.get("@class", None)),
]
return "\n".join(output)
class ConstantEnergyAdjustment(EnergyAdjustment):
"""
A constant energy adjustment applied to a ComputedEntry. Useful in energy referencing
schemes such as the Aqueous energy referencing scheme.
"""
def __init__(
self,
value,
uncertainty=np.nan,
name="Constant energy adjustment",
cls=None,
description="Constant energy adjustment",
):
"""
Args:
value: float, value of the energy adjustment in eV
uncertainty: float, uncertainty of the energy adjustment in eV. (Default: np.nan)
name: str, human-readable name of the energy adjustment.
(Default: Constant energy adjustment)
cls: dict, Serialized Compatibility class used to generate the energy
adjustment. (Default: None)
description: str, human-readable explanation of the energy adjustment.
"""
super().__init__(value, uncertainty, name=name, cls=cls, description=description)
self._value = value
self._uncertainty = uncertainty
@property
def explain(self):
"""
Return an explanation of how the energy adjustment is calculated.
"""
return self.description + " ({:.3f} eV)".format(self.value)
def normalize(self, factor):
"""
Normalize energy adjustment (in place), dividing value/uncertainty by a
factor.
:param factor: factor to divide by
"""
self._value /= factor
self._uncertainty /= factor
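# A brief usage sketch (added for illustration; the values are hypothetical):
#
#     adj = ConstantEnergyAdjustment(-1.5, uncertainty=0.3)
#     adj.normalize(3)    # e.g. rescale from an E_Fe6O9 to an E_Fe2O3 basis
#     adj.value           # -> -0.5 eV
#     adj.uncertainty     # -> 0.1 eV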
class ManualEnergyAdjustment(ConstantEnergyAdjustment):
"""
A manual energy adjustment applied to a ComputedEntry.
"""
def __init__(self, value):
"""
Args:
value: float, value of the energy adjustment in eV
"""
name = "Manual energy adjustment"
description = "Manual energy adjustment"
super().__init__(value, name=name, cls=None, description=description)
class CompositionEnergyAdjustment(EnergyAdjustment):
"""
An energy adjustment applied to a ComputedEntry based on the atomic composition.
Used in various DFT energy correction schemes.
"""
def __init__(
self,
adj_per_atom,
n_atoms,
uncertainty_per_atom=np.nan,
name="",
cls=None,
description="Composition-based energy adjustment",
):
"""
Args:
adj_per_atom: float, energy adjustment to apply per atom, in eV/atom
n_atoms: float or int, number of atoms.
uncertainty_per_atom: float, uncertainty in energy adjustment to apply per atom, in eV/atom.
(Default: np.nan)
name: str, human-readable name of the energy adjustment.
(Default: "")
cls: dict, Serialized Compatibility class used to generate the energy
adjustment. (Default: None)
description: str, human-readable explanation of the energy adjustment.
"""
self._adj_per_atom = adj_per_atom
self.uncertainty_per_atom = uncertainty_per_atom
self.n_atoms = n_atoms
self.cls = cls if cls else {}
self.name = name
self.description = description
@property
def value(self):
"""
Return the value of the energy adjustment in eV.
"""
return self._adj_per_atom * self.n_atoms
@property
def uncertainty(self):
"""
        Return the uncertainty of the energy adjustment in eV.
"""
return self.uncertainty_per_atom * self.n_atoms
@property
def explain(self):
"""
        Return an explanation of how the energy adjustment is calculated.
"""
return self.description + " ({:.3f} eV/atom x {} atoms)".format(self._adj_per_atom, self.n_atoms)
def normalize(self, factor):
"""
Normalize energy adjustment (in place), dividing value/uncertainty by a
factor.
:param factor: factor to divide by
"""
self.n_atoms /= factor
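# Illustrative sketch of CompositionEnergyAdjustment (made-up numbers; real
# per-atom values come from a Compatibility scheme). Since value is just
# adj_per_atom * n_atoms, normalize() only needs to rescale n_atoms:
#
#     adj = CompositionEnergyAdjustment(-0.7, 3, uncertainty_per_atom=0.01,
#                                       name="anion correction")
#     adj.value          # -0.7 * 3 = -2.1 eV
#     adj.uncertainty    #  0.01 * 3 =  0.03 eV
#     adj.normalize(3)   # e.g. Fe6O9 -> Fe2O3 basis
#     adj.value          # -0.7 eV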
class TemperatureEnergyAdjustment(EnergyAdjustment):
"""
An energy adjustment applied to a ComputedEntry based on the temperature.
Used, for example, to add entropy to DFT energies.
"""
def __init__(
self,
adj_per_deg,
temp,
n_atoms,
uncertainty_per_deg=np.nan,
name="",
cls=None,
description="Temperature-based energy adjustment",
):
"""
Args:
            adj_per_deg: float, energy adjustment to apply per degree K, in eV/K/atom
temp: float, temperature in Kelvin
n_atoms: float or int, number of atoms
            uncertainty_per_deg: float, uncertainty in energy adjustment to apply per degree K,
                in eV/K/atom. (Default: np.nan)
name: str, human-readable name of the energy adjustment.
(Default: "")
cls: dict, Serialized Compatibility class used to generate the energy
adjustment. (Default: None)
description: str, human-readable explanation of the energy adjustment.
"""
self._adj_per_deg = adj_per_deg
self.uncertainty_per_deg = uncertainty_per_deg
self.temp = temp
self.n_atoms = n_atoms
self.name = name
self.cls = cls if cls else {}
self.description = description
@property
def value(self):
"""
Return the value of the energy correction in eV.
"""
return self._adj_per_deg * self.temp * self.n_atoms
@property
def uncertainty(self):
"""
        Return the uncertainty of the energy adjustment in eV.
"""
return self.uncertainty_per_deg * self.temp * self.n_atoms
@property
def explain(self):
"""
        Return an explanation of how the energy adjustment is calculated.
"""
return self.description + " ({:.4f} eV/K/atom x {} K x {} atoms)".format(
self._adj_per_deg, self.temp, self.n_atoms
)
def normalize(self, factor):
"""
Normalize energy adjustment (in place), dividing value/uncertainty by a
factor.
:param factor: factor to divide by
"""
self.n_atoms /= factor
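# Illustrative sketch of TemperatureEnergyAdjustment (made-up numbers). The
# correction scales linearly with temperature and system size:
# value = adj_per_deg * temp * n_atoms.
#
#     adj = TemperatureEnergyAdjustment(-1.0e-4, 298, 2,
#                                       uncertainty_per_deg=1.0e-5)
#     adj.value          # -1.0e-4 * 298 * 2 = -0.0596 eV
#     adj.uncertainty    #  1.0e-5 * 298 * 2 =  0.00596 eV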
class ComputedEntry(Entry):
"""
Lightweight Entry object for computed data. Contains facilities
for applying corrections to the .energy attribute and for storing
calculation parameters.
"""
def __init__(
self,
composition: Composition,
energy: float,
correction: float = 0.0,
energy_adjustments: list = None,
parameters: dict = None,
data: dict = None,
entry_id: object = None,
):
"""
Initializes a ComputedEntry.
Args:
composition (Composition): Composition of the entry. For
flexibility, this can take the form of all the typical input
taken by a Composition, including a {symbol: amt} dict,
a string formula, and others.
energy (float): Energy of the entry. Usually the final calculated
energy from VASP or other electronic structure codes.
            correction (float): Manually set an energy correction. Cannot be
                combined with energy_adjustments; a ValueError is raised if
                both are specified.
energy_adjustments: An optional list of EnergyAdjustment to
be applied to the energy. This is used to modify the energy for
certain analyses. Defaults to None.
parameters: An optional dict of parameters associated with
the entry. Defaults to None.
data: An optional dict of any additional data associated
with the entry. Defaults to None.
entry_id: An optional id to uniquely identify the entry.
"""
super().__init__(composition, energy)
self.energy_adjustments = energy_adjustments if energy_adjustments else []
if correction != 0.0:
if energy_adjustments:
raise ValueError(
"Argument conflict! Setting correction = {:.3f} conflicts "
"with setting energy_adjustments. Specify one or the "
"other.".format(correction)
)
self.correction = correction
self.parameters = parameters if parameters else {}
self.data = data if data else {}
self.entry_id = entry_id
self.name = self.composition.reduced_formula
@property
def uncorrected_energy(self) -> float:
"""
Returns:
float: the *uncorrected* energy of the entry
"""
return self._energy
@property
def energy(self) -> float:
"""
:return: the *corrected* energy of the entry.
"""
return self.uncorrected_energy + self.correction
@property
def uncorrected_energy_per_atom(self) -> float:
"""
Returns:
float: the *uncorrected* energy of the entry, normalized by atoms
(units of eV/atom)
"""
return self.uncorrected_energy / self.composition.num_atoms
@property
def correction(self) -> float:
"""
Returns:
float: the total energy correction / adjustment applied to the entry,
in eV.
"""
# adds to ufloat(0.0, 0.0) to ensure that no corrections still result in ufloat object
corr = ufloat(0.0, 0.0) + sum([ufloat(ea.value, ea.uncertainty) for ea in self.energy_adjustments])
return corr.nominal_value
@correction.setter
def correction(self, x: float) -> None:
corr = ManualEnergyAdjustment(x)
self.energy_adjustments = [corr]
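    # Illustrative sketch of the correction setter (made-up numbers; "Fe2O3"
    # is a formula string, which Composition accepts). Setting .correction
    # discards any existing adjustments and replaces them with a single
    # ManualEnergyAdjustment:
    #
    #     entry = ComputedEntry("Fe2O3", -67.8)
    #     entry.correction = -2.0
    #     entry.correction   # -2.0 (one ManualEnergyAdjustment)
    #     entry.energy       # -67.8 + (-2.0) = -69.8 eV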
@property
def correction_per_atom(self) -> float:
"""
Returns:
float: the total energy correction / adjustment applied to the entry,
normalized by atoms (units of eV/atom)
"""
return self.correction / self.composition.num_atoms
@property
def correction_uncertainty(self) -> float:
"""
Returns:
float: the uncertainty of the energy adjustments applied to the entry, in eV
"""
# adds to ufloat(0.0, 0.0) to ensure that no corrections still result in ufloat object
unc = ufloat(0.0, 0.0) + sum(
[
ufloat(ea.value, ea.uncertainty) if not np.isnan(ea.uncertainty) else ufloat(ea.value, 0)
for ea in self.energy_adjustments
]
)
if unc.nominal_value != 0 and unc.std_dev == 0:
return np.nan
return unc.std_dev
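    # Illustrative sketch of uncertainty propagation (made-up numbers).
    # Independent adjustment uncertainties combine in quadrature via the
    # uncertainties package: sqrt(0.05**2 + 0.12**2) = 0.13.
    #
    #     entry = ComputedEntry("Fe2O3", -67.8, energy_adjustments=[
    #         ConstantEnergyAdjustment(-1.0, uncertainty=0.05),
    #         ConstantEnergyAdjustment(-0.5, uncertainty=0.12),
    #     ])
    #     entry.correction              # -1.5 eV
    #     entry.correction_uncertainty  #  0.13 eV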
@property
def correction_uncertainty_per_atom(self) -> float:
"""
Returns:
float: the uncertainty of the energy adjustments applied to the entry,
normalized by atoms (units of eV/atom)
"""
return self.correction_uncertainty / self.composition.num_atoms
def normalize(self, mode: str = "formula_unit") -> "ComputedEntry":
"""
Normalize the entry's composition and energy.
Args:
mode: "formula_unit" is the default, which normalizes to
composition.reduced_formula. The other option is "atom", which
normalizes such that the composition amounts sum to 1.
"""
factor = self._normalization_factor(mode)
new_composition = self._composition / factor
new_energy = self._energy / factor
new_entry_dict = self.as_dict()
new_entry_dict["composition"] = new_composition.as_dict()
new_entry_dict["energy"] = new_energy
# TODO: make sure EnergyAdjustments are _also_ immutable to avoid this hacking
new_energy_adjustments = MontyDecoder().process_decoded(new_entry_dict["energy_adjustments"])
for ea in new_energy_adjustments:
ea.normalize(factor)
new_entry_dict["energy_adjustments"] = [ea.as_dict() for ea in new_energy_adjustments]
return self.from_dict(new_entry_dict)
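    # Illustrative normalization sketch (made-up energy). For Fe6O9 the
    # reduced formula is Fe2O3 with a factor of 3, so "formula_unit" mode
    # divides the composition, the energy, and every adjustment by 3:
    #
    #     entry = ComputedEntry("Fe6O9", -120.0)
    #     fu_entry = entry.normalize()          # default mode="formula_unit"
    #     fu_entry.uncorrected_energy           # -40.0 eV per Fe2O3
    #     atom_entry = entry.normalize("atom")  # composition amounts sum to 1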
def __repr__(self) -> str:
n_atoms = self.composition.num_atoms
output = [
"{} {:<10} - {:<12} ({})".format(
self.entry_id,
self.__class__.__name__,
self.composition.formula,
self.composition.reduced_formula,
),
"{:<24} = {:<9.4f} eV ({:<8.4f} eV/atom)".format(
"Energy (Uncorrected)", self._energy, self._energy / n_atoms
),
"{:<24} = {:<9.4f} eV ({:<8.4f} eV/atom)".format("Correction", self.correction, self.correction / n_atoms),
"{:<24} = {:<9.4f} eV ({:<8.4f} eV/atom)".format("Energy (Final)", self.energy, self.energy_per_atom),
"Energy Adjustments:",
]
if len(self.energy_adjustments) == 0:
output.append(" None")
else:
for e in self.energy_adjustments:
output.append(" {:<23}: {:<9.4f} eV ({:<8.4f} eV/atom)".format(e.name, e.value, e.value / n_atoms))
output.append("Parameters:")
for k, v in self.parameters.items():
output.append(" {:<22} = {}".format(k, v))
output.append("Data:")
for k, v in self.data.items():
output.append(" {:<22} = {}".format(k, v))
return "\n".join(output)
def __str__(self):
return self.__repr__()
def __eq__(self, other):
# NOTE: Scaled duplicates i.e. physically equivalent materials
# are not equal unless normalized separately.
if self is other:
return True
# Equality is defined based on composition and energy
# If structures are involved, it is assumed that a {composition, energy} is
# vanishingly unlikely to be the same if the structures are different
if not np.allclose(self.energy, other.energy):
return False
# if entry_ids are equivalent, skip the more expensive composition check
if self.entry_id and other.entry_id and self.entry_id == other.entry_id:
return True
if self.composition != other.composition:
return False
# assumes that data, parameters, corrections are equivalent
return True
@classmethod
def from_dict(cls, d) -> "ComputedEntry":
"""
:param d: Dict representation.
:return: ComputedEntry
"""
dec = MontyDecoder()
# the first block here is for legacy ComputedEntry that were
# serialized before we had the energy_adjustments attribute.
if d["correction"] != 0 and not d.get("energy_adjustments"):
return cls(
d["composition"],
d["energy"],
d["correction"],
parameters={k: dec.process_decoded(v) for k, v in d.get("parameters", {}).items()},
data={k: dec.process_decoded(v) for k, v in d.get("data", {}).items()},
entry_id=d.get("entry_id", None),
)
# this is the preferred / modern way of instantiating ComputedEntry
# we don't pass correction explicitly because it will be calculated
# on the fly from energy_adjustments
return cls(
d["composition"],
d["energy"],
correction=0,
energy_adjustments=[dec.process_decoded(e) for e in d.get("energy_adjustments", {})],
parameters={k: dec.process_decoded(v) for k, v in d.get("parameters", {}).items()},
data={k: dec.process_decoded(v) for k, v in d.get("data", {}).items()},
entry_id=d.get("entry_id", None),
)
def as_dict(self) -> dict:
"""
:return: MSONable dict.
"""
return_dict = super().as_dict()
return_dict.update(
{
"entry_id": self.entry_id,
"correction": self.correction,
"energy_adjustments": json.loads(json.dumps(self.energy_adjustments, cls=MontyEncoder)),
"parameters": json.loads(json.dumps(self.parameters, cls=MontyEncoder)),
"data": json.loads(json.dumps(self.data, cls=MontyEncoder)),
}
)
return return_dict
def __hash__(self) -> int:
# NOTE It is assumed that the user will ensure entry_id is a
# unique identifier for ComputedEntry type classes.
if self.entry_id is not None:
return hash(f"{self.__class__.__name__}{self.entry_id}")
return super().__hash__()
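# Illustrative round-trip sketch (made-up values; "my-id-1" is a hypothetical
# entry_id). as_dict()/from_dict() are MSONable, and equality is defined on
# energy and composition, so a decoded copy compares equal to the original:
#
#     entry = ComputedEntry("Fe2O3", -67.8, correction=-2.0, entry_id="my-id-1")
#     copy = ComputedEntry.from_dict(entry.as_dict())
#     copy == entry               # True
#     hash(copy) == hash(entry)   # True: both hash on the shared entry_id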
class ComputedStructureEntry(ComputedEntry):
"""
A heavier version of ComputedEntry which contains a structure as well. The
structure is needed for some analyses.
"""
def __init__(
self,
structure: Structure,
energy: float,
correction: float = 0.0,
composition: Composition = None,
energy_adjustments: list = None,
parameters: dict = None,
data: dict = None,
entry_id: object = None,
):
"""
Initializes a ComputedStructureEntry.
Args:
structure (Structure): The actual structure of an entry.
energy (float): Energy of the entry. Usually the final calculated
energy from VASP or other electronic structure codes.
            correction (float): A correction to be applied to the energy.
                Defaults to 0.0. Cannot be combined with energy_adjustments.
            composition (Composition): An optional Composition override; if
                provided, it must match the composition of the structure.
                Defaults to None, meaning the structure's composition is used.
            energy_adjustments: An optional list of EnergyAdjustment to
be applied to the energy. This is used to modify the energy for
certain analyses. Defaults to None.
parameters: An optional dict of parameters associated with
the entry. Defaults to None.
data: An optional dict of any additional data associated
with the entry. Defaults to None.
entry_id: An optional id to uniquely identify the entry.
"""
if composition:
composition = Composition(composition)
if (
composition.get_integer_formula_and_factor()[0]
!= structure.composition.get_integer_formula_and_factor()[0]
):
raise ValueError("Mismatching composition provided.")
else:
composition = structure.composition
super().__init__(
composition,
energy,
correction=correction,
energy_adjustments=energy_adjustments,
parameters=parameters,
data=data,
entry_id=entry_id,
)
self._structure = structure
@property
def structure(self) -> Structure:
"""
:return: the structure of the entry.
"""
return self._structure
def as_dict(self) -> dict:
"""
:return: MSONAble dict.
"""
d = super().as_dict()
d["structure"] = self.structure.as_dict()
return d
@classmethod
def from_dict(cls, d) -> "ComputedStructureEntry":
"""
:param d: Dict representation.
:return: ComputedStructureEntry
"""
dec = MontyDecoder()
# the first block here is for legacy ComputedEntry that were
# serialized before we had the energy_adjustments attribute.
if d["correction"] != 0 and not d.get("energy_adjustments"):
struct = dec.process_decoded(d["structure"])
return cls(
struct,
d["energy"],
correction=d["correction"],
parameters={k: dec.process_decoded(v) for k, v in d.get("parameters", {}).items()},
data={k: dec.process_decoded(v) for k, v in d.get("data", {}).items()},
entry_id=d.get("entry_id", None),
)
# this is the preferred / modern way of instantiating ComputedEntry
# we don't pass correction explicitly because it will be calculated
# on the fly from energy_adjustments
return cls(
dec.process_decoded(d["structure"]),
d["energy"],
composition=d.get("composition", None),
correction=0,
energy_adjustments=[dec.process_decoded(e) for e in d.get("energy_adjustments", {})],
parameters={k: dec.process_decoded(v) for k, v in d.get("parameters", {}).items()},
data={k: dec.process_decoded(v) for k, v in d.get("data", {}).items()},
entry_id=d.get("entry_id", None),
)
def normalize(self, mode: str = "formula_unit") -> "ComputedStructureEntry":
"""
Normalize the entry's composition and energy. The structure remains
unchanged.
Args:
mode: "formula_unit" is the default, which normalizes to
composition.reduced_formula. The other option is "atom",
which normalizes such that the composition amounts sum to 1.
"""
# TODO this should raise TypeError
# raise TypeError("You cannot normalize a structure.")
warnings.warn(
(
f"Normalization of a `{self.__class__.__name__}` makes "
"`self.composition` and `self.structure.composition` inconsistent"
" - please use self.composition for all further calculations."
)
)
# TODO: find a better solution for creating copies instead of as/from dict
factor = self._normalization_factor(mode)
d = super().normalize(mode).as_dict()
d["structure"] = self.structure.as_dict()
entry = self.from_dict(d)
entry._composition /= factor
return entry
class GibbsComputedStructureEntry(ComputedStructureEntry):
"""
An extension to ComputedStructureEntry which includes the estimated Gibbs
free energy of formation via a machine-learned model.
"""
def __init__(
self,
structure: Structure,
formation_enthalpy_per_atom: float,
temp: float = 300,
gibbs_model: str = "SISSO",
composition: Composition = None,
correction: float = 0.0,
energy_adjustments: list = None,
parameters: dict = None,
data: dict = None,
entry_id: object = None,
):
"""
Args:
structure (Structure): The pymatgen Structure object of an entry.
            formation_enthalpy_per_atom (float): Formation enthalpy of the entry;
                must be calculated using phase diagram construction (eV/atom).
temp (float): Temperature in Kelvin. If temperature is not selected from
one of [300, 400, 500, ... 2000 K], then free energies will
be interpolated. Defaults to 300 K.
gibbs_model (str): Model for Gibbs Free energy. Currently the default (and
only supported) option is "SISSO", the descriptor created by Bartel et
al. (2018) -- see reference in documentation.
correction (float): A correction to be applied to the energy. Defaults to 0
parameters (dict): An optional dict of parameters associated with
the entry. Defaults to None.
data (dict): An optional dict of any additional data associated
with the entry. Defaults to None.
entry_id: An optional id to uniquely identify the entry.
"""
if temp < 300 or temp > 2000:
raise ValueError("Temperature must be selected from range: [300, 2000] K.")
integer_formula, _ = structure.composition.get_integer_formula_and_factor()
self.experimental = False
if integer_formula in G_GASES.keys():
self.experimental = True
if "Experimental" not in str(entry_id):
entry_id = f"{entry_id} (Experimental)"
super().__init__(
structure,
energy=0, # placeholder, energy reassigned at end of __init__
composition=composition,
correction=correction,
energy_adjustments=energy_adjustments,
parameters=parameters,
data=data,
entry_id=entry_id,
)
self.temp = temp
self.gibbs_model = gibbs_model
self.formation_enthalpy_per_atom = formation_enthalpy_per_atom
self.interpolated = False
if self.temp % 100:
self.interpolated = True
if gibbs_model.lower() == "sisso":
self.gibbs_fn = self.gf_sisso
else:
raise ValueError(f"{gibbs_model} not a valid model. The only currently " f"available model is 'SISSO'.")
self._energy = self.gibbs_fn()
def gf_sisso(self) -> float:
"""
Gibbs Free Energy of formation as calculated by SISSO descriptor from Bartel
et al. (2018). Units: eV (not normalized)
WARNING: This descriptor only applies to solids. The implementation here
attempts to detect and use downloaded NIST-JANAF data for common
experimental gases (e.g. CO2) where possible. Note that experimental data is
only for Gibbs Free Energy of formation, so expt. entries will register as
having a formation enthalpy of 0.
Reference: Bartel, C. J., Millican, S. L., Deml, A. M., Rumptz, J. R.,
Tumas, W., Weimer, A. W., … Holder, A. M. (2018). Physical descriptor for
the Gibbs energy of inorganic crystalline solids and
temperature-dependent materials chemistry. Nature Communications, 9(1),
4168. https://doi.org/10.1038/s41467-018-06682-4
Returns:
float: the difference between formation enthalpy (T=0 K, Materials
Project) and the predicted Gibbs free energy of formation (eV)
"""
comp = self.composition
if comp.is_element:
return 0
integer_formula, factor = comp.get_integer_formula_and_factor()
if self.experimental:
data = G_GASES[integer_formula]
if self.interpolated:
g_interp = interp1d([int(t) for t in data.keys()], list(data.values()))
energy = g_interp(self.temp)
else:
energy = data[str(self.temp)]
gibbs_energy = energy * factor
else:
num_atoms = self.structure.num_sites
vol_per_atom = self.structure.volume / num_atoms
reduced_mass = self._reduced_mass(self.structure)
gibbs_energy = (
comp.num_atoms
* (self.formation_enthalpy_per_atom + self._g_delta_sisso(vol_per_atom, reduced_mass, self.temp))
- self._sum_g_i()
)
return gibbs_energy
def _sum_g_i(self) -> float:
"""
Sum of the stoichiometrically weighted chemical potentials of the elements
at specified temperature, as acquired from "g_els.json".
Returns:
float: sum of weighted chemical potentials [eV]
"""
elems = self.composition.get_el_amt_dict()
if self.interpolated:
sum_g_i = 0
for elem, amt in elems.items():
g_interp = interp1d(
[float(t) for t in G_ELEMS.keys()],
[g_dict[elem] for g_dict in G_ELEMS.values()],
)
sum_g_i += amt * g_interp(self.temp)
else:
sum_g_i = sum([amt * G_ELEMS[str(self.temp)][elem] for elem, amt in elems.items()])
return sum_g_i
@staticmethod
def _reduced_mass(structure) -> float:
"""
Reduced mass as calculated via Eq. 6 in Bartel et al. (2018)
Args:
structure (Structure): The pymatgen Structure object of the entry.
Returns:
float: reduced mass (amu)
"""
reduced_comp = structure.composition.reduced_composition
num_elems = len(reduced_comp.elements)
elem_dict = reduced_comp.get_el_amt_dict()
denominator = (num_elems - 1) * reduced_comp.num_atoms
all_pairs = combinations(elem_dict.items(), 2)
mass_sum = 0
for pair in all_pairs:
m_i = Composition(pair[0][0]).weight
m_j = Composition(pair[1][0]).weight
alpha_i = pair[0][1]
alpha_j = pair[1][1]
mass_sum += (alpha_i + alpha_j) * (m_i * m_j) / (m_i + m_j) # type: ignore
reduced_mass = (1 / denominator) * mass_sum
return reduced_mass
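    # Worked sketch of Eq. 6 for Fe2O3 (standard atomic weights, approximate).
    # With a single Fe-O pair, num_elems = 2 and denominator = (2 - 1) * 5 = 5:
    #
    #   mass_sum = (2 + 3) * (55.845 * 15.999) / (55.845 + 15.999)  # ~62.2
    #   reduced_mass = mass_sum / 5                                 # ~12.4 amu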
@staticmethod
def _g_delta_sisso(vol_per_atom, reduced_mass, temp) -> float:
"""
G^delta as predicted by SISSO-learned descriptor from Eq. (4) in
Bartel et al. (2018).
Args:
vol_per_atom (float): volume per atom [Å^3/atom]
reduced_mass (float) - reduced mass as calculated with pair-wise sum formula
[amu]
temp (float) - Temperature [K]
Returns:
float: G^delta [eV/atom]
"""
return (
(-2.48e-4 * np.log(vol_per_atom) - 8.94e-5 * reduced_mass / vol_per_atom) * temp
+ 0.181 * np.log(temp)
- 0.882
)
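    # Worked numeric sketch of the descriptor (illustrative inputs:
    # vol_per_atom = 10 A^3/atom, reduced_mass = 12.4 amu, temp = 300 K):
    #
    #   (-2.48e-4 * ln(10) - 8.94e-5 * 12.4 / 10) * 300   # ~ -0.205
    #     + 0.181 * ln(300)                               # ~ +1.032
    #     - 0.882
    #   = ~ -0.054 eV/atom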
@classmethod
def from_pd(cls, pd, temp=300, gibbs_model="SISSO") -> List["GibbsComputedStructureEntry"]:
"""
Constructor method for initializing a list of GibbsComputedStructureEntry
objects from an existing T = 0 K phase diagram composed of
        ComputedStructureEntry objects, as acquired from a thermochemical
        database (e.g., The Materials Project).
Args:
pd (PhaseDiagram): T = 0 K phase diagram as created in pymatgen. Must
contain ComputedStructureEntry objects.
temp (int): Temperature [K] for estimating Gibbs free energy of formation.
gibbs_model (str): Gibbs model to use; currently the only option is "SISSO".
Returns:
            [GibbsComputedStructureEntry]: list of new entries which replace the
                original entries, with the Gibbs free energy of formation
                included at the specified temperature.
"""
gibbs_entries = []
for entry in pd.all_entries:
if entry in pd.el_refs.values() or not entry.composition.is_element:
gibbs_entries.append(
cls(
entry.structure,
formation_enthalpy_per_atom=pd.get_form_energy_per_atom(entry),
temp=temp,
correction=0,
gibbs_model=gibbs_model,
data=entry.data,
entry_id=entry.entry_id,
)
)
return gibbs_entries
@classmethod
def from_entries(cls, entries, temp=300, gibbs_model="SISSO") -> List["GibbsComputedStructureEntry"]:
"""
Constructor method for initializing GibbsComputedStructureEntry objects from
T = 0 K ComputedStructureEntry objects, as acquired from a thermochemical
        database (e.g., The Materials Project).
Args:
entries ([ComputedStructureEntry]): List of ComputedStructureEntry objects,
as downloaded from The Materials Project API.
temp (int): Temperature [K] for estimating Gibbs free energy of formation.
gibbs_model (str): Gibbs model to use; currently the only option is "SISSO".
Returns:
            [GibbsComputedStructureEntry]: list of new entries which replace the
                original entries, with the Gibbs free energy of formation
                included at the specified temperature.
"""
from pymatgen.analysis.phase_diagram import PhaseDiagram
pd = PhaseDiagram(entries)
return cls.from_pd(pd, temp, gibbs_model)
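    # Usage sketch: any list of ComputedStructureEntry objects works. The
    # Materials Project retrieval below is one common source and is an
    # assumption of this example, not part of this module:
    #
    #     entries = mpr.get_entries_in_chemsys(["Fe", "O"])
    #     gibbs_entries = GibbsComputedStructureEntry.from_entries(entries, temp=1000)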
def as_dict(self) -> dict:
"""
:return: MSONAble dict.
"""
d = super().as_dict()
d["formation_enthalpy_per_atom"] = self.formation_enthalpy_per_atom
d["temp"] = self.temp
d["gibbs_model"] = self.gibbs_model
d["interpolated"] = self.interpolated
return d
@classmethod
def from_dict(cls, d) -> "GibbsComputedStructureEntry":
"""
:param d: Dict representation.
:return: GibbsComputedStructureEntry
"""
dec = MontyDecoder()
return cls(
dec.process_decoded(d["structure"]),
d["formation_enthalpy_per_atom"],
d["temp"],
d["gibbs_model"],
composition=d.get("composition", None),
correction=d["correction"],
energy_adjustments=[dec.process_decoded(e) for e in d.get("energy_adjustments", {})],
parameters={k: dec.process_decoded(v) for k, v in d.get("parameters", {}).items()},
data={k: dec.process_decoded(v) for k, v in d.get("data", {}).items()},
entry_id=d.get("entry_id", None),
)
def __repr__(self):
output = [
"GibbsComputedStructureEntry {} - {}".format(self.entry_id, self.composition.formula),
"Gibbs Free Energy (Formation) = {:.4f}".format(self.energy),
]
return "\n".join(output)
| richardtran415/pymatgen | pymatgen/entries/computed_entries.py | Python | mit | 36,654 | ["VASP", "pymatgen"] | 0337d9d27218593918a30ec1a57ae48df7d1d45da1493cea20573e3f2e2874ac |