| text (string, length 12-1.05M) | repo_name (string, length 5-86) | path (string, length 4-191) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12-1.05M) | keyword (list, length 1-23) | text_hash (string, length 64) |
|---|---|---|---|---|---|---|---|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Checkers for detecting unsupported Python features."""
import gast
from tensorflow.python.autograph.pyct import errors
class UnsupportedFeaturesChecker(gast.NodeVisitor):
"""Quick check for Python features we know we don't support.
Any features detected will cause AutoGraph to not compile a function.
"""
def visit_Attribute(self, node):
if (node.attr is not None
and node.attr.startswith('__') and not node.attr.endswith('__')):
raise errors.UnsupportedLanguageElementError(
'mangled names are not yet supported')
self.generic_visit(node)
def visit_For(self, node):
if node.orelse:
raise errors.UnsupportedLanguageElementError(
'for/else statement not yet supported')
self.generic_visit(node)
def visit_While(self, node):
if node.orelse:
raise errors.UnsupportedLanguageElementError(
'while/else statement not yet supported')
self.generic_visit(node)
# These checks could potentially be replaced with inspect.isgeneratorfunction
# to avoid a getsource/parse/ast-walk round trip.
def visit_Yield(self, node):
raise errors.UnsupportedLanguageElementError('generators are not supported')
def visit_YieldFrom(self, node):
raise errors.UnsupportedLanguageElementError('generators are not supported')
def verify(node):
UnsupportedFeaturesChecker().visit(node)
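# Illustrative usage sketch (not part of the original module): `verify` walks a
# gast-parsed AST and raises errors.UnsupportedLanguageElementError when an
# unsupported construct such as a generator is found.
#
#   import gast
#   tree = gast.parse("def gen():\n    yield 1\n")
#   verify(tree)  # raises UnsupportedLanguageElementError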
|
tensorflow/tensorflow
|
tensorflow/python/autograph/core/unsupported_features_checker.py
|
Python
|
apache-2.0
| 2,070
|
[
"VisIt"
] |
531ed4dacc484b1439f8a66fa323abd6d78815679176a5ab316b51f051a9302f
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes representing statistical distributions and ops for working with them.
## Classes for statistical distributions.
Classes that represent batches of statistical distributions. Each class is
initialized with parameters that define the distributions.
### Base classes
@@BaseDistribution
@@ContinuousDistribution
@@DiscreteDistribution
### Univariate (scalar) distributions
@@Chi2
@@Exponential
@@Gamma
@@Gaussian
@@StudentT
@@Uniform
### Multivariate distributions
@@MultivariateNormal
@@DirichletMultinomial
## Posterior inference with conjugate priors.
Functions that transform conjugate prior/likelihood pairs to distributions
representing the posterior or posterior predictive.
### Gaussian likelihood with conjugate prior.
@@gaussian_conjugates_known_sigma_posterior
@@gaussian_congugates_known_sigma_predictive
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long
from tensorflow.contrib.distributions.python.ops.chi2 import *
from tensorflow.contrib.distributions.python.ops.dirichlet_multinomial import *
from tensorflow.contrib.distributions.python.ops.distribution import *
from tensorflow.contrib.distributions.python.ops.exponential import *
from tensorflow.contrib.distributions.python.ops.gamma import *
from tensorflow.contrib.distributions.python.ops.gaussian import *
from tensorflow.contrib.distributions.python.ops.gaussian_conjugate_posteriors import *
from tensorflow.contrib.distributions.python.ops.mvn import *
from tensorflow.contrib.distributions.python.ops.student_t import *
from tensorflow.contrib.distributions.python.ops.uniform import *
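# Illustrative sketch (not part of the original module): the wildcard imports
# above surface the classes listed in the module docstring at the package
# level. Constructor signatures are omitted because they varied across early
# TensorFlow releases.
#
#   from tensorflow.contrib import distributions
#   assert hasattr(distributions, "Gaussian")
#   assert hasattr(distributions, "DirichletMultinomial")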
|
petewarden/tensorflow_makefile
|
tensorflow/contrib/distributions/__init__.py
|
Python
|
apache-2.0
| 2,389
|
[
"Gaussian"
] |
3adc56aaa3b36e297b112a3a9a93e2424d9b05313a9d68ce5c6e22ac67b6a813
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements a Composition class to represent compositions,
and a ChemicalPotential class to represent potentials.
"""
import collections
import numbers
import os
import re
import string
from functools import total_ordering
from itertools import combinations_with_replacement, product
from typing import List, Tuple, Union, Dict
from monty.fractions import gcd, gcd_float
from monty.json import MSONable
from monty.serialization import loadfn
from pymatgen.core.periodic_table import DummySpecies, Element, Species, get_el_sp
from pymatgen.core.units import Mass
from pymatgen.util.string import formula_double_format
SpeciesLike = Union[str, Element, Species, DummySpecies]
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Nov 10, 2012"
@total_ordering
class Composition(collections.abc.Hashable, collections.abc.Mapping, MSONable):
"""
Represents a Composition, which is essentially a {element:amount} mapping
type. Composition is written to be immutable and hashable,
unlike a standard Python dict.
Note that the key can be either an Element or a Species. Elements and Species
are treated differently. i.e., a Fe2+ is not the same as a Fe3+ Species and
would be put in separate keys. This differentiation is deliberate to
support using Composition to determine the fraction of a particular Species.
Works almost completely like a standard python dictionary, except that
__getitem__ is overridden to return 0 when an element is not found.
(somewhat like a defaultdict, except it is immutable).
Also adds more convenience methods relevant to compositions, e.g.,
get_fraction.
It should also be noted that much Composition-related functionality takes
in a standard string as a convenient input. For example,
even though the internal representation of a Fe2O3 composition is
{Element("Fe"): 2, Element("O"): 3}, you can obtain the amount of Fe
simply by comp["Fe"] instead of the more verbose comp[Element("Fe")].
>>> comp = Composition("LiFePO4")
>>> comp.get_atomic_fraction(Element("Li"))
0.14285714285714285
>>> comp.num_atoms
7.0
>>> comp.reduced_formula
'LiFePO4'
>>> comp.formula
'Li1 Fe1 P1 O4'
>>> comp.get_wt_fraction(Element("Li"))
0.04399794666951898
>>> comp.num_atoms
7.0
"""
# Tolerance in distinguishing different composition amounts.
# 1e-8 is fairly tight, but should cut out most floating point arithmetic
# errors.
amount_tolerance = 1e-8
# Special formula handling for peroxides and certain elements. This is so
# that formula output does not write LiO instead of Li2O2 for example.
special_formulas = {
"LiO": "Li2O2",
"NaO": "Na2O2",
"KO": "K2O2",
"HO": "H2O2",
"CsO": "Cs2O2",
"RbO": "Rb2O2",
"O": "O2",
"N": "N2",
"F": "F2",
"Cl": "Cl2",
"H": "H2",
}
oxi_prob = None # prior probability of oxidation used by oxi_state_guesses
def __init__(self, *args, strict: bool = False, **kwargs):
r"""
Very flexible Composition construction, similar to the built-in Python
dict(). Also extended to allow simple string init.
Args:
Any form supported by the Python built-in dict() function.
1. A dict of either {Element/Species: amount},
{string symbol:amount}, or {atomic number:amount} or any mixture
of these. E.g., {Element("Li"):2 ,Element("O"):1},
{"Li":2, "O":1}, {3:2, 8:1} all result in a Li2O composition.
2. Keyword arg initialization, similar to a dict, e.g.,
Composition(Li = 2, O = 1)
In addition, the Composition constructor also allows a single
string as an input formula. E.g., Composition("Li2O").
strict: Only allow valid Elements and Species in the Composition.
allow_negative: Whether to allow negative compositions. This
argument must be popped from the **kwargs due to *args
ambiguity.
"""
self.allow_negative = kwargs.pop("allow_negative", False)
# it's much faster to recognize a composition and use the elmap than
# to pass the composition to dict()
if len(args) == 1 and isinstance(args[0], Composition):
elmap = args[0]
elif len(args) == 1 and isinstance(args[0], str):
elmap = self._parse_formula(args[0])
else:
elmap = dict(*args, **kwargs) # type: ignore
elamt = {}
self._natoms = 0
for k, v in elmap.items():
if v < -Composition.amount_tolerance and not self.allow_negative:
raise ValueError("Amounts in Composition cannot be " "negative!")
if abs(v) >= Composition.amount_tolerance:
elamt[get_el_sp(k)] = v
self._natoms += abs(v)
self._data = elamt
if strict and not self.valid:
raise ValueError("Composition is not valid, contains: {}".format(", ".join(map(str, self.elements))))
def __getitem__(self, item: SpeciesLike):
try:
sp = get_el_sp(item)
return self._data.get(sp, 0)
except ValueError as ex:
raise TypeError(
"Invalid key {}, {} for Composition\n" "ValueError exception:\n{}".format(item, type(item), ex)
)
def __len__(self):
return len(self._data)
def __iter__(self):
return self._data.keys().__iter__()
def __contains__(self, item):
try:
sp = get_el_sp(item)
return sp in self._data
except ValueError as ex:
raise TypeError(
"Invalid key {}, {} for Composition\n" "ValueError exception:\n{}".format(item, type(item), ex)
)
def __eq__(self, other):
# elements with amounts < Composition.amount_tolerance don't show up
# in the elmap, so checking len enables us to only check one
# composition's elements
if len(self) != len(other):
return False
for el, v in self.items():
if abs(v - other[el]) > Composition.amount_tolerance:
return False
return True
def __ge__(self, other):
"""
Defines >= for Compositions. Should ONLY be used for defining a sort
order (the behavior is probably not what you'd expect)
"""
for el in sorted(set(self.elements + other.elements)):
if other[el] - self[el] >= Composition.amount_tolerance:
return False
if self[el] - other[el] >= Composition.amount_tolerance:
return True
return True
def __ne__(self, other):
return not self.__eq__(other)
def __add__(self, other):
"""
Adds two compositions. For example, an Fe2O3 composition + an FeO
composition gives a Fe3O4 composition.
"""
new_el_map = collections.defaultdict(float)
new_el_map.update(self)
for k, v in other.items():
new_el_map[get_el_sp(k)] += v
return Composition(new_el_map, allow_negative=self.allow_negative)
def __sub__(self, other):
"""
Subtracts two compositions. For example, an Fe2O3 composition - an FeO
composition gives an FeO2 composition.
Raises:
ValueError if the subtracted composition is greater than the
original composition in any of its elements, unless allow_negative
is True
"""
new_el_map = collections.defaultdict(float)
new_el_map.update(self)
for k, v in other.items():
new_el_map[get_el_sp(k)] -= v
return Composition(new_el_map, allow_negative=self.allow_negative)
def __mul__(self, other):
"""
Multiply a Composition by an integer or a float.
Fe2O3 * 4 -> Fe8O12
"""
if not isinstance(other, numbers.Number):
return NotImplemented
return Composition({el: self[el] * other for el in self}, allow_negative=self.allow_negative)
__rmul__ = __mul__
def __truediv__(self, other):
if not isinstance(other, numbers.Number):
return NotImplemented
return Composition({el: self[el] / other for el in self}, allow_negative=self.allow_negative)
__div__ = __truediv__
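# Arithmetic sketch, following the examples in the method docstrings above:
#
#   Composition("Fe2O3") + Composition("FeO")   # -> Fe3O4
#   Composition("Fe2O3") - Composition("FeO")   # -> FeO2
#   Composition("Fe2O3") * 4                    # -> Fe8O12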
def __hash__(self):
"""
Minimally effective hash function that just distinguishes between
Compositions with different elements.
"""
hashcode = 0
for el, amt in self.items():
if abs(amt) > Composition.amount_tolerance:
hashcode += el.Z
return hashcode
@property
def average_electroneg(self) -> float:
"""
:return: Average electronegativity of the composition.
"""
return sum((el.X * abs(amt) for el, amt in self.items())) / self.num_atoms
@property
def total_electrons(self) -> float:
"""
:return: Total number of electrons in composition.
"""
return sum((el.Z * abs(amt) for el, amt in self.items()))
def almost_equals(self, other: "Composition", rtol: float = 0.1, atol: float = 1e-8) -> bool:
"""
Returns true if compositions are equal within a tolerance.
Args:
other (Composition): Other composition to check
rtol (float): Relative tolerance
atol (float): Absolute tolerance
"""
sps = set(self.elements + other.elements)
for sp in sps:
a = self[sp]
b = other[sp]
tol = atol + rtol * (abs(a) + abs(b)) / 2
if abs(b - a) > tol:
return False
return True
@property
def is_element(self) -> bool:
"""
True if composition is for an element.
"""
return len(self) == 1
def copy(self) -> "Composition":
"""
:return: A copy of the composition.
"""
return Composition(self, allow_negative=self.allow_negative)
@property
def formula(self) -> str:
"""
Returns a formula string, with elements sorted by electronegativity,
e.g., Li4 Fe4 P4 O16.
"""
sym_amt = self.get_el_amt_dict()
syms = sorted(sym_amt.keys(), key=lambda sym: get_el_sp(sym).X)
formula = [s + formula_double_format(sym_amt[s], False) for s in syms]
return " ".join(formula)
@property
def alphabetical_formula(self) -> str:
"""
Returns a formula string, with elements sorted alphabetically,
e.g., Fe4 Li4 O16 P4.
"""
sym_amt = self.get_el_amt_dict()
syms = sorted(sym_amt.keys())
formula = [s + formula_double_format(sym_amt[s], False) for s in syms]
return " ".join(formula)
@property
def iupac_formula(self) -> str:
"""
Returns a formula string, with elements sorted by the iupac
electronegativity ordering defined in Table VI of "Nomenclature of
Inorganic Chemistry (IUPAC Recommendations 2005)". This ordering
effectively follows the groups and rows of the periodic table, except
the Lanthanides, Actinides and hydrogen. Polyanions are still determined
based on the true electronegativity of the elements.
e.g. CH2(SO4)2
"""
sym_amt = self.get_el_amt_dict()
syms = sorted(sym_amt.keys(), key=lambda s: get_el_sp(s).iupac_ordering)
formula = [s + formula_double_format(sym_amt[s], False) for s in syms]
return " ".join(formula)
@property
def element_composition(self) -> "Composition":
"""
Returns the composition replacing any species by the corresponding
element.
"""
return Composition(self.get_el_amt_dict(), allow_negative=self.allow_negative)
@property
def fractional_composition(self) -> "Composition":
"""
Returns the normalized composition in which the amounts of all species sum
to 1.
Returns:
Normalized composition in which the amounts of all species sum to 1.
"""
return self / self._natoms
@property
def reduced_composition(self) -> "Composition":
"""
Returns the reduced composition, i.e., amounts normalized by the greatest
common divisor, e.g., Composition("FePO4") for
Composition("Fe4P4O16").
"""
return self.get_reduced_composition_and_factor()[0]
def get_reduced_composition_and_factor(self) -> Tuple["Composition", float]:
"""
Calculates a reduced composition and factor.
Returns:
A normalized composition and a multiplicative factor, i.e.,
Li4Fe4P4O16 returns (Composition("LiFePO4"), 4).
"""
factor = self.get_reduced_formula_and_factor()[1]
return self / factor, factor
def get_reduced_formula_and_factor(self, iupac_ordering: bool = False) -> Tuple[str, float]:
"""
Calculates a reduced formula and factor.
Args:
iupac_ordering (bool, optional): Whether to order the
formula by the iupac "electronegativity" series, defined in
Table VI of "Nomenclature of Inorganic Chemistry (IUPAC
Recommendations 2005)". This ordering effectively follows
the groups and rows of the periodic table, except the
Lanthanides, Actinides and hydrogen. Note that polyanions
will still be determined based on the true electronegativity of
the elements.
Returns:
A pretty normalized formula and a multiplicative factor, i.e.,
Li4Fe4P4O16 returns (LiFePO4, 4).
"""
all_int = all(abs(x - round(x)) < Composition.amount_tolerance for x in self.values())
if not all_int:
return self.formula.replace(" ", ""), 1
d = {k: int(round(v)) for k, v in self.get_el_amt_dict().items()}
(formula, factor) = reduce_formula(d, iupac_ordering=iupac_ordering)
if formula in Composition.special_formulas:
formula = Composition.special_formulas[formula]
factor /= 2
return formula, factor
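# Example based on the docstring above:
#
#   Composition("Li4Fe4P4O16").get_reduced_formula_and_factor()
#   # -> ('LiFePO4', 4)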
def get_integer_formula_and_factor(
self, max_denominator: int = 10000, iupac_ordering: bool = False
) -> Tuple[str, float]:
"""
Calculates an integer formula and factor.
Args:
max_denominator (int): all amounts in the el:amt dict are
first converted to a Fraction with this maximum denominator
iupac_ordering (bool, optional): Whether to order the
formula by the iupac "electronegativity" series, defined in
Table VI of "Nomenclature of Inorganic Chemistry (IUPAC
Recommendations 2005)". This ordering effectively follows
the groups and rows of the periodic table, except the
Lanthanides, Actinides and hydrogen. Note that polyanions
will still be determined based on the true electronegativity of
the elements.
Returns:
A pretty normalized formula and a multiplicative factor, i.e.,
Li0.5O0.25 returns (Li2O, 0.25). O0.25 returns (O2, 0.125)
"""
el_amt = self.get_el_amt_dict()
g = gcd_float(list(el_amt.values()), 1 / max_denominator)
d = {k: round(v / g) for k, v in el_amt.items()}
(formula, factor) = reduce_formula(d, iupac_ordering=iupac_ordering)
if formula in Composition.special_formulas:
formula = Composition.special_formulas[formula]
factor /= 2
return formula, factor * g
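# Examples taken from the docstring above:
#
#   Composition("Li0.5O0.25").get_integer_formula_and_factor()  # -> ('Li2O', 0.25)
#   Composition("O0.25").get_integer_formula_and_factor()       # -> ('O2', 0.125)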
@property
def reduced_formula(self) -> str:
"""
Returns a pretty normalized formula, i.e., LiFePO4 instead of
Li4Fe4P4O16.
"""
return self.get_reduced_formula_and_factor()[0]
@property
def hill_formula(self) -> str:
"""
:return: Hill formula. The Hill system (or Hill notation) is a system
of writing empirical chemical formulas, molecular chemical formulas and
components of a condensed formula such that the number of carbon atoms
in a molecule is indicated first, the number of hydrogen atoms next,
and then the number of all other chemical elements subsequently, in
alphabetical order of the chemical symbols. When the formula contains
no carbon, all the elements, including hydrogen, are listed
alphabetically.
"""
c = self.element_composition
elements = sorted([el.symbol for el in c.keys()])
if "C" in elements:
elements = ["C"] + [el for el in elements if el != "C"]
formula = ["%s%s" % (el, formula_double_format(c[el]) if c[el] != 1 else "") for el in elements]
return " ".join(formula)
@property
def elements(self) -> List[Union[Element, Species, DummySpecies]]:
"""
Returns the list of elements in the Composition.
"""
return list(self.keys())
def __str__(self):
return " ".join(
["{}{}".format(k, formula_double_format(v, ignore_ones=False)) for k, v in self.as_dict().items()]
)
@property
def num_atoms(self) -> float:
"""
Total number of atoms in Composition. For negative amounts, sum
of absolute values
"""
return self._natoms
@property
def weight(self) -> float:
"""
Total molecular weight of Composition
"""
return Mass(sum([amount * el.atomic_mass for el, amount in self.items()]), "amu")
def get_atomic_fraction(self, el: SpeciesLike) -> float:
"""
Calculate atomic fraction of an Element or Species.
Args:
el (Element/Species): Element or Species to get fraction for.
Returns:
Atomic fraction for element el in Composition
"""
return abs(self[el]) / self._natoms
def get_wt_fraction(self, el: SpeciesLike):
"""
Calculate weight fraction of an Element or Species.
Args:
el (Element/Species): Element or Species to get fraction for.
Returns:
Weight fraction for element el in Composition
"""
return get_el_sp(el).atomic_mass * abs(self[el]) / self.weight
def contains_element_type(
self,
category: str,
):
"""
Check if Composition contains any elements matching a given category.
Args:
category (str): one of "noble_gas", "transition_metal",
"post_transition_metal", "rare_earth_metal", "metal", "metalloid",
"alkali", "alkaline", "halogen", "chalcogen", "lanthanoid",
"actinoid", "quadrupolar", "s-block", "p-block", "d-block", "f-block"
Returns:
True if any elements in Composition match category, otherwise False
"""
allowed_categories = (
"noble_gas",
"transition_metal",
"post_transition_metal",
"rare_earth_metal",
"metal",
"metalloid",
"alkali",
"alkaline",
"halogen",
"chalcogen",
"lanthanoid",
"actinoid",
"quadrupolar",
"s-block",
"p-block",
"d-block",
"f-block",
)
if category not in allowed_categories:
raise ValueError("Please pick a category from: {}".format(", ".join(allowed_categories)))
if "block" in category:
return any([category[0] in el.block for el in self.elements])
return any([getattr(el, "is_{}".format(category)) for el in self.elements])
def _parse_formula(self, formula):
"""
Args:
formula (str): A string formula, e.g. Fe2O3, Li3Fe2(PO4)3
Returns:
Composition with that formula.
Notes:
In the case of a metallofullerene formula (e.g. Y3N@C80),
the @ mark is dropped before the formula is passed to the parser.
"""
# for Metallofullerene like "Y3N@C80"
formula = formula.replace("@", "")
def get_sym_dict(f, factor):
sym_dict = collections.defaultdict(float)
for m in re.finditer(r"([A-Z][a-z]*)\s*([-*\.e\d]*)", f):
el = m.group(1)
amt = 1
if m.group(2).strip() != "":
amt = float(m.group(2))
sym_dict[el] += amt * factor
f = f.replace(m.group(), "", 1)
if f.strip():
raise ValueError("{} is an invalid formula!".format(f))
return sym_dict
m = re.search(r"\(([^\(\)]+)\)\s*([\.e\d]*)", formula)
if m:
factor = 1
if m.group(2) != "":
factor = float(m.group(2))
unit_sym_dict = get_sym_dict(m.group(1), factor)
expanded_sym = "".join(["{}{}".format(el, amt) for el, amt in unit_sym_dict.items()])
expanded_formula = formula.replace(m.group(), expanded_sym)
return self._parse_formula(expanded_formula)
return get_sym_dict(formula, 1)
@property
def anonymized_formula(self) -> str:
"""
An anonymized formula. Unique species are arranged in order of
increasing amount and assigned ascending letters. Useful for
prototyping formulas. For example, all stoichiometric perovskites have
anonymized_formula ABC3.
"""
reduced = self.element_composition
if all(x == int(x) for x in self.values()):
reduced /= gcd(*(int(i) for i in self.values()))
anon = ""
for e, amt in zip(string.ascii_uppercase, sorted(reduced.values())):
if amt == 1:
amt_str = ""
elif abs(amt % 1) < 1e-8:
amt_str = str(int(amt))
else:
amt_str = str(amt)
anon += "{}{}".format(e, amt_str)
return anon
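# Example following the docstring above: a stoichiometric perovskite such as
# CaTiO3 anonymizes to ABC3.
#
#   Composition("CaTiO3").anonymized_formula  # -> 'ABC3'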
@property
def chemical_system(self) -> str:
"""
Get the chemical system of a Composition, for example "O-Si" for
SiO2. Chemical system is a string of a list of elements
sorted alphabetically and joined by dashes, by convention for use
in database keys.
"""
return "-".join(sorted([str(el) for el in self.elements]))
@property
def valid(self) -> bool:
"""
Returns True if Composition contains valid elements or species and
False if the Composition contains any dummy species.
"""
return not any([isinstance(el, DummySpecies) for el in self.elements])
def __repr__(self):
return "Comp: " + self.formula
@classmethod
def from_dict(cls, d) -> "Composition":
"""
Creates a composition from a dict generated by as_dict(). Strictly not
necessary given that the standard constructor already takes in such an
input, but this method preserves the standard pymatgen API of having
from_dict methods to reconstitute objects generated by as_dict(). Allows
for easier introspection.
Args:
d (dict): {symbol: amount} dict.
"""
return cls(d)
def get_el_amt_dict(self) -> Dict[str, float]:
"""
Returns:
Dict with element symbol and (unreduced) amount e.g.,
{"Fe": 4.0, "O":6.0} or {"Fe3+": 4.0, "O2-":6.0}
"""
d: Dict[str, float] = collections.defaultdict(float)
for e, a in self.items():
d[e.symbol] += a
return d
def as_dict(self) -> dict:
"""
Returns:
dict with species symbol and (unreduced) amount e.g.,
{"Fe": 4.0, "O":6.0} or {"Fe3+": 4.0, "O2-":6.0}
"""
d: Dict[str, float] = collections.defaultdict(float)
for e, a in self.items():
d[str(e)] += a
return d
@property
def to_reduced_dict(self) -> dict:
"""
Returns:
Dict with element symbol and reduced amount e.g.,
{"Fe": 2.0, "O":3.0}
"""
return self.get_reduced_composition_and_factor()[0].as_dict()
@property
def to_data_dict(self) -> dict:
"""
Returns:
A dict with many keys and values relating to Composition/Formula,
including reduced_cell_composition, unit_cell_composition,
reduced_cell_formula, elements and nelements.
"""
return {
"reduced_cell_composition": self.get_reduced_composition_and_factor()[0],
"unit_cell_composition": self.as_dict(),
"reduced_cell_formula": self.reduced_formula,
"elements": list(self.as_dict().keys()),
"nelements": len(self.as_dict().keys()),
}
def oxi_state_guesses(
self,
oxi_states_override: dict = None,
target_charge: float = 0,
all_oxi_states: bool = False,
max_sites: int = None,
) -> List[Dict[str, float]]:
"""
Checks if the composition is charge-balanced and returns back all
charge-balanced oxidation state combinations. Composition must have
integer values. Note that more atoms in the composition give
more degrees of freedom, e.g., if the possible oxidation states of
element X are [2,4] and Y are [-3], then XY is not charge balanced
but X2Y2 is. Results are returned from most to least probable based
on ICSD statistics. Use max_sites to improve performance if needed.
Args:
oxi_states_override (dict): dict of str->list to override an
element's common oxidation states, e.g. {"V": [2,3,4,5]}
target_charge (int): the desired total charge on the structure.
Default is 0 signifying charge balance.
all_oxi_states (bool): if True, an element defaults to
all oxidation states in pymatgen Element.icsd_oxidation_states.
Otherwise, default is Element.common_oxidation_states. Note
that the full oxidation state list is *very* inclusive and
can produce nonsensical results.
max_sites (int): if possible, will reduce Compositions to at most
this many sites to speed up oxidation state guesses. If the
composition cannot be reduced to this many sites a ValueError
will be raised. Set to -1 to just reduce fully. If set to a
number less than -1, the formula will be fully reduced but a
ValueError will be thrown if the number of atoms in the reduced
formula is greater than abs(max_sites).
Returns:
A list of dicts - each dict reports an element symbol and average
oxidation state across all sites in that composition. If the
composition is not charge balanced, an empty list is returned.
"""
return self._get_oxid_state_guesses(all_oxi_states, max_sites, oxi_states_override, target_charge)[0]
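# Hedged illustration (exact ranking depends on the bundled ICSD statistics):
# a charge-balanced oxide such as Fe2O3 is expected to yield average oxidation
# states of +3 for Fe and -2 for O.
#
#   Composition("Fe2O3").oxi_state_guesses()
#   # -> [{'Fe': 3.0, 'O': -2.0}, ...]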
def add_charges_from_oxi_state_guesses(
self,
oxi_states_override: dict = None,
target_charge: float = 0,
all_oxi_states: bool = False,
max_sites: int = None,
) -> "Composition":
"""
Assign oxidation states based on guessed oxidation states.
See `oxi_state_guesses` for an explanation of how oxidation states are
guessed. This operation uses the set of oxidation states for each site
that were determined to be most likely from the oxidation state guessing
routine.
Args:
oxi_states_override (dict): dict of str->list to override an
element's common oxidation states, e.g. {"V": [2,3,4,5]}
target_charge (int): the desired total charge on the structure.
Default is 0 signifying charge balance.
all_oxi_states (bool): if True, an element defaults to
all oxidation states in pymatgen Element.icsd_oxidation_states.
Otherwise, default is Element.common_oxidation_states. Note
that the full oxidation state list is *very* inclusive and
can produce nonsensical results.
max_sites (int): if possible, will reduce Compositions to at most
this many sites to speed up oxidation state guesses. If the
composition cannot be reduced to this many sites a ValueError
will be raised. Set to -1 to just reduce fully. If set to a
number less than -1, the formula will be fully reduced but a
ValueError will be thrown if the number of atoms in the reduced
formula is greater than abs(max_sites).
Returns:
Composition, where the elements are assigned oxidation states based
on the results from guessing oxidation states. If no oxidation state
is possible, returns a Composition where all oxidation states are 0.
"""
_, oxidation_states = self._get_oxid_state_guesses(
all_oxi_states, max_sites, oxi_states_override, target_charge
)
# Special case: No charged compound is possible
if not oxidation_states:
return Composition(dict((Species(e, 0), f) for e, f in self.items()))
# Generate the species
species = []
for el, charges in oxidation_states[0].items():
species.extend([Species(el, c) for c in charges])
# Return the new object
return Composition(collections.Counter(species))
def remove_charges(self) -> "Composition":
"""
Removes the charges from any species in a Composition object.
Returns:
Composition object without charge decoration, for example
{"Fe3+": 2.0, "O2-":3.0} becomes {"Fe": 2.0, "O":3.0}
"""
d: Dict[Element, float] = collections.defaultdict(float)
for e, a in self.items():
d[Element(e.symbol)] += a
return Composition(d)
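# Example from the docstring above:
#
#   Composition({"Fe3+": 2.0, "O2-": 3.0}).remove_charges()
#   # -> Composition({"Fe": 2.0, "O": 3.0})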
def _get_oxid_state_guesses(self, all_oxi_states, max_sites, oxi_states_override, target_charge):
"""
Utility operation for guessing oxidation states.
See `oxi_state_guesses` for full details. This operation does the
calculation of the most likely oxidation states
Args:
oxi_states_override (dict): dict of str->list to override an
element's common oxidation states, e.g. {"V": [2,3,4,5]}
target_charge (int): the desired total charge on the structure.
Default is 0 signifying charge balance.
all_oxi_states (bool): if True, an element defaults to
all oxidation states in pymatgen Element.icsd_oxidation_states.
Otherwise, default is Element.common_oxidation_states. Note
that the full oxidation state list is *very* inclusive and
can produce nonsensical results.
max_sites (int): if possible, will reduce Compositions to at most
this many sites to speed up oxidation state guesses. If the
composition cannot be reduced to this many sites a ValueError
will be raised. Set to -1 to just reduce fully. If set to a
number less than -1, the formula will be fully reduced but a
ValueError will be thrown if the number of atoms in the reduced
formula is greater than abs(max_sites).
Returns:
A list of dicts - each dict reports an element symbol and average
oxidation state across all sites in that composition. If the
composition is not charge balanced, an empty list is returned.
A list of dicts - each dict maps the element symbol to a list of
oxidation states for each site of that element. For example, Fe3O4 could
return a list of [2, 2, 2, 3, 3, 3] for the oxidation states of Fe. If the
composition is not charge balanced, an empty list is returned.
"""
comp = self.copy()
# reduce Composition if necessary
if max_sites and max_sites < 0:
comp = self.reduced_composition
if max_sites < -1 and comp.num_atoms > abs(max_sites):
raise ValueError("Composition {} cannot accommodate max_sites " "setting!".format(comp))
elif max_sites and comp.num_atoms > max_sites:
reduced_comp, reduced_factor = self.get_reduced_composition_and_factor()
if reduced_factor > 1:
reduced_comp *= max(1, int(max_sites / reduced_comp.num_atoms))
comp = reduced_comp # as close to max_sites as possible
if comp.num_atoms > max_sites:
raise ValueError("Composition {} cannot accommodate max_sites " "setting!".format(comp))
# Load prior probabilities of oxidation states, used to rank solutions
if not Composition.oxi_prob:
module_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
all_data = loadfn(os.path.join(module_dir, "..", "analysis", "icsd_bv.yaml"))
Composition.oxi_prob = {Species.from_string(sp): data for sp, data in all_data["occurrence"].items()}
oxi_states_override = oxi_states_override or {}
# assert: Composition only has integer amounts
if not all(amt == int(amt) for amt in comp.values()):
raise ValueError("Charge balance analysis requires integer " "values in Composition!")
# for each element, determine all possible sum of oxidations
# (taking into account nsites for that particular element)
el_amt = comp.get_el_amt_dict()
els = el_amt.keys()
el_sums = [] # matrix: dim1= el_idx, dim2=possible sums
el_sum_scores = collections.defaultdict(set) # dict of el_idx, sum -> score
el_best_oxid_combo = {} # dict of el_idx, sum -> oxid combo with best score
for idx, el in enumerate(els):
el_sum_scores[idx] = {}
el_best_oxid_combo[idx] = {}
el_sums.append([])
if oxi_states_override.get(el):
oxids = oxi_states_override[el]
elif all_oxi_states:
oxids = Element(el).oxidation_states
else:
oxids = Element(el).icsd_oxidation_states or Element(el).oxidation_states
# get all possible combinations of oxidation states
# and sum each combination
for oxid_combo in combinations_with_replacement(oxids, int(el_amt[el])):
# List this sum as a possible option
oxid_sum = sum(oxid_combo)
if oxid_sum not in el_sums[idx]:
el_sums[idx].append(oxid_sum)
# Determine how probable this combo is
score = sum([Composition.oxi_prob.get(Species(el, o), 0) for o in oxid_combo])
# If it is the most probable combo for a certain sum,
# store the combination
if oxid_sum not in el_sum_scores[idx] or score > el_sum_scores[idx].get(oxid_sum, 0):
el_sum_scores[idx][oxid_sum] = score
el_best_oxid_combo[idx][oxid_sum] = oxid_combo
# Determine which combination of oxidation states for each element
# is the most probable
all_sols = [] # will contain all solutions
all_oxid_combo = [] # will contain the best combination of oxidation states for each site
all_scores = [] # will contain a score for each solution
for x in product(*el_sums):
# each x is a trial of one possible oxidation sum for each element
if sum(x) == target_charge: # charge balance condition
el_sum_sol = dict(zip(els, x)) # element->oxid_sum
# normalize oxid_sum by amount to get avg oxid state
sol = {el: v / el_amt[el] for el, v in el_sum_sol.items()}
# add the solution to the list of solutions
all_sols.append(sol)
# determine the score for this solution
score = 0
for idx, v in enumerate(x):
score += el_sum_scores[idx][v]
all_scores.append(score)
# collect the combination of oxidation states for each site
all_oxid_combo.append(dict((e, el_best_oxid_combo[idx][v]) for idx, (e, v) in enumerate(zip(els, x))))
# sort the solutions by highest to lowest score
if all_scores:
all_sols, all_oxid_combo = zip(
*[
(y, x)
for (z, y, x) in sorted(
zip(all_scores, all_sols, all_oxid_combo),
key=lambda pair: pair[0],
reverse=True,
)
]
)
return all_sols, all_oxid_combo
@staticmethod
def ranked_compositions_from_indeterminate_formula(fuzzy_formula, lock_if_strict=True):
"""
Takes in a formula where capitalization might not be correctly entered,
and suggests a ranked list of potential Composition matches.
Author: Anubhav Jain
Args:
fuzzy_formula (str): A formula string, such as "co2o3" or "MN",
that may or may not have multiple interpretations
lock_if_strict (bool): If true, a properly entered formula will
only return the one correct interpretation. For example,
"Co1" will only return "Co1" if true, but will return both
"Co1" and "C1 O1" if false.
Returns:
A ranked list of potential Composition matches
"""
# if we have an exact match and the user specifies lock_if_strict, just
# return the exact match!
if lock_if_strict:
# the strict composition parsing might throw an error, we can ignore
# it and just get on with fuzzy matching
try:
comp = Composition(fuzzy_formula)
return [comp]
except ValueError:
pass
all_matches = Composition._comps_from_fuzzy_formula(fuzzy_formula)
# remove duplicates
all_matches = list(set(all_matches))
# sort matches by rank descending
all_matches = sorted(all_matches, key=lambda match: (match[1], match[0]), reverse=True)
all_matches = [m[0] for m in all_matches]
return all_matches
@staticmethod
def _comps_from_fuzzy_formula(fuzzy_formula, m_dict=None, m_points=0, factor=1):
"""
A recursive helper method for formula parsing that helps in
interpreting and ranking indeterminate formulas.
Author: Anubhav Jain
Args:
fuzzy_formula (str): A formula string, such as "co2o3" or "MN",
that may or may not have multiple interpretations.
m_dict (dict): A symbol:amt dictionary from the previously parsed
formula.
m_points: Number of points gained from the previously parsed
formula.
factor: Coefficient for this parse, e.g. (PO4)2 will feed in PO4
as the fuzzy_formula with a coefficient of 2.
Returns:
A list of tuples, with the first element being a Composition and
the second element being the number of points awarded to that
Composition interpretation.
"""
m_dict = m_dict or {}
def _parse_chomp_and_rank(m, f, m_dict, m_points):
"""
A helper method for formula parsing that helps in interpreting and
ranking indeterminate formulas
Author: Anubhav Jain
Args:
m: A regex match, with the first group being the element and
the second group being the amount
f: The formula part containing the match
m_dict: A symbol:amt dictionary from the previously parsed
formula
m_points: Number of points gained from the previously parsed
formula
Returns:
A tuple of (f, m_dict, points) where m_dict now contains data
from the match and the match has been removed (chomped) from
the formula f. The "goodness" of the match determines the
number of points returned for chomping. Returns
(None, None, None) if no element could be found...
"""
points = 0
# Points awarded if the first letter of the element is correctly
# specified as a capital
points_first_capital = 100
# Points awarded if the second letter of the element is correctly
# specified as lowercase
points_second_lowercase = 100
# get element and amount from regex match
el = m.group(1)
if len(el) > 2 or len(el) < 1:
raise ValueError("Invalid element symbol entered!")
amt = float(m.group(2)) if m.group(2).strip() != "" else 1
# convert the element string to proper [uppercase,lowercase] format
# and award points if it is already in that format
char1 = el[0]
char2 = el[1] if len(el) > 1 else ""
if char1 == char1.upper():
points += points_first_capital
if char2 and char2 == char2.lower():
points += points_second_lowercase
el = char1.upper() + char2.lower()
# if it's a valid element, chomp and add to the points
if Element.is_valid_symbol(el):
if el in m_dict:
m_dict[el] += amt * factor
else:
m_dict[el] = amt * factor
return f.replace(m.group(), "", 1), m_dict, m_points + points
# else return None
return None, None, None
fuzzy_formula = fuzzy_formula.strip()
if len(fuzzy_formula) == 0:
# The entire formula has been parsed into m_dict. Return the
# corresponding Composition and number of points
if m_dict:
yield (Composition.from_dict(m_dict), m_points)
else:
# if there is a parenthesis, remove it and match the remaining stuff
# with the appropriate factor
for mp in re.finditer(r"\(([^\(\)]+)\)([\.\d]*)", fuzzy_formula):
mp_points = m_points
mp_form = fuzzy_formula.replace(mp.group(), " ", 1)
mp_dict = dict(m_dict)
mp_factor = 1 if mp.group(2) == "" else float(mp.group(2))
# Match the stuff inside the parenthesis with the appropriate
# factor
for match in Composition._comps_from_fuzzy_formula(mp.group(1), mp_dict, mp_points, factor=mp_factor):
only_me = True
# Match the stuff outside the parentheses and return the
# sum.
for match2 in Composition._comps_from_fuzzy_formula(mp_form, mp_dict, mp_points, factor=1):
only_me = False
yield (match[0] + match2[0], match[1] + match2[1])
# if the stuff inside the parenthesis is nothing, then just
# return the stuff inside the parentheses
if only_me:
yield match
return
# try to match the single-letter elements
m1 = re.match(r"([A-z])([\.\d]*)", fuzzy_formula)
if m1:
m_points1 = m_points
m_form1 = fuzzy_formula
m_dict1 = dict(m_dict)
(m_form1, m_dict1, m_points1) = _parse_chomp_and_rank(m1, m_form1, m_dict1, m_points1)
if m_dict1:
# there was a real match
for match in Composition._comps_from_fuzzy_formula(m_form1, m_dict1, m_points1, factor):
yield match
# try to match two-letter elements
m2 = re.match(r"([A-z]{2})([\.\d]*)", fuzzy_formula)
if m2:
m_points2 = m_points
m_form2 = fuzzy_formula
m_dict2 = dict(m_dict)
(m_form2, m_dict2, m_points2) = _parse_chomp_and_rank(m2, m_form2, m_dict2, m_points2)
if m_dict2:
# there was a real match
for match in Composition._comps_from_fuzzy_formula(m_form2, m_dict2, m_points2, factor):
yield match
def reduce_formula(sym_amt, iupac_ordering: bool = False) -> Tuple[str, float]:
"""
Helper method to reduce a sym_amt dict to a reduced formula and factor.
Args:
sym_amt (dict): {symbol: amount}.
iupac_ordering (bool, optional): Whether to order the
formula by the iupac "electronegativity" series, defined in
Table VI of "Nomenclature of Inorganic Chemistry (IUPAC
Recommendations 2005)". This ordering effectively follows
the groups and rows of the periodic table, except the
Lanthanides, Actinides and hydrogen. Note that polyanions
will still be determined based on the true electronegativity of
the elements.
Returns:
(reduced_formula, factor).
"""
syms = sorted(sym_amt.keys(), key=lambda x: [get_el_sp(x).X, x])
syms = list(filter(lambda x: abs(sym_amt[x]) > Composition.amount_tolerance, syms))
factor = 1
# Enforce integers for doing gcd.
if all((int(i) == i for i in sym_amt.values())):
factor = abs(gcd(*(int(i) for i in sym_amt.values())))
polyanion = []
# if the composition contains a poly anion
if len(syms) >= 3 and get_el_sp(syms[-1]).X - get_el_sp(syms[-2]).X < 1.65:
poly_sym_amt = {syms[i]: sym_amt[syms[i]] / factor for i in [-2, -1]}
(poly_form, poly_factor) = reduce_formula(poly_sym_amt, iupac_ordering=iupac_ordering)
if poly_factor != 1:
polyanion.append("({}){}".format(poly_form, int(poly_factor)))
syms = syms[: len(syms) - 2 if polyanion else len(syms)]
if iupac_ordering:
syms = sorted(syms, key=lambda x: [get_el_sp(x).iupac_ordering, x])
reduced_form = []
for s in syms:
normamt = sym_amt[s] * 1.0 / factor
reduced_form.append(s)
reduced_form.append(formula_double_format(normamt))
reduced_form = "".join(reduced_form + polyanion) # type: ignore
return reduced_form, factor # type: ignore
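# Worked example (amounts chosen for illustration): the gcd of the amounts is
# pulled out as the factor and the symbols are ordered by electronegativity.
#
#   reduce_formula({"Li": 4, "Fe": 4, "P": 4, "O": 16})
#   # -> ('LiFePO4', 4)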
class ChemicalPotential(dict, MSONable):
"""
Class to represent set of chemical potentials. Can be:
multiplied/divided by a Number
multiplied by a Composition (returns an energy)
added/subtracted with other ChemicalPotentials.
"""
def __init__(self, *args, **kwargs):
"""
Args:
*args, **kwargs: any valid dict init arguments
"""
d = dict(*args, **kwargs)
super().__init__((get_el_sp(k), v) for k, v in d.items())
if len(d) != len(self):
raise ValueError("Duplicate potential specified")
def __mul__(self, other):
if isinstance(other, numbers.Number):
return ChemicalPotential({k: v * other for k, v in self.items()})
raise NotImplementedError()
__rmul__ = __mul__
def __truediv__(self, other):
if isinstance(other, numbers.Number):
return ChemicalPotential({k: v / other for k, v in self.items()})
raise NotImplementedError()
__div__ = __truediv__
def __sub__(self, other):
if isinstance(other, ChemicalPotential):
els = set(self.keys()).union(other.keys())
return ChemicalPotential({e: self.get(e, 0) - other.get(e, 0) for e in els})
raise NotImplementedError()
def __add__(self, other):
if isinstance(other, ChemicalPotential):
els = set(self.keys()).union(other.keys())
return ChemicalPotential({e: self.get(e, 0) + other.get(e, 0) for e in els})
raise NotImplementedError()
def get_energy(self, composition: Composition, strict: bool = True) -> float:
"""
Calculates the energy of a composition.
Args:
composition (Composition): input composition
strict (bool): Whether all potentials must be specified
"""
if strict and set(composition.keys()) > set(self.keys()):
s = set(composition.keys()) - set(self.keys())
raise ValueError("Potentials not specified for {}".format(s))
return sum(self.get(k, 0) * v for k, v in composition.items())
def __repr__(self):
return "ChemPots: " + super().__repr__()
if __name__ == "__main__":
import doctest
doctest.testmod()
|
davidwaroquiers/pymatgen
|
pymatgen/core/composition.py
|
Python
|
mit
| 49,735
|
[
"pymatgen"
] |
5872998e537fa2b96ff6a7592ed19970ee1b986a09c8046eb0af67722e06a8cc
|
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""Script for updating tensorflow/tools/compatibility/renames_v2.py.
To update renames_v2.py, run:
bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
pyformat --in_place third_party/tensorflow/tools/compatibility/renames_v2.py
"""
# pylint: enable=line-too-long
import sys
import six
import tensorflow as tf
from tensorflow import python as tf_python # pylint: disable=unused-import
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import app
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.compatibility import all_renames_v2
# This import is needed so that TensorFlow python modules are in sys.modules.
_OUTPUT_FILE_PATH = 'third_party/tensorflow/tools/compatibility/renames_v2.py'
_FILE_HEADER = """# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
\"\"\"List of renames to apply when converting from TF 1.0 to TF 2.0.
THIS FILE IS AUTOGENERATED: To update, please run:
bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
pyformat --in_place third_party/tensorflow/tools/compatibility/renames_v2.py
This file should be updated whenever endpoints are deprecated.
\"\"\"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""
def get_canonical_name(v2_names, v1_name):
if v2_names:
return v2_names[0]
return 'compat.v1.%s' % v1_name
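# Illustrative behaviour (hypothetical inputs): the first v2 name wins if one
# exists, otherwise the v1 name is mapped under compat.v1.
#
#   get_canonical_name(['math.abs'], 'abs')   # -> 'math.abs'
#   get_canonical_name([], 'placeholder')     # -> 'compat.v1.placeholder'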
def get_all_v2_names():
"""Get a set of function/class names available in TensorFlow 2.0."""
v2_names = set() # All op names in TensorFlow 2.0
def visit(unused_path, unused_parent, children):
"""Visitor that collects TF 2.0 names."""
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v2 = tf_export.get_v2_names(attr)
for name in api_names_v2:
v2_names.add(name)
visitor = public_api.PublicAPIVisitor(visit)
visitor.do_not_descend_map['tf'].append('contrib')
visitor.do_not_descend_map['tf.compat'] = ['v1']
traverse.traverse(tf.compat.v2, visitor)
return v2_names
def collect_constant_renames():
"""Looks for constants that need to be renamed in TF 2.0.
Returns:
Set of tuples of the form (current name, new name).
"""
renames = set()
for module in sys.modules.values():
constants_v1_list = tf_export.get_v1_constants(module)
constants_v2_list = tf_export.get_v2_constants(module)
# _tf_api_constants attribute contains a list of tuples:
# (api_names_list, constant_name)
# We want to find API names that are in V1 but not in V2 for the same
# constant_names.
# First, we convert constants_v1_list and constants_v2_list to
# dictionaries for easier lookup.
constants_v1 = {constant_name: api_names
for api_names, constant_name in constants_v1_list}
constants_v2 = {constant_name: api_names
for api_names, constant_name in constants_v2_list}
# Second, we look for names that are in V1 but not in V2.
for constant_name, api_names_v1 in constants_v1.items():
api_names_v2 = constants_v2[constant_name]
for name in api_names_v1:
if name not in api_names_v2:
renames.add((name, get_canonical_name(api_names_v2, name)))
return renames
def collect_function_renames():
"""Looks for functions/classes that need to be renamed in TF 2.0.
Returns:
Set of tuples of the form (current name, new name).
"""
# Set of rename lines to write to output file in the form:
# 'tf.deprecated_name': 'tf.canonical_name'
renames = set()
def visit(unused_path, unused_parent, children):
"""Visitor that collects rename strings to add to rename_line_set."""
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v1 = tf_export.get_v1_names(attr)
api_names_v2 = tf_export.get_v2_names(attr)
deprecated_api_names = set(api_names_v1) - set(api_names_v2)
for name in deprecated_api_names:
renames.add((name, get_canonical_name(api_names_v2, name)))
visitor = public_api.PublicAPIVisitor(visit)
visitor.do_not_descend_map['tf'].append('contrib')
visitor.do_not_descend_map['tf.compat'] = ['v1', 'v2']
traverse.traverse(tf, visitor)
# It is possible that a different function is exported with the
# same name, e.g., when creating a different function to
# rename arguments. Exclude it from renames in this case.
v2_names = get_all_v2_names()
renames = set((name, new_name) for name, new_name in renames
if name not in v2_names)
return renames
def get_rename_line(name, canonical_name):
return ' \'tf.%s\': \'tf.%s\'' % (name, canonical_name)
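# Example of the rename-line format (hypothetical names); the result is an
# indented entry for the generated renames dict literal:
#
#   get_rename_line('placeholder', 'compat.v1.placeholder')
#   # -> "'tf.placeholder': 'tf.compat.v1.placeholder'" (with leading indentation)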
def update_renames_v2(output_file_path):
"""Writes a Python dictionary mapping deprecated to canonical API names.
Args:
output_file_path: File path to write output to. Any existing contents
would be replaced.
"""
function_renames = collect_function_renames()
constant_renames = collect_constant_renames()
all_renames = function_renames.union(constant_renames)
manual_renames = set(
all_renames_v2.manual_symbol_renames.keys())
# List of rename lines to write to output file in the form:
# 'tf.deprecated_name': 'tf.canonical_name'
rename_lines = [
get_rename_line(name, canonical_name)
for name, canonical_name in all_renames
if 'tf.' + six.ensure_str(name) not in manual_renames
]
renames_file_text = '%srenames = {\n%s\n}\n' % (
_FILE_HEADER, ',\n'.join(sorted(rename_lines)))
file_io.write_string_to_file(output_file_path, renames_file_text)
def main(unused_argv):
update_renames_v2(_OUTPUT_FILE_PATH)
if __name__ == '__main__':
app.run(main=main)
|
karllessard/tensorflow
|
tensorflow/tools/compatibility/update/generate_v2_renames_map.py
|
Python
|
apache-2.0
| 7,410
|
[
"VisIt"
] |
0579eb53e37f50995f5b6182e9924a7987f3bf68ce5ada20c933d4c383e8737a
|
#! /usr/bin/env python
"""Unit tests for landlab.io.netcdf module."""
import numpy as np
import pytest
import xarray as xr
from numpy.testing import assert_array_equal
from landlab import HexModelGrid, RasterModelGrid
from landlab.io.netcdf import from_netcdf, to_netcdf
def test_netcdf_write_int64(tmpdir, format):
grid = RasterModelGrid((4, 3))
grid.add_field("topographic__elevation", np.arange(12, dtype=np.int64), at="node")
with tmpdir.as_cwd():
to_netcdf(grid, "test.nc", format=format)
actual = xr.open_dataset("test.nc")
values = actual["at_node:topographic__elevation"]
assert_array_equal(values, grid.at_node["topographic__elevation"])
if format == "NETCDF4":
assert values.dtype == "int64"
else:
assert values.dtype == "int32"
def test_netcdf_write_uint8(tmpdir, format):
grid = RasterModelGrid((4, 3))
grid.add_field("topographic__elevation", np.arange(12, dtype=np.uint8), at="node")
with tmpdir.as_cwd():
to_netcdf(grid, "test.nc", format=format)
actual = xr.open_dataset("test.nc")["at_node:topographic__elevation"]
assert_array_equal(actual, grid.at_node["topographic__elevation"])
assert actual.dtype == np.uint8 if format == "NETCDF4" else np.int8
@pytest.mark.parametrize("dtype", ("int32", "float32", "float64"))
def test_netcdf_write_dtype(tmpdir, format, dtype):
"""Test write_netcdf with a grid that has an uint8 field."""
grid = RasterModelGrid((4, 3))
grid.add_field("topographic__elevation", np.arange(12, dtype=dtype), at="node")
with tmpdir.as_cwd():
to_netcdf(grid, "test.nc", format=format)
actual = xr.open_dataset("test.nc")["at_node:topographic__elevation"]
assert_array_equal(actual, grid.at_node["topographic__elevation"])
assert actual.dtype == dtype
def test_at_keyword(tmpdir, at):
grid = RasterModelGrid((4, 3))
name = "topographic__elevation"
for src_at in {"node", "link", "patch", "corner", "face", "cell"}:
grid.add_field(name, grid.ones(at=src_at) * 10.0, at=src_at)
include = "at_{0}:*".format(at)
with tmpdir.as_cwd():
to_netcdf(grid, "test.nc", format="NETCDF4", include=include)
with xr.open_dataset("test.nc") as actual:
actual_fields = set(
[name for name in actual.variables if name.startswith("at_")]
)
nc_name = "at_{0}:{1}".format(at, name)
assert actual_fields == set([nc_name])
assert_array_equal(actual[nc_name], getattr(grid, "at_" + at)[name])
def test_raster_model_grid(tmpdir, format):
grid = RasterModelGrid((4, 3), xy_spacing=(2, 5), xy_of_lower_left=(-2.0, 10.0))
with tmpdir.as_cwd():
to_netcdf(grid, "test.nc", format=format)
actual = from_netcdf("test.nc")
assert (actual.dx, actual.dy) == (grid.dx, grid.dy)
assert actual.xy_of_lower_left == grid.xy_of_lower_left
@pytest.mark.parametrize("orientation", ("horizontal", "vertical"))
@pytest.mark.parametrize("node_layout", ("rect", "hex"))
def test_hex_model_grid(tmpdir, format, orientation, node_layout):
grid = HexModelGrid(
shape=(4, 5),
spacing=2.0,
xy_of_lower_left=(-3, -5),
orientation=orientation,
node_layout=node_layout,
)
with tmpdir.as_cwd():
to_netcdf(grid, "test.nc", format=format)
actual = from_netcdf("test.nc")
assert actual.spacing == grid.spacing
assert actual.xy_of_lower_left == grid.xy_of_lower_left
assert actual.orientation == grid.orientation
assert actual.node_layout == grid.node_layout
def test_layers(tmpdir, format):
grid = RasterModelGrid((3, 4))
grid.event_layers.add(10.0, water_depth=[1.0, 2.0])
with tmpdir.as_cwd():
to_netcdf(grid, "test.nc", include="at_layer*", format=format)
actual = xr.open_dataset("test.nc")
actual_fields = set(
[name for name in actual.variables if name.startswith("at_")]
)
assert actual_fields == set(["at_layer:water_depth"])
@pytest.mark.parametrize("mode", ("w", "a"))
def test_with_and_without_time(tmpdir, format, mode):
grid = RasterModelGrid((3, 4))
grid.add_full("elevation", 1.0, at="node")
with tmpdir.as_cwd():
to_netcdf(grid, "test-without-time.nc", format=format, mode=mode)
with xr.open_dataset("test-without-time.nc") as actual:
assert "time" not in actual.dims
assert "time" not in actual.variables
assert actual["at_node:elevation"].dims == ("node",)
to_netcdf(grid, "test-with-time.nc", format=format, time=10.0, mode=mode)
with xr.open_dataset("test-with-time.nc") as actual:
assert "time" in actual.dims
assert "time" in actual.variables
assert actual["time"] == [10.0]
assert actual["at_node:elevation"].dims == ("time", "node")
def test_append_with_new_field(tmpdir, format):
grid = RasterModelGrid((3, 4))
grid.add_full("elevation", 1.0, at="node")
with tmpdir.as_cwd():
to_netcdf(grid, "test.nc", format=format)
grid.add_full("temperature", 2.0, at="node")
to_netcdf(grid, "test.nc", format=format, mode="a", time=10.0)
|
amandersillinois/landlab
|
tests/io/netcdf/test_to_netcdf.py
|
Python
|
mit
| 5,322
|
[
"NetCDF"
] |
a59a78e2a675c1a7bea5c50f54e9844bb70f11858f340f290f2db44ffed7854f
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS that utilize the
progress page.
"""
from contextlib import contextmanager
import ddt
from six.moves import range
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ...pages.common.logout import LogoutPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.instructor_dashboard import InstructorDashboardPage, StudentSpecificAdmin
from ...pages.lms.problem import ProblemPage
from ...pages.lms.progress import ProgressPage
from ...pages.studio.overview import CourseOutlinePage as StudioCourseOutlinePage
from ...pages.studio.utils import type_in_codemirror
from ...pages.studio.xblock_editor import XBlockEditorView
from ..helpers import (
UniqueCourseTest,
auto_auth,
create_multiple_choice_problem,
create_multiple_choice_xml,
get_modal_alert
)
class ProgressPageBaseTest(UniqueCourseTest):
"""
Provides utility methods for tests retrieving
scores from the progress page.
"""
USERNAME = "STUDENT_TESTER"
EMAIL = "student101@example.com"
SECTION_NAME = 'Test Section 1'
SUBSECTION_NAME = 'Test Subsection 1'
UNIT_NAME = 'Test Unit 1'
PROBLEM_NAME = 'Test Problem 1'
PROBLEM_NAME_2 = 'Test Problem 2'
def setUp(self):
super(ProgressPageBaseTest, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.problem_page = ProblemPage(self.browser)
self.progress_page = ProgressPage(self.browser, self.course_id)
self.logout_page = LogoutPage(self.browser)
self.studio_course_outline = StudioCourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Install a course with problems
self.course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
self.problem1 = create_multiple_choice_problem(self.PROBLEM_NAME)
self.problem2 = create_multiple_choice_problem(self.PROBLEM_NAME_2)
self.course_fix.add_children(
XBlockFixtureDesc('chapter', self.SECTION_NAME).add_children(
XBlockFixtureDesc('sequential', self.SUBSECTION_NAME).add_children(
XBlockFixtureDesc('vertical', self.UNIT_NAME).add_children(self.problem1, self.problem2)
)
),
XBlockFixtureDesc('chapter', "Lab Section").add_children(
XBlockFixtureDesc('sequential', "Lab Subsection").add_children(
XBlockFixtureDesc('vertical', "Lab Unit").add_children(
create_multiple_choice_problem("Lab Exercise")
)
)
)
).install()
# Auto-auth register for the course.
auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)
def _answer_problem_correctly(self):
"""
Submit a correct answer to the problem.
"""
self._answer_problem(choice=2)
def _answer_problem(self, choice):
"""
Submit the given choice for the problem.
"""
self.courseware_page.go_to_sequential_position(1)
self.problem_page.click_choice('choice_choice_{}'.format(choice))
self.problem_page.click_submit()
def _get_section_score(self):
"""
        Return the section score from the progress page.
"""
self.progress_page.visit()
return self.progress_page.section_score(self.SECTION_NAME, self.SUBSECTION_NAME)
def _get_problem_scores(self):
"""
Return a list of scores from the progress page.
"""
self.progress_page.visit()
return self.progress_page.scores(self.SECTION_NAME, self.SUBSECTION_NAME)
@contextmanager
def _logged_in_session(self, staff=False):
"""
Ensure that the user is logged in and out appropriately at the beginning
and end of the current test. But if there's an error, don't log out
before capturing a screenshot.
"""
self.logout_page.visit()
if staff:
auto_auth(self.browser, "STAFF_TESTER", "staff101@example.com", True, self.course_id)
else:
auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)
yield
self.logout_page.visit()
@ddt.ddt
class PersistentGradesTest(ProgressPageBaseTest):
"""
Test that grades for completed assessments are persisted
when various edits are made.
"""
shard = 22
def setUp(self):
super(PersistentGradesTest, self).setUp()
self.instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
def _change_subsection_structure(self):
"""
Adds a unit to the subsection, which
should not affect a persisted subsection grade.
"""
self.studio_course_outline.visit()
subsection = self.studio_course_outline.section(self.SECTION_NAME).subsection(self.SUBSECTION_NAME)
subsection.expand_subsection()
subsection.add_unit()
self.studio_course_outline.wait_for_ajax()
subsection.publish()
def _set_staff_lock_on_subsection(self, locked):
"""
Sets staff lock for a subsection, which should hide the
subsection score from students on the progress page.
"""
self.studio_course_outline.visit()
subsection = self.studio_course_outline.section_at(0).subsection_at(0)
subsection.set_staff_lock(locked)
self.assertEqual(subsection.has_staff_lock_warning, locked)
def _get_problem_in_studio(self):
"""
Returns the editable problem component in studio,
along with its container unit, so any changes can
be published.
"""
self.studio_course_outline.visit()
self.studio_course_outline.section_at(0).subsection_at(0).expand_subsection()
unit = self.studio_course_outline.section_at(0).subsection_at(0).unit(self.UNIT_NAME).go_to()
component = unit.xblocks[1]
return unit, component
def _change_weight_for_problem(self):
"""
Changes the weight of the problem, which should not affect
persisted grades.
"""
unit, component = self._get_problem_in_studio()
component.edit()
component_editor = XBlockEditorView(self.browser, component.locator)
component_editor.set_field_value_and_save('Problem Weight', 5)
unit.publish()
def _change_correct_answer_for_problem(self, new_correct_choice=1):
"""
Changes the correct answer of the problem.
"""
unit, component = self._get_problem_in_studio()
modal = component.edit()
modified_content = create_multiple_choice_xml(correct_choice=new_correct_choice)
type_in_codemirror(self, 0, modified_content)
modal.q(css='.action-save').click()
unit.publish()
def _student_admin_action_for_problem(self, action_button, has_cancellable_alert=False):
"""
As staff, clicks the "delete student state" button,
deleting the student user's state for the problem.
"""
self.instructor_dashboard_page.visit()
student_admin_section = self.instructor_dashboard_page.select_student_admin(StudentSpecificAdmin)
student_admin_section.set_student_email_or_username(self.USERNAME)
student_admin_section.set_problem_location(self.problem1.locator)
getattr(student_admin_section, action_button).click()
if has_cancellable_alert:
alert = get_modal_alert(student_admin_section.browser)
alert.accept()
alert = get_modal_alert(student_admin_section.browser)
alert.dismiss()
return student_admin_section
def test_progress_page_shows_scored_problems(self):
"""
Checks the progress page before and after answering
the course's first problem correctly.
"""
with self._logged_in_session():
self.assertEqual(self._get_problem_scores(), [(0, 1), (0, 1)])
self.assertEqual(self._get_section_score(), (0, 2))
self.courseware_page.visit()
self._answer_problem_correctly()
self.assertEqual(self._get_problem_scores(), [(1, 1), (0, 1)])
self.assertEqual(self._get_section_score(), (1, 2))
@ddt.data(
_change_subsection_structure,
_change_weight_for_problem
)
def test_content_changes_do_not_change_score(self, edit):
with self._logged_in_session():
self.courseware_page.visit()
self._answer_problem_correctly()
with self._logged_in_session(staff=True):
edit(self)
with self._logged_in_session():
self.assertEqual(self._get_problem_scores(), [(1, 1), (0, 1)])
self.assertEqual(self._get_section_score(), (1, 2))
def test_visibility_change_affects_score(self):
with self._logged_in_session():
self.courseware_page.visit()
self._answer_problem_correctly()
with self._logged_in_session(staff=True):
self._set_staff_lock_on_subsection(True)
with self._logged_in_session():
self.assertEqual(self._get_problem_scores(), None)
self.assertEqual(self._get_section_score(), None)
with self._logged_in_session(staff=True):
self._set_staff_lock_on_subsection(False)
with self._logged_in_session():
self.assertEqual(self._get_problem_scores(), [(1, 1), (0, 1)])
self.assertEqual(self._get_section_score(), (1, 2))
def test_delete_student_state_affects_score(self):
with self._logged_in_session():
self.courseware_page.visit()
self._answer_problem_correctly()
with self._logged_in_session(staff=True):
self._student_admin_action_for_problem('delete_state_button', has_cancellable_alert=True)
with self._logged_in_session():
self.assertEqual(self._get_problem_scores(), [(0, 1), (0, 1)])
self.assertEqual(self._get_section_score(), (0, 2))
class SubsectionGradingPolicyBase(ProgressPageBaseTest):
"""
Base class for testing a subsection and its impact to
the progress page
"""
def setUp(self):
super(SubsectionGradingPolicyBase, self).setUp()
self._set_policy_for_subsection("Homework", 0)
self._set_policy_for_subsection("Lab", 1)
def _set_policy_for_subsection(self, policy, section=0):
"""
Set the grading policy for the first subsection in the specified section.
If a section index is not provided, 0 is assumed.
"""
with self._logged_in_session(staff=True):
self.studio_course_outline.visit()
modal = self.studio_course_outline.section_at(section).subsection_at(0).edit()
modal.policy = policy
modal.save()
def _check_scores_and_page_text(self, problem_scores, section_score, text):
"""
Asserts that the given problem and section scores, and text,
appear on the progress page.
"""
self.assertEqual(self._get_problem_scores(), problem_scores)
self.assertEqual(self._get_section_score(), section_score)
self.assertTrue(self.progress_page.text_on_page(text))
def _check_tick_text(self, index, sr_text, label, label_hidden=True):
"""
Check the label and sr text for a horizontal (X-axis) tick.
"""
self.assertEqual(sr_text, self.progress_page.x_tick_sr_text(index))
self.assertEqual([label, 'true' if label_hidden else None], self.progress_page.x_tick_label(index))
class SubsectionGradingPolicyA11yTest(SubsectionGradingPolicyBase):
"""
Class to test the accessibility of subsection grading
"""
a11y = True
def test_axis_a11y(self):
"""
Tests that the progress chart axes have appropriate a11y (screenreader) markup.
"""
with self._logged_in_session():
self.courseware_page.visit()
# Answer the first HW problem (the unit contains 2 problems, only one will be answered correctly)
self._answer_problem_correctly()
self.courseware_page.click_next_button_on_top()
# Answer the first Lab problem (unit only contains a single problem)
self._answer_problem_correctly()
self.progress_page.a11y_audit.config.set_rules({
"ignore": [
'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865
'region', # TODO: AC-932
]
})
self.progress_page.visit()
# Verify the basic a11y of the progress page
self.progress_page.a11y_audit.check_for_accessibility_errors()
# Verify that y-Axis labels are aria-hidden
self.assertEqual(['100%', 'true'], self.progress_page.y_tick_label(0))
self.assertEqual(['0%', 'true'], self.progress_page.y_tick_label(1))
self.assertEqual(['Pass 50%', 'true'], self.progress_page.y_tick_label(2))
# Verify x-Axis labels and sr-text
self._check_tick_text(0, [u'Homework 1 - Test Subsection 1 - 50% (1/2)'], u'HW 01')
# Homeworks 2-10 are checked in the for loop below.
self._check_tick_text(
10,
[u'Homework 11 Unreleased - 0% (?/?)', u'The lowest 2 Homework scores are dropped.'],
u'HW 11'
)
self._check_tick_text(
11,
[u'Homework 12 Unreleased - 0% (?/?)', u'The lowest 2 Homework scores are dropped.'],
u'HW 12'
)
self._check_tick_text(12, [u'Homework Average = 5%'], u'HW Avg')
self._check_tick_text(13, [u'Lab 1 - Lab Subsection - 100% (1/1)'], u'Lab 01')
# Labs 2-10 are checked in the for loop below.
self._check_tick_text(
23,
[u'Lab 11 Unreleased - 0% (?/?)', u'The lowest 2 Lab scores are dropped.'],
u'Lab 11'
)
self._check_tick_text(
24,
[u'Lab 12 Unreleased - 0% (?/?)', u'The lowest 2 Lab scores are dropped.'],
u'Lab 12'
)
self._check_tick_text(25, [u'Lab Average = 10%'], u'Lab Avg')
self._check_tick_text(26, [u'Midterm Exam = 0%'], u'Midterm')
self._check_tick_text(27, [u'Final Exam = 0%'], u'Final')
self._check_tick_text(
28,
[u'Homework = 0.75% of a possible 15.00%', u'Lab = 1.50% of a possible 15.00%'],
u'Total',
False # The label "Total" should NOT be aria-hidden
)
# The grading policy has 12 Homeworks and 12 Labs. Most of them are unpublished,
# with no additional information.
for i in range(1, 10):
self._check_tick_text(
i,
[u'Homework {index} Unreleased - 0% (?/?)'.format(index=i + 1)],
u'HW 0{index}'.format(index=i + 1) if i < 9 else u'HW {index}'.format(index=i + 1)
)
self._check_tick_text(
i + 13,
[u'Lab {index} Unreleased - 0% (?/?)'.format(index=i + 1)],
u'Lab 0{index}'.format(index=i + 1) if i < 9 else u'Lab {index}'.format(index=i + 1)
)
# Verify the overall score. The first element in the array is the sr-only text, and the
# second is the total text (including the sr-only text).
self.assertEqual(['Overall Score', 'Overall Score\n2%'], self.progress_page.graph_overall_score())
class ProgressPageA11yTest(ProgressPageBaseTest):
"""
Class to test the accessibility of the progress page.
"""
a11y = True
def test_progress_page_a11y(self):
"""
Test the accessibility of the progress page.
"""
self.progress_page.a11y_audit.config.set_rules({
"ignore": [
'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865
'region', # TODO: AC-932
]
})
self.progress_page.visit()
self.progress_page.a11y_audit.check_for_accessibility_errors()
|
appsembler/edx-platform
|
common/test/acceptance/tests/lms/test_progress_page.py
|
Python
|
agpl-3.0
| 16,675
|
[
"VisIt"
] |
a83a3b22870b84c4748f47a47a2f2eecafa78b5204f1d41d9c4d84004a84b35a
|
from __future__ import unicode_literals, absolute_import
import logging
from functools import lru_cache
from catpy.exceptions import NoMatchingNamesException, MultipleMatchingNamesException
from .base import CatmaidClientApplication
logger = logging.getLogger(__name__)
def is_name():
pass
def is_id():
pass
def name_to_id(fn):
def wrapper(instance, id_or_name, *args, **kwargs):
try:
return int(id_or_name)
except ValueError:
if isinstance(id_or_name, str):
return fn(instance, id_or_name, *args, **kwargs)
raise TypeError(f"Argument was neither integer ID nor string name: {type(id_or_name)}({id_or_name})")
return wrapper
def id_to_name(fn):
def wrapper(instance, id_or_name, *args, **kwargs):
try:
int_id = int(id_or_name)
except ValueError:
if isinstance(id_or_name, str):
return id_or_name
else:
raise TypeError(f"Argument was neither integer ID nor string name: {type(id_or_name)}({id_or_name})")
else:
return fn(instance, int_id, *args, **kwargs)
return wrapper
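# Illustrative note (added; not part of the original module): the decorators
# above let a lookup method accept either an integer ID or a string name.
# A rough sketch of the behaviour, using a hypothetical method:
#
#     class Example:
#         @name_to_id
#         def get_id(self, name):       # body only runs for string names
#             return {"axon": 42}[name]
#
#     Example().get_id(42)      # -> 42, the int short-circuits the lookup
#     Example().get_id("axon")  # -> 42, the string is resolved by the body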
class NameIdMapping:
def __init__(self, name_id_pairs):
self.name_to_id = dict()
self.id_to_name = dict()
counter = 0
for name, id_ in name_id_pairs:
id_ = int(id_)
self.name_to_id[name] = id_
self.id_to_name[id_] = name
counter += 1
if counter != len(self.name_to_id) or counter != len(self.id_to_name):
raise ValueError("Non-unique names or IDs; cannot make 2-way mapping")
class NameResolver(CatmaidClientApplication):
"""Catmaid client application which looks up integer database IDs for string names for various objects.
For convenience, lookup methods short-circuit if given an int (i.e. you can transparently use either
the ID or the name of an object).
HTTP responses are cached where possible, so there may be a performance benefit to sharing NameResolver instances.
Furthermore, subsequent lookups of IDs of the same object type (e.g. stack, user)
should be much faster than the first.
Lookup methods ensure that one object matches the given name/title for the given project,
raising a NoMatchingNamesException if there are zero matches,
and a MultipleMatchingNamesException if there are more than one,
both of which subclass NameResolverException, which subclasses ValueError.
"""
def _ensure_one(self, match_set, name, obj):
if len(match_set) == 0:
raise NoMatchingNamesException(
"Zero {} objects found with name {} in project {}".format(
obj, repr(name), self.project_id
)
)
elif len(match_set) == 1:
return match_set.pop()
else:
raise MultipleMatchingNamesException(
"Multiple {} objects ({}) found with name {} in project {}".format(
obj,
", ".join(str(i) for i in sorted(match_set)),
name,
self.project_id,
)
)
@lru_cache(1)
def _stacks(self):
logger.debug("Populating _stacks cache")
return self.get((self.project_id, "stacks"))
@name_to_id
def get_stack_id(self, title):
"""Get the ID of the stack with the given title.
Parameters
----------
title : str or int
Stack title
Returns
-------
int
"""
matching_ids = set()
for stack in self._stacks():
if stack["title"] == title:
matching_ids.add(stack["id"])
return self._ensure_one(matching_ids, title, "stack")
@lru_cache(1)
def _user_list(self):
logger.debug("Populating _user_list cache")
return self.get("user-list")
@name_to_id
def get_user_id(self, name):
"""Get the ID of the user with the given login or full name
Parameters
----------
name : str or int
Returns
-------
int
"""
matching_ids = set()
for user in self._user_list():
if name in [user["login"], user["full_name"]]:
matching_ids.add(user["id"])
return self._ensure_one(matching_ids, name, "user")
def get_neuron_names(self, *skeleton_ids):
"""Get a dict of skeleton IDs to neuron names.
Parameters
----------
skeleton_ids
Returns
-------
dict of int to str
"""
# todo: lru cache
return self.post(
(self.project_id, "skeleton", "neuronnames"), {"skids": skeleton_ids}
)
@id_to_name
def get_neuron_name(self, skeleton_id):
"""Get the neuron name associated with the given skeleton ID.
Utilises an LRU cache and can handle being given the name (just returns the name),
so useful for ensuring that a given argument resolves to a name either way.
Parameters
----------
skeleton_id
Returns
-------
str
"""
return self.get((self.project_id, "skeleton", skeleton_id, "neuronname"))[
"neuronname"
]
@lru_cache(1)
def _list_annotations(self):
logger.debug("Populating _list_annotations cache")
response = self.get((self.project_id, "annotations"))
return NameIdMapping(
(obj["name"], obj["id"]) for obj in response["annotations"]
)
@name_to_id
def get_annotation_id(self, annotation_name):
return self._list_annotations().name_to_id[annotation_name]
@id_to_name
def get_annotation_name(self, annotation_id):
return self._list_annotations().id_to_name[int(annotation_id)]
@lru_cache(1)
def _list_volumes(self):
logger.debug("Populating _list_volumes cache")
response = self.get((self.project_id, "volumes"))
return NameIdMapping((name, id_) for id_, name, *_ in response["data"])
@id_to_name
def get_volume_name(self, volume_id):
return self._list_volumes().id_to_name[int(volume_id)]
@name_to_id
def get_volume_id(self, volume_name):
return self._list_volumes().name_to_id[volume_name]
def clear_cache(self, *names):
if not names:
names = [k for k, v in self.__dict__.items() if hasattr(v, "cache_clear")]
for name in names:
getattr(self, name).cache_clear()
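if __name__ == "__main__":
    # Minimal self-contained sketch (added for illustration): constructing a
    # NameResolver needs a live CATMAID server, so this only exercises the
    # NameIdMapping helper defined above. Because of the relative imports,
    # run it as a module, e.g. `python -m catpy.applications.nameresolver`.
    mapping = NameIdMapping([("neuron_a", 1), ("neuron_b", 2)])
    assert mapping.name_to_id["neuron_a"] == 1
    assert mapping.id_to_name[2] == "neuron_b"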
|
catmaid/catpy
|
catpy/applications/nameresolver.py
|
Python
|
mit
| 6,615
|
[
"NEURON"
] |
6c805f019e43e8c71dfa7deec3eacc1f86fead2778517f11b28319057053f15c
|
import os
import fnmatch
import shutil
from collections import defaultdict
import glob
import codecs
class GlobDirectoryWalker:
# a forward iterator that traverses a directory tree
def __init__(self, directory, pattern="*"):
self.stack = [directory]
self.pattern = pattern
self.files = []
self.index = 0
def __getitem__(self, index):
while True:
try:
file = self.files[self.index]
self.index = self.index + 1
except IndexError:
# pop next directory from stack
self.directory = self.stack.pop()
if os.path.isdir(self.directory):
self.files = os.listdir(self.directory)
else:
self.files = []
self.index = 0
else:
# got a filename
fullname = os.path.join(self.directory, file)
if os.path.isdir(fullname) and not os.path.islink(fullname):
self.stack.append(fullname)
if fnmatch.fnmatch(file, self.pattern):
return fullname
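# Usage sketch (added for illustration; the path below is hypothetical):
#
#     for fname in GlobDirectoryWalker('../../examples', '*.py'):
#         print(fname)
#
# A plain for-loop works even though __iter__ is not defined, because the
# class relies on the legacy __getitem__ iteration protocol; iteration ends
# when the directory stack is exhausted and stack.pop() raises IndexError,
# which the for-loop treats as end-of-sequence.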
def main(rootpath, destdir):
if not os.path.exists(destdir):
shutil.os.makedirs(destdir)
examplesfnames = [fname for fname in GlobDirectoryWalker(rootpath, '*.py')]
additional_files = [
fname
for fname in GlobDirectoryWalker(rootpath, '*.[!py]*')
if not os.path.basename(fname) == '.gitignore'
]
print('Documenting %d examples' % len(examplesfnames))
examplespaths = []
examplesbasenames = []
relativepaths = []
outnames = []
for f in examplesfnames:
path, file = os.path.split(f)
relpath = os.path.relpath(path, rootpath)
if relpath == '.':
relpath = ''
path = os.path.normpath(path)
filebase, ext = os.path.splitext(file)
exname = filebase
if relpath:
exname = relpath.replace('/', '.').replace('\\', '.') + '.' + exname
examplespaths.append(path)
examplesbasenames.append(filebase)
relativepaths.append(relpath)
outnames.append(exname)
# We assume all files are encoded as UTF-8
examplescode = []
for fname in examplesfnames:
with codecs.open(fname, 'rU', encoding='utf-8') as f:
examplescode.append(f.read())
examplesdocs = []
examplesafterdoccode = []
examplesdocumentablenames = []
for code in examplescode:
codesplit = code.split('\n')
comment_lines = 0
for line in codesplit:
if line.startswith('#') or len(line) == 0:
comment_lines += 1
else:
break
codesplit = codesplit[comment_lines:]
readingdoc = False
doc = []
afterdoccode = ''
for i in range(len(codesplit)):
stripped = codesplit[i].strip()
if stripped[:3] == '"""' or stripped[:3] == "'''":
if not readingdoc:
readingdoc = True
else:
afterdoccode = '\n'.join(codesplit[i + 1 :])
break
elif readingdoc:
doc.append(codesplit[i])
else: # No doc
afterdoccode = '\n'.join(codesplit[i:])
break
examplesdocs.append('\n'.join(doc))
examplesafterdoccode.append(afterdoccode)
categories = defaultdict(list)
examples = zip(
examplesfnames,
examplespaths,
examplesbasenames,
examplescode,
examplesdocs,
examplesafterdoccode,
relativepaths,
outnames,
)
    # Get the path relative to the examples directory (not relative to the
    # directory where this file is installed).
if 'BRIAN2_DOCS_EXAMPLE_DIR' in os.environ:
rootdir = os.environ['BRIAN2_DOCS_EXAMPLE_DIR']
else:
rootdir, _ = os.path.split(__file__)
rootdir = os.path.normpath(os.path.join(rootdir, '../../examples'))
eximgpath = os.path.abspath(
os.path.join(rootdir, '../docs_sphinx/resources/examples_images')
)
print('Searching for example images in directory', eximgpath)
for fname, path, basename, code, docs, afterdoccode, relpath, exname in examples:
categories[relpath].append((exname, basename))
title = 'Example: ' + basename
output = '.. currentmodule:: brian2\n\n'
output += '.. ' + basename + ':\n\n'
output += title + '\n' + '=' * len(title) + '\n\n'
note = f'''
.. only:: html
.. |launchbinder| image:: http://mybinder.org/badge.svg
.. _launchbinder: https://mybinder.org/v2/gh/brian-team/brian2-binder/master?filepath=examples/{exname.replace('.', '/')}.ipynb
.. note::
You can launch an interactive, editable version of this
example without installing any local files
using the Binder service (although note that at some times this
may be slow or fail to open): |launchbinder|_
'''
output += note + '\n\n'
output += docs + '\n\n::\n\n'
output += '\n'.join([' ' + line for line in afterdoccode.split('\n')])
output += '\n\n'
eximgpattern = os.path.join(eximgpath, '%s.*.png' % exname)
images = glob.glob(eximgpattern)
for image in sorted(images):
_, image = os.path.split(image)
print('Found example image file', image)
output += '.. image:: ../resources/examples_images/%s\n\n' % image
with codecs.open(os.path.join(destdir, exname + '.rst'), 'w', 'utf-8') as f:
f.write(output)
category_additional_files = defaultdict(list)
for fname in additional_files:
path, file = os.path.split(fname)
relpath = os.path.relpath(path, rootpath)
if relpath == '.':
relpath = ''
full_name = relpath.replace('/', '.').replace('\\', '.') + '.' + file + '.rst'
category_additional_files[relpath].append((file, full_name))
with codecs.open(fname, 'rU', encoding='utf-8') as f:
print(fname)
content = f.read()
output = file + '\n' + '=' * len(file) + '\n\n'
output += '.. code:: none\n\n'
content_lines = ['\t' + l for l in content.split('\n')]
output += '\n'.join(content_lines)
output += '\n\n'
with codecs.open(os.path.join(destdir, full_name), 'w', 'utf-8') as f:
f.write(output)
mainpage_text = 'Examples\n'
mainpage_text += '========\n\n'
def insert_category(category, mainpage_text):
if category:
label = category.lower().replace(' ', '-').replace('/', '.')
mainpage_text += f"\n.. _{label}:\n\n"
mainpage_text += '\n' + category + '\n' + '-' * len(category) + '\n\n'
mainpage_text += '.. toctree::\n'
mainpage_text += ' :maxdepth: 1\n\n'
curpath = ''
for exname, basename in sorted(categories[category]):
mainpage_text += f" {basename} <{exname}>\n"
for fname, full_name in sorted(category_additional_files[category]):
mainpage_text += f" {fname} <{full_name}>\n"
return mainpage_text
mainpage_text = insert_category('', mainpage_text)
for category in sorted(categories.keys()):
if category:
mainpage_text = insert_category(category, mainpage_text)
with open(os.path.join(destdir, 'index.rst'), 'w') as f:
f.write(mainpage_text)
if __name__ == '__main__':
main('../../examples', '../../docs_sphinx/examples')
|
brian-team/brian2cuda
|
brian2cuda/sphinxext/generate_examples.py
|
Python
|
gpl-2.0
| 7,704
|
[
"Brian"
] |
c4b8f7ff5ea2b100878d885f4fa70b7041f74a86b15efd165fdf5e9ee4decefd
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
class Node(object):
def __init__(self):
self.children = []
def add(self, node):
self.children.append(node)
def do(self):
        print('Node')
def accept(self, visitor):
visitor.visit(self)
class NodeA(Node):
def do(self):
print "NodeA"
class NodeB(Node):
def do(self):
print "NodeB"
class Visitor():
def visit(self, node):
node.do()
for child in node.children:
child.accept(self)
def test():
root = Node()
node_a = NodeA()
node_b = NodeB()
root.add(node_a)
root.add(node_b)
node_a.add(node_b)
visitor = Visitor()
visitor.visit(root)
if __name__ == '__main__':
test()
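# Expected output of test() (note added for illustration): visit() performs a
# pre-order traversal, and node_b is reachable both from root and from node_a,
# so it is visited twice:
#
#     Node
#     NodeA
#     NodeB
#     NodeB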
|
pgularski/snippets
|
python/design_patterns/visitor.py
|
Python
|
mit
| 772
|
[
"VisIt"
] |
1967a0f0695dfdea0da233b691a75f0e7871bb1156c23453d8b9fdc154b461c9
|
#
# Copyright 2014-2015, 2017, 2021 Lars Pastewka (U. Freiburg)
# 2018-2020 Jan Griesser (U. Freiburg)
# 2015 Adrien Gola (KIT)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from .calculator import InversePowerLawPotential, Polydisperse
|
libAtoms/matscipy
|
matscipy/calculators/polydisperse/__init__.py
|
Python
|
lgpl-2.1
| 998
|
[
"Matscipy"
] |
bda39ec69081ebd77174121d55c2c21f4ac14ef6dec88c5985208ec949658608
|
"""
@package linearFilters
Linear filters for images
"""
# Recursive moving average filter https://habrahabr.ru/post/325590/
import threading
import multiprocessing
import math
import numpy
from PIL import Image
from PyQt5.QtCore import QCoreApplication
from . import apertureService
def meanFilter(colorModelTag, currentImageChannelIndex, pixels, imgSize, filterSize):
""" mean filter || homogeneous smoothing || box filter"""
apertures = apertureService.getApertureMatrixGenerator(imgSize, filterSize)
for x, y, aperture in apertures:
QCoreApplication.processEvents()
rSum = gSum = bSum = kSum = 0
for apertureLine in aperture:
for apertureCoordinate in apertureLine:
pixelPosX, pixelPosY = apertureCoordinate
red, green, blue = pixels[pixelPosX, pixelPosY]
rSum += red
gSum += green
bSum += blue
kSum += 1
if kSum <= 0: kSum = 1
rSum /= kSum
if rSum < 0: rSum = 0
if rSum > 255: rSum = 255
gSum /= kSum
if gSum < 0: gSum = 0
if gSum > 255: gSum = 255
bSum /= kSum
if bSum < 0: bSum = 0
if bSum > 255: bSum = 255
aperturePosX = int(len(aperture)/2)
aperturePosY = int(len(aperture[aperturePosX])/2)
oldColors = pixels[aperture[aperturePosX][aperturePosY]]
if currentImageChannelIndex == 0:
pixels[aperture[aperturePosX][aperturePosY]] = (
int(rSum), int(gSum), int(bSum))
if currentImageChannelIndex == 1:
pixels[aperture[aperturePosX][aperturePosY]] = (
int(rSum), oldColors[1], oldColors[2])
if currentImageChannelIndex == 2:
pixels[aperture[aperturePosX][aperturePosY]] = (
oldColors[0], int(gSum), oldColors[2])
if currentImageChannelIndex == 3:
pixels[aperture[aperturePosX][aperturePosY]] = (
oldColors[0], oldColors[1], int(bSum))
def medianFilter(colorModelTag, currentImageChannelIndex, pixels, imgSize, filterSize):
apertures = apertureService.getApertureMatrixGenerator(imgSize, filterSize)
for x, y, aperture in apertures:
QCoreApplication.processEvents()
redList = []
greenList = []
blueList = []
for apertureLine in aperture:
for apertureCoordinate in apertureLine:
pixelPosX, pixelPosY = apertureCoordinate
red, green, blue = pixels[pixelPosX, pixelPosY]
redList.append(red)
greenList.append(green)
blueList.append(blue)
redList.sort()
greenList.sort()
blueList.sort()
apertureCenter = int(len(redList)/2)
rValue = redList[apertureCenter]
gValue = greenList[apertureCenter]
bValue = blueList[apertureCenter]
aperturePosX = int(len(aperture)/2)
aperturePosY = int(len(aperture[aperturePosX])/2)
oldColors = pixels[aperture[aperturePosX][aperturePosY]]
if currentImageChannelIndex == 0:
pixels[aperture[aperturePosX][aperturePosY]] = (
int(rValue), int(gValue), int(bValue))
if currentImageChannelIndex == 1:
pixels[aperture[aperturePosX][aperturePosY]] = (
int(rValue), oldColors[1], oldColors[2])
if currentImageChannelIndex == 2:
pixels[aperture[aperturePosX][aperturePosY]] = (
oldColors[0], int(gValue), oldColors[2])
if currentImageChannelIndex == 3:
pixels[aperture[aperturePosX][aperturePosY]] = (
oldColors[0], oldColors[1], int(bValue))
def gaussian(x, sigma):
    """Gaussian weight of x for the given sigma (shared by the blur and bilateral filters)."""
    return (1.0 / (2 * math.pi * (sigma ** 2))) \
        * math.exp(- (x ** 2) / (2 * sigma ** 2))
def gaussianBlur(colorModelTag, currentImageChannelIndex, pixels, imgSize, filterSize):
""" Gaussian blur"""
apertures = apertureService.getApertureMatrixGenerator(imgSize, filterSize)
sigma = 0.5
for x, y, aperture in apertures:
QCoreApplication.processEvents()
rSum = gSum = bSum = kSum = 0
for i, apertureLine in enumerate(aperture):
for apertureCoordinate in apertureLine:
pixelPosX, pixelPosY = apertureCoordinate
red, green, blue = pixels[pixelPosX, pixelPosY]
# double kernelVal = blurArray[i][j];
kernelVal = gaussian(i, sigma)
rSum += red * kernelVal
gSum += green * kernelVal
bSum += blue * kernelVal
kSum += kernelVal
if kSum <= 0: kSum = 1
rSum /= kSum
if rSum < 0: rSum = 0
if rSum > 255: rSum = 255
gSum /= kSum
if gSum < 0: gSum = 0
if gSum > 255: gSum = 255
bSum /= kSum
if bSum < 0: bSum = 0
if bSum > 255: bSum = 255
aperturePosX = int(len(aperture)/2)
if len(aperture) != 0 and len(aperture[aperturePosX])/2 != 0:
aperturePosY = int(len(aperture[aperturePosX])/2)
oldColors = pixels[aperture[aperturePosX][aperturePosY]]
if currentImageChannelIndex == 0:
pixels[aperture[aperturePosX][aperturePosY]] = (
int(rSum), int(gSum), int(bSum))
if currentImageChannelIndex == 1:
pixels[aperture[aperturePosX][aperturePosY]] = (
int(rSum), oldColors[1], oldColors[2])
if currentImageChannelIndex == 2:
pixels[aperture[aperturePosX][aperturePosY]] = (
oldColors[0], int(gSum), oldColors[2])
if currentImageChannelIndex == 3:
pixels[aperture[aperturePosX][aperturePosY]] = (
oldColors[0], oldColors[1], int(bSum))
def distance(x, y, i, j):
return math.sqrt((x-i)**2 + (y-j)**2)
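# Note added for clarity: bilateralFilter below weights each aperture pixel by
#     w = gaussian(I_neighbour - I_centre, sigma_i) * gaussian(distance, sigma_s)
# i.e. an intensity (range) Gaussian times a spatial Gaussian, and normalises
# each colour channel by the sum of its weights (WpRed / WpGreen / WpBlue).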
def bilateralFilter(colorModelTag, currentImageChannelIndex, pixels, imgSize,
filterSize, sigma_i, sigma_s):
""" Bilateral filter """
apertures = apertureService.getApertureMatrixGenerator(imgSize, filterSize)
# sigma_i = 12.5
# sigma_s = 16.5
for x, y, aperture in apertures:
QCoreApplication.processEvents()
filteredRed = WpRed = filteredGreen = WpGreen = filteredBlue = WpBlue = 0
for i, apertureLine in enumerate(aperture):
for apertureCoordinate in apertureLine:
pixelPosX, pixelPosY = apertureCoordinate
red, green, blue = pixels[pixelPosX, pixelPosY]
xyRed, xyGreen, xyBlue = pixels[x, y]
giRed = gaussian(red - xyRed, sigma_i)
gsRed = gaussian(distance(pixelPosX, pixelPosY, x, y), sigma_s)
wRed = giRed * gsRed
filteredRed += red * wRed
WpRed += wRed
giGreen = gaussian(green - xyGreen, sigma_i)
gsGreen = gaussian(distance(pixelPosX, pixelPosY, x, y), sigma_s)
wGreen = giGreen * gsGreen
filteredGreen += green * wGreen
WpGreen += wGreen
giBlue = gaussian(blue - xyBlue, sigma_i)
gsBlue = gaussian(distance(x, y, pixelPosX, pixelPosY), sigma_s)
wBlue = giBlue * gsBlue
filteredBlue += blue * wBlue
WpBlue += wBlue
filteredRed = int(round(filteredRed / WpRed))
filteredGreen = int(round(filteredGreen / WpGreen))
filteredBlue = int(round(filteredBlue / WpBlue))
aperturePosX = int(len(aperture)/2)
aperturePosY = int(len(aperture[aperturePosX])/2)
if len(aperture) != 0 and aperturePosY != 0:
oldColors = pixels[aperture[aperturePosX][aperturePosY]]
if currentImageChannelIndex == 0:
pixels[aperture[aperturePosX][aperturePosY]] = (
filteredRed, filteredGreen, filteredBlue)
if currentImageChannelIndex == 1:
pixels[aperture[aperturePosX][aperturePosY]] = (
filteredRed, oldColors[1], oldColors[2])
if currentImageChannelIndex == 2:
pixels[aperture[aperturePosX][aperturePosY]] = (
oldColors[0], filteredGreen, oldColors[2])
if currentImageChannelIndex == 3:
pixels[aperture[aperturePosX][aperturePosY]] = (
oldColors[0], oldColors[1], filteredBlue)
def laplacian(x, y, sigma):
    """Laplacian-of-Gaussian kernel value at (x, y) for the given sigma."""
    return (-1/(math.pi * (sigma**4))) * \
        (1 - (x**2 + y**2)/(2*(sigma**2))) * \
        math.exp(-(x**2 + y**2)/(2*(sigma**2)))
def laplacianBlur(colorModelTag, currentImageChannelIndex, pixels, imgSize,
filterSize, sigma):
""" Laplacian blur"""
# sigma = 2.4
apertures = apertureService.getApertureMatrixGenerator(imgSize, filterSize)
for x, y, aperture in apertures:
QCoreApplication.processEvents()
rSum = gSum = bSum = kSum = 0
for i, apertureLine in enumerate(aperture):
for j, apertureCoordinate in enumerate(apertureLine):
pixelPosX, pixelPosY = apertureCoordinate
red, green, blue = pixels[pixelPosX, pixelPosY]
# double kernelVal = blurArray[i][j];
kernelVal = laplacian(i, j, sigma)
# print(kernelVal)
rSum += red * kernelVal
gSum += green * kernelVal
bSum += blue * kernelVal
kSum += kernelVal
if kSum <= 0: kSum = 1
rSum /= kSum
if rSum < 0: rSum = 0
if rSum > 255: rSum = 255
gSum /= kSum
if gSum < 0: gSum = 0
if gSum > 255: gSum = 255
bSum /= kSum
if bSum < 0: bSum = 0
if bSum > 255: bSum = 255
aperturePosX = int(len(aperture)/2)
if len(aperture) != 0 and len(aperture[aperturePosX])/2 != 0:
aperturePosY = int(len(aperture[aperturePosX])/2)
oldColors = pixels[aperture[aperturePosX][aperturePosY]]
if currentImageChannelIndex == 0:
pixels[aperture[aperturePosX][aperturePosY]] = (
int(rSum), int(gSum), int(bSum))
if currentImageChannelIndex == 1:
pixels[aperture[aperturePosX][aperturePosY]] = (
int(rSum), oldColors[1], oldColors[2])
if currentImageChannelIndex == 2:
pixels[aperture[aperturePosX][aperturePosY]] = (
oldColors[0], int(gSum), oldColors[2])
if currentImageChannelIndex == 3:
pixels[aperture[aperturePosX][aperturePosY]] = (
oldColors[0], oldColors[1], int(bSum))
def clippingColor(value1, value0, threshold):
if value1 - value0 <= threshold:
return value1
return 0
def suitabilityValue(value1, value0, threshold):
if value1 - value0 <= threshold:
return 1
return 0
def cleanerFilterByJimCasaburi(colorModelTag, currentImageChannelIndex, pixels,
imgSize, filterSize, threshold):
""" Laplacian blur"""
threshold = 50
apertures = apertureService.getApertureMatrixGenerator(imgSize, filterSize)
for x, y, aperture in apertures:
QCoreApplication.processEvents()
ccRed = svRed = ccGreen = svGreen = ccBlue = svBlue = 0
centerRed, centerGreen, centerBlue = pixels[x, y]
for i, apertureLine in enumerate(aperture):
for j, apertureCoordinate in enumerate(apertureLine):
pixelPosX, pixelPosY = apertureCoordinate
red, green, blue = pixels[pixelPosX, pixelPosY]
ccRed += clippingColor(red, centerRed, threshold)
svRed += suitabilityValue(red, centerRed, threshold)
ccGreen += clippingColor(green, centerGreen, threshold)
svGreen += suitabilityValue(green, centerGreen, threshold)
ccBlue += clippingColor(blue, centerBlue, threshold)
svBlue += suitabilityValue(blue, centerBlue, threshold)
if svRed != 0:
R = int(ccRed/svRed)
else:
R = centerRed
if svGreen != 0:
G = int(ccGreen/svGreen)
else:
G = centerGreen
if svBlue != 0:
B = int(ccBlue/svBlue)
else:
B = centerBlue
aperturePosX = int(len(aperture)/2)
if len(aperture) != 0 and len(aperture[aperturePosX])/2 != 0:
aperturePosY = int(len(aperture[aperturePosX])/2)
oldColors = pixels[aperture[aperturePosX][aperturePosY]]
if currentImageChannelIndex == 0:
pixels[aperture[aperturePosX][aperturePosY]] = (
R, G, B)
if currentImageChannelIndex == 1:
pixels[aperture[aperturePosX][aperturePosY]] = (
R, oldColors[1], oldColors[2])
if currentImageChannelIndex == 2:
pixels[aperture[aperturePosX][aperturePosY]] = (
oldColors[0], G, oldColors[2])
if currentImageChannelIndex == 3:
pixels[aperture[aperturePosX][aperturePosY]] = (
oldColors[0], oldColors[1], B)
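# Hedged usage sketch (added; the file name and filter size are hypothetical).
# The filters mutate a PIL pixel-access object in place:
#
#     img = Image.open("photo.png").convert("RGB")
#     meanFilter("RGB", 0, img.load(), img.size, 3)
#     img.save("photo_smoothed.png")
#
# currentImageChannelIndex follows the convention used throughout this module:
# 0 writes all three channels, while 1/2/3 write only the R/G/B channel.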
|
drewdru/AOI
|
imageFilters/filters.py
|
Python
|
gpl-3.0
| 13,345
|
[
"Gaussian"
] |
f0a8dca379b7d9738053daa14b8715a565f4f8e7c4bc5a8492372df4dfd845c9
|
# -*- coding: utf-8 -*-
#
# test_parrot_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# This script tests the parrot_neuron in NEST.
# See test_parrot_neuron_ps.py for an equivalent test of the precise parrot.
import nest
import unittest
import math
@nest.ll_api.check_stack
class ParrotNeuronTestCase(unittest.TestCase):
"""Check parrot_neuron spike repetition properties"""
def setUp(self):
nest.set_verbosity('M_WARNING')
nest.ResetKernel()
# set up source spike generator, as well as parrot neurons
self.spike_time = 1.
self.delay = .2
self.source = nest.Create("spike_generator", 1,
{"spike_times": [self.spike_time]})
self.parrot = nest.Create('parrot_neuron')
self.spikes = nest.Create("spike_detector")
# record source and parrot spikes
nest.Connect(self.source, self.spikes)
nest.Connect(self.parrot, self.spikes)
def test_ParrotNeuronRepeatSpike(self):
"""Check parrot_neuron repeats spikes on port 0"""
# connect with arbitrary delay
nest.Connect(self.source, self.parrot, syn_spec={"delay": self.delay})
nest.Simulate(self.spike_time + 2 * self.delay)
# get spike from parrot neuron
events = nest.GetStatus(self.spikes)[0]["events"]
post_time = events['times'][
events['senders'] == self.parrot[0].get('global_id')]
# assert spike was repeated at correct time
assert post_time, "Parrot neuron failed to repeat spike."
assert self.spike_time + self.delay == post_time, \
"Parrot neuron repeated spike at wrong delay"
def test_ParrotNeuronIgnoreSpike(self):
"""Check parrot_neuron ignores spikes on port 1"""
# connect with arbitrary delay to port 1
nest.Connect(self.source, self.parrot,
syn_spec={"receptor_type": 1, "delay": self.delay})
nest.Simulate(self.spike_time + 2. * self.delay)
# get spike from parrot neuron, assert it was ignored
events = nest.GetStatus(self.spikes)[0]["events"]
post_time = events['times'][
events['senders'] == self.parrot.get('global_id')]
assert len(post_time) == 0, \
"Parrot neuron failed to ignore spike arriving on port 1"
def test_ParrotNeuronOutgoingMultiplicity(self):
"""
Check parrot_neuron correctly repeats multiple spikes
The parrot_neuron receives two spikes in a single time step.
We check that both spikes are forwarded to the spike_detector.
"""
# connect twice
nest.Connect(self.source, self.parrot, syn_spec={"delay": self.delay})
nest.Connect(self.source, self.parrot, syn_spec={"delay": self.delay})
nest.Simulate(self.spike_time + 2. * self.delay)
# get spikes from parrot neuron, assert two were transmitted
events = nest.GetStatus(self.spikes)[0]["events"]
post_times = events['times'][
events['senders'] == self.parrot.get('global_id')]
assert len(post_times) == 2 and post_times[0] == post_times[1], \
"Parrot neuron failed to correctly repeat multiple spikes."
@nest.ll_api.check_stack
class ParrotNeuronPoissonTestCase(unittest.TestCase):
"""Check parrot_neuron spike repetition properties"""
def test_ParrotNeuronIncomingMultiplicity(self):
"""
Check parrot_neuron heeds multiplicity information in incoming spikes.
This test relies on the fact that poisson_generator transmits
multiple spikes during a time step using multiplicity, and that
these spikes are delivered directly, i.e., without multiplicity-
unrolling in send_remote().
We create a high-rate poisson_generator. If parrot_neuron
ignored multiplicity, it would only transmit one spike per time
step. We chain two parrot_neurons to check against any loss.
"""
# set up source spike generator, as well as parrot neurons
h = 0.1 # ms
rate = 1000000. # spikes / s
delay = 1. # ms
t_base = 1000. # ms
t_sim = t_base + 3 * delay # after t_sim, spikes from t_base arrived
spikes_expected = rate * t_base / 1000.
spikes_std = math.sqrt(spikes_expected)
# if the test is to be meaningful we must expect signficantly more
# spikes than time steps
assert spikes_expected - 3 * spikes_std > 10. * t_sim / h, \
"Internal inconsistency: too few spikes."
nest.set_verbosity('M_WARNING')
nest.ResetKernel()
nest.SetKernelStatus({'resolution': h,
'grng_seed': 123,
'rng_seeds': [456]})
source = nest.Create('poisson_generator', params={'rate': rate})
parrots = nest.Create('parrot_neuron', 2)
detect = nest.Create('spike_detector')
nest.Connect(source, parrots[:1], syn_spec={'delay': delay})
nest.Connect(parrots[:1], parrots[1:], syn_spec={'delay': delay})
nest.Connect(parrots[1:], detect)
nest.Simulate(t_sim)
n_spikes = nest.GetStatus(detect)[0]['n_events']
assert n_spikes > spikes_expected - 3 * spikes_std, \
"parrot_neuron loses spikes."
assert n_spikes < spikes_expected + 3 * spikes_std, \
"parrot_neuron adds spikes."
@nest.ll_api.check_stack
class ParrotNeuronSTDPTestCase(unittest.TestCase):
"""
Check STDP protocol between two parrot_neurons connected by a stdp_synapse.
Exact pre- and post-synaptic spike times are set by spike_generators
connected to each parrot neuron. Additional spikes sent through the
stdp_synapse are explicitly ignored in the postsynaptic parrot_neuron
by setting the stdp_synapse to connect to port 1.
"""
def run_protocol(self, dt):
"""Set up a network with pre-post spike pairings
with t_post - t_pre = dt"""
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
# set pre and postsynaptic spike times
delay = 1. # delay for connections
dspike = 100. # ISI
# set the correct real spike times for generators (correcting for
# delays)
pre_times = [100., 100. + dspike]
post_times = [k + dt for k in pre_times]
# create spike_generators with these times
pre_spikes = nest.Create("spike_generator", 1, {
"spike_times": pre_times})
post_spikes = nest.Create("spike_generator", 1, {
"spike_times": post_times})
# create parrot neurons and connect spike_generators
pre_parrot = nest.Create("parrot_neuron", 1)
post_parrot = nest.Create("parrot_neuron", 1)
nest.Connect(pre_spikes, pre_parrot, syn_spec={"delay": delay})
nest.Connect(post_spikes, post_parrot, syn_spec={"delay": delay})
# create spike detector
spikes = nest.Create("spike_detector")
nest.Connect(pre_parrot, spikes)
nest.Connect(post_parrot, spikes)
# connect both parrot neurons with a stdp synapse onto port 1
# thereby spikes transmitted through the stdp connection are
# not repeated postsynaptically.
syn_spec = {
"synapse_model": "stdp_synapse",
# set receptor 1 postsynaptically, to not generate extra spikes
"receptor_type": 1,
}
conn_spec = {
"rule": "one_to_one",
}
nest.Connect(pre_parrot, post_parrot,
syn_spec=syn_spec, conn_spec=conn_spec)
# get STDP synapse and weight before protocol
syn = nest.GetConnections(
source=pre_parrot, synapse_model="stdp_synapse")
w_pre = syn.get('weight')
last_time = max(pre_times[-1], post_times[-1])
nest.Simulate(last_time + 2 * delay)
# get weight post protocol
w_post = syn.get('weight')
return w_pre, w_post
def test_ParrotNeuronSTDPProtocolPotentiation(self):
"""Check pre-post spike pairings between parrot_neurons
increments weights."""
dt = 10.
w_pre, w_post = self.run_protocol(dt)
assert w_pre < w_post, "Parrot neuron STDP potentiation \
protocol failed to elicit positive weight changes."
def test_ParrotNeuronSTDPProtocolDepression(self):
"""Check post-pre spike pairings between parrot_neurons
decrement weights."""
dt = -10.
w_pre, w_post = self.run_protocol(dt)
assert w_pre > w_post, "Parrot neuron STDP potentiation \
protocol failed to elicit negative weight changes."
def suite():
# makeSuite is sort of obsolete http://bugs.python.org/issue2721
# using loadTestsFromTestCase instead.
suite1 = unittest.TestLoader().loadTestsFromTestCase(
ParrotNeuronTestCase)
suite2 = unittest.TestLoader().loadTestsFromTestCase(
ParrotNeuronPoissonTestCase)
suite3 = unittest.TestLoader().loadTestsFromTestCase(
ParrotNeuronSTDPTestCase)
return unittest.TestSuite([suite1, suite2, suite3])
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
|
Silmathoron/nest-simulator
|
pynest/nest/tests/test_parrot_neuron.py
|
Python
|
gpl-2.0
| 10,008
|
[
"NEURON"
] |
a265f085e6deaf6bea37435636269d36597440f73aa821011c6254969809a28a
|
__author__ = 'sibirrer'
import astrofunc.LensingProfiles.calc_util as calc_util
import pytest
class TestCalcUtil(object):
"""
    tests the calc_util helper methods
"""
def setup(self):
pass
def test_d_r_dx(self):
x = 1
y = 0
out = calc_util.d_r_dx(x, y)
assert out == 1
def test_d_r_dy(self):
x = 1
y = 0
out = calc_util.d_r_dy(x, y)
assert out == 0
def test_d_x_diffr_dx(self):
x = 1
y = 0
out = calc_util.d_x_diffr_dx(x, y)
assert out == 0
x = 0
y = 1
out = calc_util.d_x_diffr_dx(x, y)
assert out == 1
def test_d_y_diffr_dx(self):
x = 1
y = 0
out = calc_util.d_y_diffr_dx(x, y)
assert out == 0
x = 0
y = 1
out = calc_util.d_y_diffr_dx(x, y)
        print(out)
assert out == 0
def test_d_y_diffr_dy(self):
x = 1
y = 0
out = calc_util.d_y_diffr_dy(x, y)
assert out == 1
x = 0
y = 1
out = calc_util.d_y_diffr_dy(x, y)
assert out == 0
def test_d_x_diffr_dy(self):
x = 1
y = 0
out = calc_util.d_x_diffr_dy(x, y)
assert out == 0
x = 0
y = 1
out = calc_util.d_x_diffr_dy(x, y)
        print(out)
assert out == 0
if __name__ == '__main__':
pytest.main()
|
sibirrer/astrofunc
|
test/test_calc_util.py
|
Python
|
mit
| 1,421
|
[
"Gaussian"
] |
7b71d43fa66df0e258c23c773d3cf927e985bb1b3097ec96a013e94873c82789
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import uuid
import mock
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import timeutils
from nova.compute import flavors
from nova.compute import resource_tracker
from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import objects
from nova.objects import base as obj_base
from nova import rpc
from nova import test
from nova.tests.unit.compute.monitors import test_monitors
from nova.tests.unit.objects import test_migration
from nova.tests.unit.pci import fakes as pci_fakes
from nova.virt import driver
from nova.virt import hardware
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
FAKE_VIRT_NUMA_TOPOLOGY = hardware.VirtNUMAHostTopology(
cells=[hardware.VirtNUMATopologyCellUsage(0, set([1, 2]), 3072),
hardware.VirtNUMATopologyCellUsage(1, set([3, 4]), 3072)])
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = hardware.VirtNUMALimitTopology(
cells=[hardware.VirtNUMATopologyCellLimit(
0, set([1, 2]), 3072, 4, 10240),
hardware.VirtNUMATopologyCellLimit(
1, set([3, 4]), 3072, 4, 10240)])
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self, pci_support=False, stats=None,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
self.vcpus = FAKE_VIRT_VCPUS
self.numa_topology = numa_topology
self.memory_mb_used = 0
self.local_gb_used = 0
self.pci_support = pci_support
self.pci_devices = [{
'label': 'forza-napoli',
'dev_type': 'foo',
'compute_node_id': 1,
'address': '0000:00:00.1',
'product_id': 'p1',
'vendor_id': 'v1',
'status': 'available',
'extra_k1': 'v1'}] if self.pci_support else []
self.pci_stats = [{
'count': 1,
'vendor_id': 'v1',
'product_id': 'p1'}] if self.pci_support else []
if stats is not None:
self.stats = stats
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
'numa_topology': (
self.numa_topology.to_json() if self.numa_topology else None),
}
if self.pci_support:
d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
if hasattr(self, 'stats'):
d['stats'] = self.stats
return d
def estimate_instance_overhead(self, instance_info):
instance_info['memory_mb'] # make sure memory value is present
overhead = {
'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
}
return overhead # just return a constant value for testing
class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = context.get_admin_context()
self.flags(use_local=True, group='conductor')
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self._instances = {}
self._numa_topologies = {}
self._instance_types = {}
self.stubs.Set(self.conductor.db,
'instance_get_all_by_host_and_node',
self._fake_instance_get_all_by_host_and_node)
self.stubs.Set(db, 'instance_extra_get_by_instance_uuid',
self._fake_instance_extra_get_by_instance_uuid)
self.stubs.Set(self.conductor.db,
'instance_update_and_get_original',
self._fake_instance_update_and_get_original)
self.stubs.Set(self.conductor.db,
'flavor_get', self._fake_flavor_get)
self.host = 'fakehost'
def _create_compute_node(self, values=None):
compute = {
"id": 1,
"service_id": 1,
"vcpus": 1,
"memory_mb": 1,
"local_gb": 1,
"vcpus_used": 1,
"memory_mb_used": 1,
"local_gb_used": 1,
"free_ram_mb": 1,
"free_disk_gb": 1,
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
"numa_topology": None,
"stats": {
"num_instances": "1",
},
"hypervisor_hostname": "fakenode",
}
if values:
compute.update(values)
return compute
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
service = {
"id": 1,
"host": host,
"binary": "nova-compute",
"topic": "compute",
"compute_node": compute,
}
return service
def _fake_instance_system_metadata(self, instance_type, prefix=''):
sys_meta = []
for key in flavors.system_metadata_flavor_props.keys():
sys_meta.append({'key': '%sinstance_type_%s' % (prefix, key),
'value': instance_type[key]})
return sys_meta
def _fake_instance(self, stash=True, flavor=None, **kwargs):
# Default to an instance ready to resize to or from the same
# instance_type
flavor = flavor or self._fake_flavor_create()
sys_meta = self._fake_instance_system_metadata(flavor)
if stash:
# stash instance types in system metadata.
sys_meta = (sys_meta +
self._fake_instance_system_metadata(flavor, 'new_') +
self._fake_instance_system_metadata(flavor, 'old_'))
instance_uuid = str(uuid.uuid1())
instance = {
'uuid': instance_uuid,
'vm_state': vm_states.RESIZED,
'task_state': None,
'ephemeral_key_uuid': None,
'os_type': 'Linux',
'project_id': '123456',
'host': None,
'node': None,
'instance_type_id': flavor['id'],
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'root_gb': flavor['root_gb'],
'ephemeral_gb': flavor['ephemeral_gb'],
'launched_on': None,
'system_metadata': sys_meta,
'availability_zone': None,
'vm_mode': None,
'reservation_id': None,
'display_name': None,
'default_swap_device': None,
'power_state': None,
'scheduled_at': None,
'access_ip_v6': None,
'access_ip_v4': None,
'key_name': None,
'updated_at': None,
'cell_name': None,
'locked': None,
'locked_by': None,
'launch_index': None,
'architecture': None,
'auto_disk_config': None,
'terminated_at': None,
'ramdisk_id': None,
'user_data': None,
'cleaned': None,
'deleted_at': None,
'id': 333,
'disable_terminate': None,
'hostname': None,
'display_description': None,
'key_data': None,
'deleted': None,
'default_ephemeral_device': None,
'progress': None,
'launched_at': None,
'config_drive': None,
'kernel_id': None,
'user_id': None,
'shutdown_terminate': None,
'created_at': None,
'image_ref': None,
'root_device_name': None,
}
numa_topology = kwargs.pop('numa_topology', None)
if numa_topology:
numa_topology = {
'id': 1, 'created_at': None, 'updated_at': None,
'deleted_at': None, 'deleted': None,
'instance_uuid': instance['uuid'],
'numa_topology': numa_topology.to_json()
}
instance.update(kwargs)
self._instances[instance_uuid] = instance
self._numa_topologies[instance_uuid] = numa_topology
return instance
def _fake_flavor_create(self, **kwargs):
instance_type = {
'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'disabled': False,
'is_public': True,
'name': 'fakeitype',
'memory_mb': FAKE_VIRT_MEMORY_MB,
'vcpus': FAKE_VIRT_VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'flavorid': 'fakeflavor',
'extra_specs': {},
}
instance_type.update(**kwargs)
id_ = instance_type['id']
self._instance_types[id_] = instance_type
return instance_type
def _fake_instance_get_all_by_host_and_node(self, context, host, nodename,
columns_to_join=None):
return [i for i in self._instances.values() if i['host'] == host]
def _fake_instance_extra_get_by_instance_uuid(self, context,
instance_uuid, columns=None):
return self._numa_topologies.get(instance_uuid)
def _fake_flavor_get(self, ctxt, id_):
return self._instance_types[id_]
def _fake_instance_update_and_get_original(self, context, instance_uuid,
values):
instance = self._instances[instance_uuid]
instance.update(values)
# the test doesn't care what the original instance values are, it's
# only used in the subsequent notification:
return (instance, instance)
def _driver(self):
return FakeVirtDriver()
def _tracker(self, host=None):
if host is None:
host = self.host
node = "fakenode"
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
tracker.ext_resources_handler = \
resources.ResourceHandler(RESOURCE_NAMES, True)
return tracker
class UnsupportedDriverTestCase(BaseTestCase):
"""Resource tracking should be disabled when the virt driver doesn't
support it.
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
def _driver(self):
return UnsupportedVirtDriver()
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
self.assertIsNone(self.tracker.compute_node)
def test_disabled_claim(self):
# basic claim:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_claim(self):
# instance variation:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_context_claim(self):
# instance context manager variation:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
with self.tracker.instance_claim(self.context, instance) as claim:
self.assertEqual(0, claim.memory_mb)
def test_disabled_updated_usage(self):
instance = self._fake_instance(host='fakehost', memory_mb=5,
root_gb=10)
self.tracker.update_usage(self.context, instance)
def test_disabled_resize_claim(self):
instance = self._fake_instance()
instance_type = self._fake_flavor_create()
claim = self.tracker.resize_claim(self.context, instance,
instance_type)
self.assertEqual(0, claim.memory_mb)
self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
self.assertEqual(instance_type['id'],
claim.migration['new_instance_type_id'])
def test_disabled_resize_context_claim(self):
instance = self._fake_instance()
instance_type = self._fake_flavor_create()
with self.tracker.resize_claim(self.context, instance, instance_type) \
as claim:
self.assertEqual(0, claim.memory_mb)
class MissingServiceTestCase(BaseTestCase):
def setUp(self):
super(MissingServiceTestCase, self).setUp()
self.context = context.get_admin_context()
self.tracker = self._tracker()
def test_missing_service(self):
self.tracker.update_available_resource(self.context)
self.assertTrue(self.tracker.disabled)
class MissingComputeNodeTestCase(BaseTestCase):
def setUp(self):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def _fake_create_compute_node(self, context, values):
self.created = True
return self._create_compute_node()
def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
return service
def test_create_compute_node(self):
self.tracker.update_available_resource(self.context)
self.assertTrue(self.created)
def test_enabled(self):
self.tracker.update_available_resource(self.context)
self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
def setUp(self):
# setup plumbing for a working resource tracker with required
# database models and a compatible compute driver:
super(BaseTrackerTestCase, self).setUp()
self.updated = False
self.deleted = False
self.update_call_count = 0
self.tracker = self._tracker()
self._migrations = {}
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
self.stubs.Set(db, 'compute_node_delete',
self._fake_compute_node_delete)
self.stubs.Set(db, 'migration_update',
self._fake_migration_update)
self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
# Note that this must be called before the call to _init_tracker()
patcher = pci_fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._init_tracker()
self.limits = self._limits()
def _fake_service_get_by_compute_host(self, ctx, host):
self.compute = self._create_compute_node()
self.service = self._create_service(host, compute=self.compute)
return self.service
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _fake_compute_node_delete(self, ctx, compute_node_id):
self.deleted = True
self.compute.update({'deleted': 1})
return self.compute
def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
node):
status = ['confirmed', 'reverted', 'error']
migrations = []
for migration in self._migrations.values():
migration = obj_base.obj_to_primitive(migration)
if migration['status'] in status:
continue
uuid = migration['instance_uuid']
migration['instance'] = self._instances[uuid]
migrations.append(migration)
return migrations
def _fake_migration_update(self, ctxt, migration_id, values):
# cheat and assume there's only 1 migration present
migration = self._migrations.values()[0]
migration.update(values)
return migration
def _init_tracker(self):
self.tracker.update_available_resource(self.context)
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
disk_gb=FAKE_VIRT_LOCAL_GB,
vcpus=FAKE_VIRT_VCPUS,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
"""Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
'disk_gb': disk_gb,
'vcpu': vcpus,
'numa_topology': numa_topology.to_json() if numa_topology else None
}
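    # Illustrative note (added for clarity, not part of the original test
    # suite): with the module defaults the dictionary returned by _limits()
    # looks roughly like
    #
    #     {'memory_mb': FAKE_VIRT_MEMORY_WITH_OVERHEAD,
    #      'disk_gb': FAKE_VIRT_LOCAL_GB,
    #      'vcpu': FAKE_VIRT_VCPUS,
    #      'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD.to_json()}
    #
    # and is what the claim tests below pass as oversubscription limits.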
def assertEqualNUMAHostTopology(self, expected, got):
attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
if None in (expected, got):
if expected != got:
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
else:
return
if len(expected) != len(got):
raise AssertionError("Topologies don't match due to different "
"number of cells. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
for exp_cell, got_cell in zip(expected.cells, got.cells):
for attr in attrs:
if getattr(exp_cell, attr) != getattr(got_cell, attr):
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
def _assert(self, value, field, tracker=None):
if tracker is None:
tracker = self.tracker
if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % {'field': field})
x = tracker.compute_node[field]
if field == 'numa_topology':
self.assertEqualNUMAHostTopology(
value, hardware.VirtNUMAHostTopology.from_json(x))
else:
self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])
def test_free_disk_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.local_gb - driver.local_gb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_disk_gb'])
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
self.assertEqual(driver.pci_stats,
jsonutils.loads(self.tracker.compute_node['pci_stats']))
class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
def setUp(self):
super(SchedulerClientTrackerTestCase, self).setUp()
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def test_create_resource(self):
self.tracker._write_ext_resources = mock.Mock()
self.tracker.conductor_api.compute_node_create = mock.Mock(
return_value=dict(id=1))
values = {'stats': {}, 'foo': 'bar', 'baz_count': 0}
self.tracker._create(self.context, values)
expected = {'stats': '{}', 'foo': 'bar', 'baz_count': 0,
'id': 1}
self.tracker.scheduler_client.update_resource_stats.\
assert_called_once_with(self.context,
("fakehost", "fakenode"),
expected)
def test_update_resource(self):
self.tracker._write_ext_resources = mock.Mock()
values = {'stats': {}, 'foo': 'bar', 'baz_count': 0}
self.tracker._update(self.context, values)
expected = {'stats': '{}', 'foo': 'bar', 'baz_count': 0,
'id': 1}
self.tracker.scheduler_client.update_resource_stats.\
assert_called_once_with(self.context,
("fakehost", "fakenode"),
expected)
class TrackerPciStatsTestCase(BaseTrackerTestCase):
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
self.assertEqual(driver.pci_stats,
jsonutils.loads(self.tracker.compute_node['pci_stats']))
def _driver(self):
return FakeVirtDriver(pci_support=True)
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
def setUp(self):
super(TrackerExtraResourcesTestCase, self).setUp()
self.driver = self._driver()
def _driver(self):
return FakeVirtDriver()
def test_set_empty_ext_resources(self):
resources = self.driver.get_available_resource(self.tracker.nodename)
self.assertNotIn('stats', resources)
self.tracker._write_ext_resources(resources)
self.assertIn('stats', resources)
def test_set_extra_resources(self):
def fake_write_resources(resources):
resources['stats']['resA'] = '123'
resources['stats']['resB'] = 12
self.stubs.Set(self.tracker.ext_resources_handler,
'write_resources',
fake_write_resources)
resources = self.driver.get_available_resource(self.tracker.nodename)
self.tracker._write_ext_resources(resources)
expected = {"resA": "123", "resB": 12}
self.assertEqual(sorted(expected),
sorted(resources['stats']))
class InstanceClaimTestCase(BaseTrackerTestCase):
def _instance_topology(self, mem):
mem = mem * 1024
return hardware.VirtNUMAInstanceTopology(
cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), mem),
hardware.VirtNUMATopologyCellInstance(1, set([3]), mem)])
def _claim_topology(self, mem, cpus=1):
if self.tracker.driver.numa_topology is None:
return None
mem = mem * 1024
return hardware.VirtNUMAHostTopology(
cells=[hardware.VirtNUMATopologyCellUsage(
0, set([1, 2]), 3072, cpu_usage=cpus,
memory_usage=mem),
hardware.VirtNUMATopologyCellUsage(
1, set([3, 4]), 3072, cpu_usage=cpus,
memory_usage=mem)])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_usage_only_for_tracked(self, mock_get):
flavor = self._fake_flavor_create()
claim_mem = flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD
claim_gb = flavor['root_gb'] + flavor['ephemeral_gb']
claim_topology = self._claim_topology(claim_mem / 2)
instance_topology = self._instance_topology(claim_mem / 2)
instance = self._fake_instance(
flavor=flavor, task_state=None,
numa_topology=instance_topology)
self.tracker.update_usage(self.context, instance)
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'current_workload')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertNotEqual(0, claim.memory_mb)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
# now update should actually take effect
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
self._assert(1, 'current_workload')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_audit(self, mock_get):
claim_mem = 3
claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
claim_disk = 2
claim_topology = self._claim_topology(claim_mem_total / 2)
instance_topology = self._instance_topology(claim_mem_total / 2)
instance = self._fake_instance(memory_mb=claim_mem, root_gb=claim_disk,
ephemeral_gb=0, numa_topology=instance_topology)
self.tracker.instance_claim(self.context, instance, self.limits)
self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["memory_mb"])
self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
claim_topology, hardware.VirtNUMAHostTopology.from_json(
self.compute['numa_topology']))
self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["local_gb"])
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute["free_disk_gb"])
# 1st pretend that the compute operation finished and claimed the
# desired resources from the virt layer
driver = self.tracker.driver
driver.memory_mb_used = claim_mem
driver.local_gb_used = claim_disk
self.tracker.update_available_resource(self.context)
# confirm tracker is adding in host_ip
self.assertIsNotNone(self.compute.get('host_ip'))
# confirm that resource usage is derived from instance usages,
# not virt layer:
self.assertEqual(claim_mem_total, self.compute['memory_mb_used'])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute['free_ram_mb'])
self.assertEqualNUMAHostTopology(
claim_topology, hardware.VirtNUMAHostTopology.from_json(
self.compute['numa_topology']))
self.assertEqual(claim_disk, self.compute['local_gb_used'])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute['free_disk_gb'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_abort(self, mock_get):
claim_mem = 3
claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
claim_disk = 2
claim_topology = self._claim_topology(claim_mem_total / 2)
instance_topology = self._instance_topology(claim_mem_total / 2)
instance = self._fake_instance(memory_mb=claim_mem,
root_gb=claim_disk, ephemeral_gb=0,
numa_topology=instance_topology)
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertIsNotNone(claim)
self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
claim_topology, hardware.VirtNUMAHostTopology.from_json(
self.compute['numa_topology']))
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute["free_disk_gb"])
claim.abort()
self.assertEqual(0, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
hardware.VirtNUMAHostTopology.from_json(
self.compute['numa_topology']))
self.assertEqual(0, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["free_disk_gb"])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_claim_with_oversubscription(self, mock_get):
memory_mb = FAKE_VIRT_MEMORY_MB * 2
root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
vcpus = FAKE_VIRT_VCPUS * 2
claim_topology = self._claim_topology(memory_mb)
instance_topology = self._instance_topology(memory_mb)
limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
'disk_gb': root_gb * 2,
'vcpu': vcpus,
'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD.to_json()}
instance = self._fake_instance(memory_mb=memory_mb,
root_gb=root_gb, ephemeral_gb=ephemeral_gb,
numa_topology=instance_topology)
self.tracker.instance_claim(self.context, instance, limits)
self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
hardware.VirtNUMAHostTopology.from_json(
self.compute['numa_topology']))
self.assertEqual(root_gb * 2,
self.tracker.compute_node['local_gb_used'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get):
self.limits['vcpu'] = 2
claim_topology = self._claim_topology(2, cpus=2)
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=1, ephemeral_gb=0)
instance_topology = self._instance_topology(1)
instance = self._fake_instance(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
instance = self._fake_instance(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
self.tracker.compute_node['local_gb_used'])
self.assertEqual(2 * flavor['vcpus'],
self.tracker.compute_node['vcpus_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
hardware.VirtNUMAHostTopology.from_json(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_context_claim_with_exception(self, mock_get):
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1)
try:
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
raise test.TestingException()
except test.TestingException:
pass
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
hardware.VirtNUMAHostTopology.from_json(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_context_claim(self, mock_get):
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=2, ephemeral_gb=3)
claim_topology = self._claim_topology(1)
instance_topology = self._instance_topology(1)
instance = self._fake_instance(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node['local_gb_used'])
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
hardware.VirtNUMAHostTopology.from_json(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
# after exiting claim context, build is marked as finished. usage
# totals should be same:
self.tracker.update_available_resource(self.context)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node['local_gb_used'])
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
hardware.VirtNUMAHostTopology.from_json(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_load_stats_for_instance(self, mock_get):
instance = self._fake_instance(task_state=task_states.SCHEDULING)
with self.tracker.instance_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node['current_workload'])
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_cpu_stats(self, mock_get):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
vcpus = 1
instance = self._fake_instance(vcpus=vcpus)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
add_vcpus = 10
vcpus += add_vcpus
instance = self._fake_instance(vcpus=add_vcpus)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
vcpus -= add_vcpus
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
def test_skip_deleted_instances(self):
# ensure that the audit process skips instances that have vm_state
# DELETED, but the DB record is not yet deleted.
self._fake_instance(vm_state=vm_states.DELETED, host=self.host)
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
class ResizeClaimTestCase(BaseTrackerTestCase):
def setUp(self):
super(ResizeClaimTestCase, self).setUp()
def _fake_migration_create(mig_self, ctxt):
self._migrations[mig_self.instance_uuid] = mig_self
mig_self.obj_reset_changes()
self.stubs.Set(objects.Migration, 'create',
_fake_migration_create)
self.instance = self._fake_instance()
self.instance_type = self._fake_flavor_create()
def _fake_migration_create(self, context, values=None):
instance_uuid = str(uuid.uuid1())
mig_dict = test_migration.fake_db_migration()
mig_dict.update({
'id': 1,
'source_compute': 'host1',
'source_node': 'fakenode',
'dest_compute': 'host2',
'dest_node': 'fakenode',
'dest_host': '127.0.0.1',
'old_instance_type_id': 1,
'new_instance_type_id': 2,
'instance_uuid': instance_uuid,
'status': 'pre-migrating',
'updated_at': timeutils.utcnow()
})
if values:
mig_dict.update(values)
migration = objects.Migration()
migration.update(mig_dict)
# This hits the stub in setUp()
migration.create('fake')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_abort(self, mock_get):
try:
with self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits):
raise test.TestingException("abort")
except test.TestingException:
pass
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get):
limits = self._limits(
2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
2 * FAKE_VIRT_LOCAL_GB,
2 * FAKE_VIRT_VCPUS)
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, limits)
instance2 = self._fake_instance()
self.tracker.resize_claim(self.context, instance2, self.instance_type,
limits)
self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_audit(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_same_host(self, mock_get):
self.limits['vcpu'] = 3
src_dict = {
'memory_mb': 1, 'root_gb': 1, 'ephemeral_gb': 0, 'vcpus': 1}
dest_dict = dict((k, v + 1) for (k, v) in src_dict.iteritems())
src_type = self._fake_flavor_create(
id=10, name="srcflavor", **src_dict)
dest_type = self._fake_flavor_create(
id=11, name="destflavor", **dest_dict)
# make an instance of src_type:
instance = self._fake_instance(flavor=src_type)
instance['system_metadata'] = self._fake_instance_system_metadata(
dest_type)
self.tracker.instance_claim(self.context, instance, self.limits)
# resize to dest_type:
claim = self.tracker.resize_claim(self.context, instance,
dest_type, self.limits)
self._assert(src_dict['memory_mb'] + dest_dict['memory_mb']
+ 2 * FAKE_VIRT_MEMORY_OVERHEAD, 'memory_mb_used')
self._assert(src_dict['root_gb'] + src_dict['ephemeral_gb']
+ dest_dict['root_gb'] + dest_dict['ephemeral_gb'],
'local_gb_used')
self._assert(src_dict['vcpus'] + dest_dict['vcpus'], 'vcpus_used')
self.tracker.update_available_resource(self.context)
claim.abort()
# only the original instance should remain, not the migration:
self._assert(src_dict['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
'memory_mb_used')
self._assert(src_dict['root_gb'] + src_dict['ephemeral_gb'],
'local_gb_used')
self._assert(src_dict['vcpus'], 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_revert(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, {}, self.limits)
self.tracker.drop_resize_claim(self.context, self.instance)
self.assertEqual(0, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_revert_reserve_source(self, mock_get):
# if a revert has started at the API and audit runs on
# the source compute before the instance flips back to source,
# resources should still be held at the source based on the
# migration:
dest = "desthost"
dest_tracker = self._tracker(host=dest)
dest_tracker.update_available_resource(self.context)
self.instance = self._fake_instance(memory_mb=FAKE_VIRT_MEMORY_MB,
root_gb=FAKE_VIRT_LOCAL_GB, ephemeral_gb=0,
vcpus=FAKE_VIRT_VCPUS, instance_type_id=1)
values = {'source_compute': self.host, 'dest_compute': dest,
'old_instance_type_id': 1, 'new_instance_type_id': 1,
'status': 'post-migrating',
'instance_uuid': self.instance['uuid']}
self._fake_migration_create(self.context, values)
# attach an instance to the destination host tracker:
dest_tracker.instance_claim(self.context, self.instance)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD,
'memory_mb_used', tracker=dest_tracker)
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
tracker=dest_tracker)
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
tracker=dest_tracker)
# audit and recheck to confirm migration doesn't get double counted
# on dest:
dest_tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD,
'memory_mb_used', tracker=dest_tracker)
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
tracker=dest_tracker)
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
tracker=dest_tracker)
# apply the migration to the source host tracker:
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
# flag the instance and migration as reverting and re-audit:
self.instance['vm_state'] = vm_states.RESIZED
self.instance['task_state'] = task_states.RESIZE_REVERTING
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_MB + 1, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
def test_resize_filter(self):
instance = self._fake_instance(vm_state=vm_states.ACTIVE,
task_state=task_states.SUSPENDING)
self.assertFalse(self.tracker._instance_in_resize_state(instance))
instance = self._fake_instance(vm_state=vm_states.RESIZED,
task_state=task_states.SUSPENDING)
self.assertTrue(self.tracker._instance_in_resize_state(instance))
states = [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH]
for vm_state in [vm_states.ACTIVE, vm_states.STOPPED]:
for task_state in states:
instance = self._fake_instance(vm_state=vm_state,
task_state=task_state)
result = self.tracker._instance_in_resize_state(instance)
self.assertTrue(result)
def test_dupe_filter(self):
instance = self._fake_instance(host=self.host)
values = {'source_compute': self.host, 'dest_compute': self.host,
'instance_uuid': instance['uuid'], 'new_instance_type_id': 2}
self._fake_flavor_create(id=2)
self._fake_migration_create(self.context, values)
self._fake_migration_create(self.context, values)
self.tracker.update_available_resource(self.context)
self.assertEqual(1, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_set_instance_host_and_node(self, mock_get):
instance = self._fake_instance()
self.assertIsNone(instance['host'])
self.assertIsNone(instance['launched_on'])
self.assertIsNone(instance['node'])
claim = self.tracker.instance_claim(self.context, instance)
self.assertNotEqual(0, claim.memory_mb)
self.assertEqual('fakehost', instance['host'])
self.assertEqual('fakehost', instance['launched_on'])
self.assertEqual('fakenode', instance['node'])
class NoInstanceTypesInSysMetadata(ResizeClaimTestCase):
"""Make sure we handle the case where the following are true:
#) Compute node C gets upgraded to code that looks for instance types in
system metadata. AND
#) C already has instances in the process of migrating that do not have
stashed instance types.
bug 1164110
"""
def setUp(self):
super(NoInstanceTypesInSysMetadata, self).setUp()
self.instance = self._fake_instance(stash=False)
def test_get_instance_type_stash_false(self):
with (mock.patch.object(objects.Flavor, 'get_by_id',
return_value=self.instance_type)):
flavor = self.tracker._get_instance_type(self.context,
self.instance, "new_")
self.assertEqual(self.instance_type, flavor)
class OrphanTestCase(BaseTrackerTestCase):
def _driver(self):
class OrphanVirtDriver(FakeVirtDriver):
def get_per_instance_usage(self):
return {
'1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '1-2-3-4-5'},
'2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '2-3-4-5-6'},
}
return OrphanVirtDriver()
def test_usage(self):
self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
def test_find(self):
# create one legit instance and verify the 2 orphans remain
self._fake_instance()
orphans = self.tracker._find_orphaned_instances()
self.assertEqual(2, len(orphans))
class ComputeMonitorTestCase(BaseTestCase):
def setUp(self):
super(ComputeMonitorTestCase, self).setUp()
fake_monitors = [
'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass1',
'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass2']
self.flags(compute_available_monitors=fake_monitors)
self.tracker = self._tracker()
self.node_name = 'nodename'
self.user_id = 'fake'
self.project_id = 'fake'
self.info = {}
self.context = context.RequestContext(self.user_id,
self.project_id)
def test_get_host_metrics_none(self):
        self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
self.tracker.monitors = []
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertEqual(len(metrics), 0)
def test_get_host_metrics_one_failed(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class4 = test_monitors.FakeMonitorClass4(self.tracker)
self.tracker.monitors = [class1, class4]
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertTrue(len(metrics) > 0)
def test_get_host_metrics(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass2'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class2 = test_monitors.FakeMonitorClass2(self.tracker)
self.tracker.monitors = [class1, class2]
mock_notifier = mock.Mock()
with mock.patch.object(rpc, 'get_notifier',
return_value=mock_notifier) as mock_get:
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_get.assert_called_once_with(service='compute',
host=self.node_name)
expected_metrics = [{
'timestamp': 1232,
'name': 'key1',
'value': 2600,
'source': 'libvirt'
}, {
'name': 'key2',
'source': 'libvirt',
'timestamp': 123,
'value': 1600
}]
payload = {
'metrics': expected_metrics,
'host': self.tracker.host,
'host_ip': CONF.my_ip,
'nodename': self.node_name
}
mock_notifier.info.assert_called_once_with(
self.context, 'compute.metrics.update', payload)
self.assertEqual(metrics, expected_metrics)
class TrackerPeriodicTestCase(BaseTrackerTestCase):
def test_periodic_status_update(self):
# verify update called on instantiation
self.assertEqual(1, self.update_call_count)
# verify update not called if no change to resources
self.tracker.update_available_resource(self.context)
self.assertEqual(1, self.update_call_count)
# verify update is called when resources change
driver = self.tracker.driver
driver.memory_mb += 1
self.tracker.update_available_resource(self.context)
self.assertEqual(2, self.update_call_count)
def test_update_available_resource_calls_locked_inner(self):
@mock.patch.object(self.tracker, 'driver')
@mock.patch.object(self.tracker,
'_update_available_resource')
@mock.patch.object(self.tracker, '_verify_resources')
@mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
resources = {'there is someone in my head': 'but it\'s not me'}
mock_driver.get_available_resource.return_value = resources
self.tracker.update_available_resource(self.context)
mock_uar.assert_called_once_with(self.context, resources)
_test()
class StatsDictTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a dictionary.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS)
def _get_stats(self):
return jsonutils.loads(self.tracker.compute_node['stats'])
def test_virt_stats(self):
# start with virt driver stats
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
# adding an instance should keep virt driver stats
self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
expected_stats = {}
expected_stats.update(FAKE_VIRT_STATS)
expected_stats.update(self.tracker.stats)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
class StatsJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a json string.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)
def _get_stats(self):
return jsonutils.loads(self.tracker.compute_node['stats'])
def test_virt_stats(self):
# start with virt driver stats
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
# adding an instance should keep virt driver stats
# and add rt stats
self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
expected_stats = {}
expected_stats.update(FAKE_VIRT_STATS)
expected_stats.update(self.tracker.stats)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
class StatsInvalidJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats='this is not json')
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for string that does not parse as json
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
class StatsInvalidTypeTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats=10)
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for incorrect stats value type
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
|
luzheqi1987/nova-annotation
|
nova/tests/unit/compute/test_resource_tracker.py
|
Python
|
apache-2.0
| 61,351
|
[
"exciting"
] |
1f46fbfafd9bebfbdc5a6d6fbffe8330f902fd8dec31e115779784e22a27e4c9
|
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import numpy.ma as ma
import dask.array as da
import logging
import warnings
from scipy import ndimage
try:
# For scikit-image >= 0.17.0
from skimage.registration._phase_cross_correlation import _upsampled_dft
except ModuleNotFoundError:
from skimage.feature.register_translation import _upsampled_dft
from hyperspy.defaults_parser import preferences
from hyperspy.external.progressbar import progressbar
from hyperspy.misc.math_tools import symmetrize, antisymmetrize, optimal_fft_size
from hyperspy.signal import BaseSignal
from hyperspy._signals.lazy import LazySignal
from hyperspy._signals.common_signal2d import CommonSignal2D
from hyperspy.signal_tools import PeaksFinder2D
from hyperspy.docstrings.plot import (
BASE_PLOT_DOCSTRING, PLOT2D_DOCSTRING, KWARGS_DOCSTRING)
from hyperspy.docstrings.signal import SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG
from hyperspy.ui_registry import DISPLAY_DT, TOOLKIT_DT
from hyperspy.utils.peakfinders2D import (
find_local_max, find_peaks_max, find_peaks_minmax, find_peaks_zaefferer,
find_peaks_stat, find_peaks_log, find_peaks_dog, find_peaks_xc)
_logger = logging.getLogger(__name__)
def shift_image(im, shift=0, interpolation_order=1, fill_value=np.nan):
if np.any(shift):
fractional, integral = np.modf(shift)
if fractional.any():
order = interpolation_order
else:
# Disable interpolation
order = 0
return ndimage.shift(im, shift, cval=fill_value, order=order)
else:
return im
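# Illustrative note (added for clarity, not part of the original module):
# for a purely integral shift such as shift_image(im, (2, 0)) the data are
# only relabelled (interpolation order 0), whereas a fractional shift such
# as shift_image(im, (2.5, 0)) is interpolated with the requested spline
# order; pixels shifted in from outside the image are set to fill_value.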
def triu_indices_minus_diag(n):
"""Returns the indices for the upper-triangle of an (n, n) array
excluding its diagonal
Parameters
----------
n : int
The length of the square array
"""
ti = np.triu_indices(n)
isnotdiag = ti[0] != ti[1]
return ti[0][isnotdiag], ti[1][isnotdiag]
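# Illustrative example (added for clarity, not part of the original module):
# triu_indices_minus_diag(3) returns (array([0, 0, 1]), array([1, 2, 2])),
# i.e. the index pairs (0, 1), (0, 2) and (1, 2) of the strictly upper
# triangle.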
def hanning2d(M, N):
"""
A 2D hanning window created by outer product.
"""
return np.outer(np.hanning(M), np.hanning(N))
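# Illustrative note (added for clarity, not part of the original module):
# the window is separable, hanning2d(M, N)[i, j] == np.hanning(M)[i] *
# np.hanning(N)[j]; it is applied below to taper the image edges before the
# FFT-based correlation.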
def sobel_filter(im):
sx = ndimage.sobel(im, axis=0, mode='constant')
sy = ndimage.sobel(im, axis=1, mode='constant')
sob = np.hypot(sx, sy)
return sob
def fft_correlation(in1, in2, normalize=False, real_only=False):
"""Correlation of two N-dimensional arrays using FFT.
Adapted from scipy's fftconvolve.
Parameters
----------
in1, in2 : array
Input arrays to convolve.
normalize: bool, default False
If True performs phase correlation.
real_only : bool, default False
If True, and in1 and in2 are real-valued inputs, uses
rfft instead of fft for approx. 2x speed-up.
"""
s1 = np.array(in1.shape)
s2 = np.array(in2.shape)
size = s1 + s2 - 1
# Calculate optimal FFT size
complex_result = (in1.dtype.kind == 'c' or in2.dtype.kind == 'c')
fsize = [optimal_fft_size(a, not complex_result) for a in size]
# For real-valued inputs, rfftn is ~2x faster than fftn
if not complex_result and real_only:
fft_f, ifft_f = np.fft.rfftn, np.fft.irfftn
else:
fft_f, ifft_f = np.fft.fftn, np.fft.ifftn
fprod = fft_f(in1, fsize)
fprod *= fft_f(in2, fsize).conjugate()
if normalize is True:
fprod = np.nan_to_num(fprod / np.absolute(fprod))
ret = ifft_f(fprod).real.copy()
return ret, fprod
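# --- Illustrative sketch (added for clarity, not part of the original
# module). It shows how fft_correlation can be used to recover a known
# integer displacement: correlate, take the argmax, and wrap indices past
# half the correlation size back to negative lags, as estimate_image_shift
# does below. The function name and test pattern are made up for this
# example.
def _fft_correlation_shift_example():
    a = np.zeros((64, 64))
    a[20:24, 30:34] = 1.0
    # Displace the feature by +3 rows and -5 columns.
    b = np.roll(np.roll(a, 3, axis=0), -5, axis=1)
    corr, _ = fft_correlation(a, b, normalize=False, real_only=True)
    argmax = np.unravel_index(np.argmax(corr), corr.shape)
    wrapped = [index if index < size // 2 else index - size
               for index, size in zip(argmax, corr.shape)]
    # wrapped is (-3, 5): the negative of the displacement applied above,
    # which is why estimate_image_shift returns -shifts.
    return np.array(wrapped)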
def estimate_image_shift(ref, image, roi=None, sobel=True,
medfilter=True, hanning=True, plot=False,
dtype='float', normalize_corr=False,
sub_pixel_factor=1,
return_maxval=True):
"""Estimate the shift in a image using phase correlation
This method can only estimate the shift by comparing
bidimensional features that should not change the position
in the given axis. To decrease the memory usage, the time of
computation and the accuracy of the results it is convenient
to select a region of interest by setting the roi keyword.
Parameters
----------
ref : 2D numpy.ndarray
Reference image
image : 2D numpy.ndarray
Image to register
roi : tuple of ints (top, bottom, left, right)
Define the region of interest
sobel : bool
apply a sobel filter for edge enhancement
medfilter : bool
apply a median filter for noise reduction
hanning : bool
Apply a 2d hanning filter
plot : bool or matplotlib.Figure
If True, plots the images after applying the filters and the phase
correlation. If a figure instance, the images will be plotted to the
given figure.
    return_maxval : bool, default True
        If True, also return the maximum value of the correlation
        (see the Returns section).
dtype : str or dtype
Typecode or data-type in which the calculations must be
performed.
normalize_corr : bool
If True use phase correlation instead of standard correlation
sub_pixel_factor : float
Estimate shifts with a sub-pixel accuracy of 1/sub_pixel_factor parts
of a pixel. Default is 1, i.e. no sub-pixel accuracy.
Returns
-------
    shifts : np.array
        The estimated shifts.
max_value : float
The maximum value of the correlation
Notes
-----
The statistical analysis approach to the translation estimation
when using reference='stat' roughly follows [*]_ . If you use
it please cite their article.
References
----------
.. [*] Bernhard Schaffer, Werner Grogger and Gerald Kothleitner.
“Automated Spatial Drift Correction for EFTEM Image Series.”
Ultramicroscopy 102, no. 1 (December 2004): 27–36.
"""
ref, image = da.compute(ref, image)
# Make a copy of the images to avoid modifying them
ref = ref.copy().astype(dtype)
image = image.copy().astype(dtype)
if roi is not None:
top, bottom, left, right = roi
else:
top, bottom, left, right = [None, ] * 4
# Select region of interest
ref = ref[top:bottom, left:right]
image = image[top:bottom, left:right]
# Apply filters
for im in (ref, image):
if hanning is True:
im *= hanning2d(*im.shape)
if medfilter is True:
# This is faster than sp.signal.med_filt,
# which was the previous implementation.
# The size is fixed at 3 to be consistent
# with the previous implementation.
im[:] = sp.ndimage.median_filter(im, size=3)
if sobel is True:
im[:] = sobel_filter(im)
# If sub-pixel alignment not being done, use faster real-valued fft
real_only = (sub_pixel_factor == 1)
phase_correlation, image_product = fft_correlation(
ref, image, normalize=normalize_corr, real_only=real_only)
# Estimate the shift by getting the coordinates of the maximum
argmax = np.unravel_index(np.argmax(phase_correlation),
phase_correlation.shape)
threshold = (phase_correlation.shape[0] / 2 - 1,
phase_correlation.shape[1] / 2 - 1)
shift0 = argmax[0] if argmax[0] < threshold[0] else \
argmax[0] - phase_correlation.shape[0]
shift1 = argmax[1] if argmax[1] < threshold[1] else \
argmax[1] - phase_correlation.shape[1]
max_val = phase_correlation.real.max()
shifts = np.array((shift0, shift1))
    # The following code is more or less copied from
    # skimage.feature.register_translation, to gain access to the maximum
    # value:
if sub_pixel_factor != 1:
# Initial shift estimate in upsampled grid
shifts = np.round(shifts * sub_pixel_factor) / sub_pixel_factor
upsampled_region_size = np.ceil(sub_pixel_factor * 1.5)
# Center of output array at dftshift + 1
dftshift = np.fix(upsampled_region_size / 2.0)
sub_pixel_factor = np.array(sub_pixel_factor, dtype=np.float64)
normalization = (image_product.size * sub_pixel_factor ** 2)
# Matrix multiply DFT around the current shift estimate
sample_region_offset = dftshift - shifts * sub_pixel_factor
correlation = _upsampled_dft(image_product.conj(),
upsampled_region_size,
sub_pixel_factor,
sample_region_offset).conj()
correlation /= normalization
# Locate maximum and map back to original pixel grid
maxima = np.array(np.unravel_index(
np.argmax(np.abs(correlation)),
correlation.shape),
dtype=np.float64)
maxima -= dftshift
shifts = shifts + maxima / sub_pixel_factor
max_val = correlation.real.max()
# Plot on demand
if plot is True or isinstance(plot, plt.Figure):
if isinstance(plot, plt.Figure):
fig = plot
axarr = plot.axes
if len(axarr) < 3:
for i in range(3):
fig.add_subplot(1, 3, i + 1)
axarr = fig.axes
else:
fig, axarr = plt.subplots(1, 3)
full_plot = len(axarr[0].images) == 0
if full_plot:
axarr[0].set_title('Reference')
axarr[1].set_title('Image')
axarr[2].set_title('Phase correlation')
axarr[0].imshow(ref)
axarr[1].imshow(image)
d = (np.array(phase_correlation.shape) - 1) // 2
extent = [-d[1], d[1], -d[0], d[0]]
axarr[2].imshow(np.fft.fftshift(phase_correlation),
extent=extent)
plt.show()
else:
axarr[0].images[0].set_data(ref)
axarr[1].images[0].set_data(image)
axarr[2].images[0].set_data(np.fft.fftshift(phase_correlation))
# TODO: Renormalize images
fig.canvas.draw_idle()
    # Free the memory. This is especially necessary if the images are
    # memory-mapped.
del ref
del image
if return_maxval:
return -shifts, max_val
else:
return -shifts
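# --- Illustrative sketch (added for clarity, not part of the original
# module). It demonstrates a typical call to estimate_image_shift on two
# synthetic frames; the function name and the Gaussian test pattern are made
# up for this example.
def _estimate_image_shift_example():
    y, x = np.mgrid[0:128, 0:128]
    ref = np.exp(-((x - 64.0) ** 2 + (y - 64.0) ** 2) / 50.0)
    # The same blob displaced by +4 rows and -7 columns.
    image = np.exp(-((x - 57.0) ** 2 + (y - 68.0) ** 2) / 50.0)
    shifts, max_val = estimate_image_shift(
        ref, image, sobel=False, medfilter=False, hanning=False)
    # shifts is approximately array([4., -7.]), the displacement of `image`
    # with respect to `ref`; max_val is the correlation maximum and can be
    # used as a quality measure.
    return shifts, max_val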
class Signal2D(BaseSignal, CommonSignal2D):
"""
"""
_signal_dimension = 2
_lazy = False
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
if self.axes_manager.signal_dimension != 2:
self.axes_manager.set_signal_dimension(2)
def plot(self,
colorbar=True,
scalebar=True,
scalebar_color="white",
axes_ticks=None,
axes_off=False,
saturated_pixels=None,
vmin=None,
vmax=None,
gamma=1.0,
no_nans=False,
centre_colormap="auto",
min_aspect=0.1,
**kwargs
):
"""%s
%s
%s
"""
super(Signal2D, self).plot(
colorbar=colorbar,
scalebar=scalebar,
scalebar_color=scalebar_color,
axes_ticks=axes_ticks,
axes_off=axes_off,
saturated_pixels=saturated_pixels,
vmin=vmin,
vmax=vmax,
gamma=gamma,
no_nans=no_nans,
centre_colormap=centre_colormap,
min_aspect=min_aspect,
**kwargs
)
plot.__doc__ %= (BASE_PLOT_DOCSTRING, PLOT2D_DOCSTRING, KWARGS_DOCSTRING)
def create_model(self, dictionary=None):
"""Create a model for the current signal
Parameters
----------
dictionary : {None, dict}, optional
A dictionary to be used to recreate a model. Usually generated
using :meth:`hyperspy.model.as_dictionary`
Returns
-------
A Model class
"""
from hyperspy.models.model2d import Model2D
return Model2D(self, dictionary=dictionary)
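        # Illustrative usage (added for clarity, not part of the original
        # module):
        #
        #   >>> m = s.create_model()   # empty Model2D bound to the signal s
        #
        # Components can then be added to ``m`` and fitted to the image.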
def estimate_shift2D(self,
reference='current',
correlation_threshold=None,
chunk_size=30,
roi=None,
normalize_corr=False,
sobel=True,
medfilter=True,
hanning=True,
plot=False,
dtype='float',
show_progressbar=None,
sub_pixel_factor=1):
"""Estimate the shifts in an image using phase correlation.
This method can only estimate the shift by comparing
bi-dimensional features that should not change position
between frames. To decrease the memory usage, the time of
computation and the accuracy of the results it is convenient
to select a region of interest by setting the ``roi`` argument.
Parameters
----------
reference : {'current', 'cascade' ,'stat'}
If 'current' (default) the image at the current
coordinates is taken as reference. If 'cascade' each image
is aligned with the previous one. If 'stat' the translation
of every image with all the rest is estimated and by
performing statistical analysis on the result the
translation is estimated.
correlation_threshold : {None, 'auto', float}
This parameter is only relevant when reference='stat'.
If float, the shift estimations with a maximum correlation
value lower than the given value are not used to compute
the estimated shifts. If 'auto' the threshold is calculated
automatically as the minimum maximum correlation value
of the automatically selected reference image.
chunk_size : {None, int}
If int and reference='stat' the number of images used
as reference are limited to the given value.
        roi : tuple of ints or floats (left, right, top, bottom)
            Define the region of interest. If int, the position is given
            in axis indices; if float, in axis values. Note that ROIs can
            be used in place of a tuple.
normalize_corr : bool, default False
If True, use phase correlation to align the images, otherwise
use cross correlation.
sobel : bool, default True
Apply a Sobel filter for edge enhancement
medfilter : bool, default True
Apply a median filter for noise reduction
hanning : bool, default True
Apply a 2D hanning filter
plot : bool or 'reuse'
If True plots the images after applying the filters and
the phase correlation. If 'reuse', it will also plot the images,
but it will only use one figure, and continuously update the images
in that figure as it progresses through the stack.
dtype : str or dtype
Typecode or data-type in which the calculations must be
performed.
%s
sub_pixel_factor : float
Estimate shifts with a sub-pixel accuracy of 1/sub_pixel_factor
parts of a pixel. Default is 1, i.e. no sub-pixel accuracy.
Returns
-------
shifts : list of array
List of estimated shifts
Notes
-----
The statistical analysis approach to the translation estimation
when using ``reference='stat'`` roughly follows [Schaffer2004]_.
If you use it please cite their article.
References
----------
.. [Schaffer2004] Schaffer, Bernhard, Werner Grogger, and Gerald Kothleitner.
“Automated Spatial Drift Correction for EFTEM Image Series.”
Ultramicroscopy 102, no. 1 (December 2004): 27–36.
See Also
--------
* :py:meth:`~._signals.signal2d.Signal2D.align2D`
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_two()
if roi is not None:
# Get the indices of the roi
yaxis = self.axes_manager.signal_axes[1]
xaxis = self.axes_manager.signal_axes[0]
roi = tuple([xaxis._get_index(i) for i in roi[2:]] +
[yaxis._get_index(i) for i in roi[:2]])
ref = None if reference == 'cascade' else \
self.__call__().copy()
shifts = []
nrows = None
images_number = self.axes_manager._max_index + 1
if plot == 'reuse':
# Reuse figure for plots
plot = plt.figure()
if reference == 'stat':
nrows = images_number if chunk_size is None else \
min(images_number, chunk_size)
pcarray = ma.zeros((nrows, self.axes_manager._max_index + 1,
),
dtype=np.dtype([('max_value', np.float),
('shift', np.int32,
(2,))]))
nshift, max_value = estimate_image_shift(
self(),
self(),
roi=roi,
sobel=sobel,
medfilter=medfilter,
hanning=hanning,
normalize_corr=normalize_corr,
plot=plot,
dtype=dtype,
sub_pixel_factor=sub_pixel_factor)
np.fill_diagonal(pcarray['max_value'], max_value)
pbar_max = nrows * images_number
else:
pbar_max = images_number
# Main iteration loop. Fills the rows of pcarray when reference
# is stat
with progressbar(total=pbar_max,
disable=not show_progressbar,
leave=True) as pbar:
for i1, im in enumerate(self._iterate_signal()):
if reference in ['current', 'cascade']:
if ref is None:
ref = im.copy()
shift = np.array([0, 0])
nshift, max_val = estimate_image_shift(
ref, im, roi=roi, sobel=sobel, medfilter=medfilter,
hanning=hanning, plot=plot,
normalize_corr=normalize_corr, dtype=dtype,
sub_pixel_factor=sub_pixel_factor)
if reference == 'cascade':
shift += nshift
ref = im.copy()
else:
shift = nshift
shifts.append(shift.copy())
pbar.update(1)
elif reference == 'stat':
if i1 == nrows:
break
# Iterate to fill the columns of pcarray
for i2, im2 in enumerate(
self._iterate_signal()):
if i2 > i1:
nshift, max_value = estimate_image_shift(
im,
im2,
roi=roi,
sobel=sobel,
medfilter=medfilter,
hanning=hanning,
normalize_corr=normalize_corr,
plot=plot,
dtype=dtype,
sub_pixel_factor=sub_pixel_factor)
pcarray[i1, i2] = max_value, nshift
del im2
pbar.update(1)
del im
if reference == 'stat':
# Select the reference image as the one that has the
# higher max_value in the row
sqpcarr = pcarray[:, :nrows]
sqpcarr['max_value'][:] = symmetrize(sqpcarr['max_value'])
sqpcarr['shift'][:] = antisymmetrize(sqpcarr['shift'])
ref_index = np.argmax(pcarray['max_value'].min(1))
self.ref_index = ref_index
shifts = (pcarray['shift'] +
pcarray['shift'][ref_index, :nrows][:, np.newaxis])
if correlation_threshold is not None:
if correlation_threshold == 'auto':
correlation_threshold = \
(pcarray['max_value'].min(0)).max()
_logger.info("Correlation threshold = %1.2f",
correlation_threshold)
shifts[pcarray['max_value'] <
correlation_threshold] = ma.masked
shifts.mask[ref_index, :] = False
shifts = shifts.mean(0)
else:
shifts = np.array(shifts)
del ref
return shifts
estimate_shift2D.__doc__ %= SHOW_PROGRESSBAR_ARG
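    # Illustrative usage (added for clarity, not part of the original
    # module). For a Signal2D image stack ``s``, a typical call is:
    #
    #   >>> shifts = s.estimate_shift2D(reference='stat',
    #   ...                             roi=(10, 100, 10, 100),
    #   ...                             sub_pixel_factor=10)
    #
    # The returned array can then be passed to ``align2D(shifts=shifts)``.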
def align2D(
self,
crop=True,
fill_value=np.nan,
shifts=None,
expand=False,
interpolation_order=1,
show_progressbar=None,
parallel=None,
max_workers=None,
**kwargs,
):
"""Align the images in-place using :py:func:`scipy.ndimage.shift`.
The images can be aligned using either user-provided shifts or
by first estimating the shifts.
See :py:meth:`~._signals.signal2d.Signal2D.estimate_shift2D`
for more details on estimating image shifts.
Parameters
----------
crop : bool
If True, the data will be cropped not to include regions
with missing data
fill_value : int, float, nan
The areas with missing data are filled with the given value.
Default is nan.
shifts : None or list of tuples
If None the shifts are estimated using
:py:meth:`~._signals.signal2D.estimate_shift2D`.
expand : bool
If True, the data will be expanded to fit all data after alignment.
Overrides `crop`.
interpolation_order: int, default 1.
The order of the spline interpolation. Default is 1, linear
interpolation.
%s
%s
%s
**kwargs :
Keyword arguments passed to :py:meth:`~._signals.signal2d.Signal2D.estimate_shift2D`
Returns
-------
shifts : np.array
The estimated shifts are returned only if ``shifts`` is None
See Also
--------
* :py:meth:`~._signals.signal2d.Signal2D.estimate_shift2D`
"""
self._check_signal_dimension_equals_two()
return_shifts = False
if shifts is None:
shifts = self.estimate_shift2D(**kwargs)
return_shifts = True
if not np.any(shifts):
warnings.warn(
"The estimated shifts are all zero, suggesting "
"the images are already aligned",
UserWarning,
)
return shifts
elif not np.any(shifts):
warnings.warn(
"The provided shifts are all zero, no alignment done",
UserWarning,
)
return None
if expand:
# Expand to fit all valid data
left, right = (
int(np.floor(shifts[:, 1].min())) if shifts[:, 1].min() < 0 else 0,
int(np.ceil(shifts[:, 1].max())) if shifts[:, 1].max() > 0 else 0,
)
top, bottom = (
int(np.floor(shifts[:, 0].min())) if shifts[:, 0].min() < 0 else 0,
int(np.ceil(shifts[:, 0].max())) if shifts[:, 0].max() > 0 else 0,
)
xaxis = self.axes_manager.signal_axes[0]
yaxis = self.axes_manager.signal_axes[1]
padding = []
for i in range(self.data.ndim):
if i == xaxis.index_in_array:
padding.append((right, -left))
elif i == yaxis.index_in_array:
padding.append((bottom, -top))
else:
padding.append((0, 0))
self.data = np.pad(
self.data, padding, mode="constant", constant_values=(fill_value,)
)
if left < 0:
xaxis.offset += left * xaxis.scale
if np.any((left < 0, right > 0)):
xaxis.size += right - left
if top < 0:
yaxis.offset += top * yaxis.scale
if np.any((top < 0, bottom > 0)):
yaxis.size += bottom - top
        # Translate, with sub-pixel precision if necessary,
# note that we operate in-place here
self._map_iterate(
shift_image,
iterating_kwargs=(("shift", -shifts),),
show_progressbar=show_progressbar,
parallel=parallel,
max_workers=max_workers,
ragged=False,
inplace=True,
fill_value=fill_value,
interpolation_order=interpolation_order,
)
if crop and not expand:
max_shift = np.max(shifts, axis=0) - np.min(shifts, axis=0)
if np.any(max_shift >= np.array(self.axes_manager.signal_shape)):
raise ValueError("Shift outside range of signal axes. Cannot crop signal.")
# Crop the image to the valid size
shifts = -shifts
bottom, top = (
int(np.floor(shifts[:, 0].min())) if shifts[:, 0].min() < 0 else None,
int(np.ceil(shifts[:, 0].max())) if shifts[:, 0].max() > 0 else 0,
)
right, left = (
int(np.floor(shifts[:, 1].min())) if shifts[:, 1].min() < 0 else None,
int(np.ceil(shifts[:, 1].max())) if shifts[:, 1].max() > 0 else 0,
)
self.crop_image(top, bottom, left, right)
shifts = -shifts
self.events.data_changed.trigger(obj=self)
if return_shifts:
return shifts
align2D.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
def crop_image(self, top=None, bottom=None,
left=None, right=None, convert_units=False):
"""Crops an image in place.
Parameters
----------
top, bottom, left, right : {int | float}
If int the values are taken as indices. If float the values are
converted to indices.
convert_units : bool
Default is False
If True, convert the signal units using the 'convert_to_units'
method of the `axes_manager`. If False, does nothing.
See also
--------
crop
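        Examples
        --------
        Illustrative sketch, not part of the original documentation; assumes
        ``im`` is a Signal2D instance and the values are pixel indices:
        >>> im.crop_image(top=10, bottom=100, left=10, right=100)  # doctest: +SKIP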
"""
self._check_signal_dimension_equals_two()
self.crop(self.axes_manager.signal_axes[1].index_in_axes_manager,
top,
bottom)
self.crop(self.axes_manager.signal_axes[0].index_in_axes_manager,
left,
right)
if convert_units:
self.axes_manager.convert_units('signal')
def add_ramp(self, ramp_x, ramp_y, offset=0):
"""Add a linear ramp to the signal.
Parameters
----------
ramp_x: float
Slope of the ramp in x-direction.
ramp_y: float
Slope of the ramp in y-direction.
offset: float, optional
Offset of the ramp at the signal fulcrum.
Notes
-----
The fulcrum of the linear ramp is at the origin and the slopes are
given in units of the axis with the according scale taken into
account. Both are available via the `axes_manager` of the signal.
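        Examples
        --------
        Illustrative sketch, not part of the original documentation; adds the
        plane ``1 + 0.1 * x + 0.2 * y`` to every image of ``s``:
        >>> s.add_ramp(ramp_x=0.1, ramp_y=0.2, offset=1)  # doctest: +SKIP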
"""
yy, xx = np.indices(self.axes_manager._signal_shape_in_array)
if self._lazy:
import dask.array as da
ramp = offset * da.ones(self.data.shape, dtype=self.data.dtype,
chunks=self.data.chunks)
else:
ramp = offset * np.ones(self.data.shape, dtype=self.data.dtype)
ramp += ramp_x * xx
ramp += ramp_y * yy
self.data += ramp
def find_peaks(self, method='local_max', interactive=True,
current_index=False, show_progressbar=None,
parallel=None, max_workers=None, display=True, toolkit=None,
**kwargs):
"""Find peaks in a 2D signal.
        Function to locate the positive peaks in an image using various
        user-specified methods. Returns a structured array containing the peak
positions.
Parameters
----------
method : str
Select peak finding algorithm to implement. Available methods
are:
* 'local_max' - simple local maximum search using the
:py:func:`skimage.feature.peak_local_max` function
* 'max' - simple local maximum search using the
:py:func:`~hyperspy.utils.peakfinders2D.find_peaks_max`.
* 'minmax' - finds peaks by comparing maximum filter results
with minimum filter, calculates centers of mass. See the
:py:func:`~hyperspy.utils.peakfinders2D.find_peaks_minmax`
function.
* 'zaefferer' - based on gradient thresholding and refinement
by local region of interest optimisation. See the
:py:func:`~hyperspy.utils.peakfinders2D.find_peaks_zaefferer`
function.
* 'stat' - based on statistical refinement and difference with
respect to mean intensity. See the
:py:func:`~hyperspy.utils.peakfinders2D.find_peaks_stat`
function.
* 'laplacian_of_gaussian' - a blob finder using the laplacian of
Gaussian matrices approach. See the
:py:func:`~hyperspy.utils.peakfinders2D.find_peaks_log`
function.
* 'difference_of_gaussian' - a blob finder using the difference
of Gaussian matrices approach. See the
                :py:func:`~hyperspy.utils.peakfinders2D.find_peaks_dog`
function.
* 'template_matching' - A cross correlation peakfinder. This
method requires providing a template with the ``template``
parameter, which is used as reference pattern to perform the
template matching to the signal. It uses the
:py:func:`skimage.feature.match_template` function and the peaks
position are obtained by using `minmax` method on the
template matching result.
interactive : bool
If True, the method parameter can be adjusted interactively.
If False, the results will be returned.
current_index : bool
            If True, the computation will be performed only for the current index.
%s
%s
%s
%s
%s
**kwargs : dict
Keywords parameters associated with above methods, see the
documentation of each method for more details.
Notes
-----
As a convenience, the 'local_max' method accepts the 'distance' and
        'threshold' arguments, which will be mapped to the 'min_distance' and
'threshold_abs' of the :py:func:`skimage.feature.peak_local_max`
function.
Returns
-------
peaks : :py:class:`~hyperspy.signal.BaseSignal` or numpy.ndarray if current_index=True
Array of shape `_navigation_shape_in_array` in which each cell
contains an array with dimensions (npeaks, 2) that contains
the `x, y` pixel coordinates of peaks found in each image sorted
first along `y` and then along `x`.
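        Examples
        --------
        Illustrative sketch, not part of the original documentation; assumes
        ``s`` is a Signal2D instance. The 'distance' and 'threshold'
        convenience arguments are forwarded as described in the Notes:
        >>> peaks = s.find_peaks(method='local_max', interactive=False,
        ...                      distance=10, threshold=0.5)  # doctest: +SKIP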
"""
method_dict = {
'local_max': find_local_max,
'max': find_peaks_max,
'minmax': find_peaks_minmax,
'zaefferer': find_peaks_zaefferer,
'stat': find_peaks_stat,
'laplacian_of_gaussian': find_peaks_log,
'difference_of_gaussian': find_peaks_dog,
'template_matching' : find_peaks_xc,
}
# As a convenience, we map 'distance' to 'min_distance' and
# 'threshold' to 'threshold_abs' when using the 'local_max' method to
# match with the arguments of skimage.feature.peak_local_max.
if method == 'local_max':
if 'distance' in kwargs.keys():
kwargs['min_distance'] = kwargs.pop('distance')
if 'threshold' in kwargs.keys():
kwargs['threshold_abs'] = kwargs.pop('threshold')
if method in method_dict.keys():
method_func = method_dict[method]
else:
raise NotImplementedError(f"The method `{method}` is not "
"implemented. See documentation for "
"available implementations.")
if interactive:
# Create a peaks signal with the same navigation shape as a
# placeholder for the output
axes_dict = self.axes_manager._get_axes_dicts(
self.axes_manager.navigation_axes)
peaks = BaseSignal(np.empty(self.axes_manager.navigation_shape),
axes=axes_dict)
pf2D = PeaksFinder2D(self, method=method, peaks=peaks, **kwargs)
pf2D.gui(display=display, toolkit=toolkit)
elif current_index:
peaks = method_func(self.__call__(), **kwargs)
else:
peaks = self.map(method_func, show_progressbar=show_progressbar,
parallel=parallel, inplace=False, ragged=True,
max_workers=max_workers, **kwargs)
return peaks
find_peaks.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG,
DISPLAY_DT, TOOLKIT_DT)
class LazySignal2D(LazySignal, Signal2D):
_lazy = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
|
dnjohnstone/hyperspy
|
hyperspy/_signals/signal2d.py
|
Python
|
gpl-3.0
| 35,008
|
[
"Gaussian"
] |
70bc88012c88d356efbcbd588eb98376cabf40741ffcaa0bfbfc8ff8aa7b7fe7
|
#!/usr/bin/env python
from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import colorConverter
from ase.units import Bohr
import os.path
from pyDFTutils.ase_utils.kpoints import cubic_kpath
from collections import namedtuple
def plot_phon_from_nc(fname, title='BaT', output_filename='phonon.png'):
"""
    Read phonon frequencies and displacements from an abinit PHBST .nc file
    and plot the atom-projected phonon band structure.
"""
ds = Dataset(fname, mode='r')
#ds.variables[u'space_group'][:]
#print ds.variables[u'primitive_vectors'][:]
#print ds.variables.keys()
qpoints = ds.variables['qpoints'][:]
phfreqs = ds.variables['phfreqs'][:] * 8065.6
phdisps = ds.variables['phdispl_cart'][:]
masses = ds.variables['atomic_mass_units'][:]
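    # Assumption (added comment): the nc file stores one mass per species of
    # the ABO3 perovskite (A, B, O); repeat the oxygen mass so that there is
    # one entry per atom of the five-atom cell.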
masses = list(masses) + [masses[-1]] * 2
IR_modes = label_all(qpoints, phfreqs, phdisps, masses)
#return
print((phdisps[0, 0, :, :] / Bohr))
print((phdisps[0, 0, :, 0] / Bohr))
print((get_weight(phdisps[0, 0, :, :], masses)))
phfreqs = fix_gamma(qpoints, phfreqs)
weights_A = np.empty_like(phfreqs)
weights_B = np.empty_like(phfreqs)
weights_C = np.empty_like(phfreqs)
nk, nm = phfreqs.shape
for i in range(nk):
for j in range(nm):
weights_A[i, j], weights_B[i, j], weights_C[i, j] = get_weight(
phdisps[i, j, :, :], masses)
#for i in range(1):
# plt.plot(weights_B[:, i], linewidth=0.1, color='gray')
#plt.plot(weights_A[:, i], linewidth=0.1, color='gray')
#plt.show()
#return
axis = None
kpath = cubic_kpath()
kslist = [kpath[1]] * 15
xticks = [['$\Gamma$', 'X', 'M', '$\Gamma$', 'R', 'X'], kpath[2]]
axis = plot_band_weight(
kslist,
phfreqs.T,
weights_A.T,
axis=axis,
color='red',
style='alpha',
xticks=xticks,
title=title)
axis = plot_band_weight(
kslist,
phfreqs.T,
weights_B.T,
axis=axis,
color='green',
style='alpha',
xticks=xticks,
title=title)
axis = plot_band_weight(
kslist,
phfreqs.T,
weights_C.T,
axis=axis,
color='blue',
style='alpha',
xticks=xticks,
title=title)
tick_mode = {'R': -2, 'X': -1, 'M': 2,'Gamma':0}
for qname in IR_modes:
for mode in IR_modes[qname]:
#print(mode)
shiftx = lambda x: x-0.2 if x>0.2 else x+0.01
axis.annotate(
mode[1], (shiftx(xticks[1][tick_mode[qname]]) , mode[0] + 5),
fontsize='x-small',
color='black',wrap=True)
plt.savefig(output_filename,dpi=300)
plt.show()
return qpoints, phfreqs, phdisps, masses
#print ds.variables[u'phfreqs'][:]
#print ds.variables[u'phdispl_cart'][:]
#for k in ds.variables:
# print "--------------\n"
# print k
# print ds.variables[k][:]
def label_all(qpoints, phfreqs, phdisps, masses):
special_qpoints = {
#'Gamma': [0, 0.013333, 0],
'X': [0, 0.5, 0],
'M': [0.5, 0.5, 0],
'R': [0.5, 0.5, 0.5]
}
mode_dict = {}
for i, qpt in enumerate(qpoints):
# print qpt
for qname in special_qpoints:
if np.isclose(
qpt, special_qpoints[qname], rtol=1e-5, atol=1e-3).all():
mode_dict[qname] = []
print("====================================")
print(qname)
phdisps_q = phdisps[i]
for j, disp in enumerate(phdisps_q):
disp = disp[:, 0] + 1.0j * disp[:, 1]
mode = label(qname, disp, masses)
freq = phfreqs[i][j]
mode_dict[qname].append([freq, mode])
print(mode_dict)
return mode_dict
def label(qname, phdisp, masses, notation='IR'):
nmode = namedtuple('nmode', [
'Ax', 'Ay', 'Az', 'Bx', 'By', 'Bz', 'O1x', 'O1y', 'O1z', 'O2x', 'O2y',
'O2z', 'O3x', 'O3y', 'O3z'
])
IR_dict = {}
IR_translation={}
IR_translation['Gamma']={
'$\Delta_1$':r'$\Gamma_4^-$',
'$\Delta_2$':r'',
'$\Delta_5$':r'',
}
IR_translation['R']={
r'$\Gamma_2\prime$':'$R_2^-$',
r'$\Gamma_{12}\prime$':'$R_3^-$',
r'$\Gamma_{25}$':'$R_5^-$',
r'$\Gamma_{25}\prime$':'$R_5^+$',
r'$\Gamma_{15}$':'$R_4^-$',
}
IR_translation['X']={
'$M_1$':'$X_1^+$',
'$M_2\prime$':'$X_3^-$',
'$M_3$':'$X_2^+$',
'$M_5$':'$X_5^+$',
'$M_5\prime$':'$X_5^-$',
}
IR_translation['M']={
'$M_1$':'$M_1^+$',
'$M_2$':'$M_3^+$',
'$M_3$':'$M_2^+$',
'$M_4$':'$M_4^+$',
'$M_2\prime$':'$M_3^-$',
'$M_3\prime$':'$M_2^-$',
'$M_5$':'$M_5^+$',
'$M_5\prime$':'$M_5^-$',
}
#with open('names.txt','w') as myfile:
# for q in IR_translation:
# myfile.write('## %s\n\n'%q)
# myfile.write('|Cowley | ? |\n|------|-----|\n')
# for cname in IR_translation[q]:
# myfile.write('| '+cname+' | '+IR_translation[q][cname]+' |\n')
# myfile.write("\n")
zvec=nmode._make([0.0] * 15)
# Gamma point
D1_1=zvec._replace(Ay=1)
D1_2=zvec._replace(By=1)
D1_3=zvec._replace(O3y=1)
D1_4=zvec._replace(O1y=1, O2y=1)
D2 =zvec._replace(O1y=1, O2y=-1)
D5_1=zvec._replace(Ax=1)
D5_2=zvec._replace(Bx=1)
D5_3=zvec._replace(O1x=1)
D5_4=zvec._replace(O2x=1)
D5_5=zvec._replace(O3x=1)
D5_6=zvec._replace(Az=1)
D5_7=zvec._replace(Bz=1)
D5_8=zvec._replace(O1z=1)
D5_9=zvec._replace(O2z=1)
D5_10=zvec._replace(O3z=1)
IR_dict['Gamma'] = {
D1_1: '$\Delta_1$',
D1_2: '$\Delta_1$',
D1_3: '$\Delta_1$',
D1_4: '$\Delta_1$',
D2: '$\Delta_2$',
D5_1: '$\Delta_5$',
D5_2: '$\Delta_5$',
D5_3: '$\Delta_5$',
D5_4: '$\Delta_5$',
D5_5: '$\Delta_5$',
D5_6: '$\Delta_5$',
D5_7: '$\Delta_5$',
D5_8: '$\Delta_5$',
D5_9: '$\Delta_5$',
D5_10:'$\Delta_5$',
}
# X point
X1_1 = nmode._make([0.0] * 15)
X1_1 = X1_1._replace(By=1)
X1_2 = nmode._make([0.0] * 15)
X1_2 = X1_2._replace(O1y=1, O2y=1)
X2p_1 = nmode._make([0.0] * 15)
X2p_1 = X2p_1._replace(Ay=1)
X2p_2 = nmode._make([0.0] * 15)
X2p_2 = X2p_2._replace(O3y=1)
X3 = nmode._make([0.0] * 15)
X3 = X3._replace(O1y=1, O2y=-1)
X5_1 = nmode._make([0.0] * 15)
X5_1 = X5_1._replace(Bx=1)
X5_2 = nmode._make([0.0] * 15)
X5_2 = X5_2._replace(Bz=1)
X5_3 = nmode._make([0.0] * 15)
X5_3 = X5_3._replace(O1x=1)
X5_4 = nmode._make([0.0] * 15)
X5_4 = X5_4._replace(O1z=1)
X5_5 = nmode._make([0.0] * 15)
X5_5 = X5_5._replace(O2x=1)
X5_6 = nmode._make([0.0] * 15)
X5_6 = X5_6._replace(O2z=1)
    X5p_1 = nmode._make([0.0] * 15)
    X5p_1 = X5p_1._replace(Ax=1)
    X5p_2 = nmode._make([0.0] * 15)
    X5p_2 = X5p_2._replace(Az=1)
    X5p_3 = nmode._make([0.0] * 15)
    X5p_3 = X5p_3._replace(O3x=1)
    X5p_4 = nmode._make([0.0] * 15)
    X5p_4 = X5p_4._replace(O3z=1)
IR_dict['X'] = {
X1_1: '$M_1$',
X1_2: '$M_1$',
X2p_1: '$M_2\prime$',
X2p_2: '$M_2\prime$',
X3: '$M_3$',
X5_1: '$M_5$',
X5_2: '$M_5$',
X5_3: '$M_5$',
X5_4: '$M_5$',
X5_5: '$M_5$',
X5_6: '$M_5$',
X5p_1: '$M_5\prime$',
X5p_2: '$M_5\prime$',
X5p_3: '$M_5\prime$',
X5p_4: '$M_5\prime$',
}
# M point
M1 = nmode._make([0.0] * 15)
M1 = M1._replace(O3x=1, O2y=1)
M2 = nmode._make([0.0] * 15)
M2 = M2._replace(O2x=1, O3y=-1)
M3 = nmode._make([0.0] * 15)
M3 = M3._replace(O3x=1, O2y=-1)
M4 = nmode._make([0.0] * 15)
M4 = M4._replace(O2x=1, O3y=1)
M2p = nmode._make([0.0] * 15)
M2p = M2p._replace(Az=1)
M3p_1 = nmode._make([0.0] * 15)
M3p_1 = M3p_1._replace(Bz=1)
M3p_2 = nmode._make([0.0] * 15)
M3p_2 = M3p_2._replace(O1z=1)
M5_1 = nmode._make([0.0] * 15)
M5_1 = M5_1._replace(O3z=1)
M5_2 = nmode._make([0.0] * 15)
M5_2 = M5_2._replace(O2z=1)
M5p_1 = nmode._make([0.0] * 15)
M5p_1 = M5p_1._replace(Bx=1)
M5p_2 = nmode._make([0.0] * 15)
M5p_2 = M5p_2._replace(By=1)
M5p_3 = nmode._make([0.0] * 15)
M5p_3 = M5p_3._replace(Ay=1)
M5p_4 = nmode._make([0.0] * 15)
M5p_4 = M5p_4._replace(Ax=1)
M5p_5 = nmode._make([0.0] * 15)
M5p_5 = M5p_5._replace(O1x=1)
M5p_6 = nmode._make([0.0] * 15)
M5p_6 = M5p_6._replace(O1y=1)
IR_dict['M'] = {
M1: '$M_1$',
M2: '$M_2$',
M3: '$M_3$',
M4: '$M_4$',
M2p: '$M_2\prime$',
M3p_1: '$M_3\prime$',
M3p_2: '$M_3\prime$',
M5_1: '$M_5$',
M5_2: '$M_5$',
M5p_1: '$M_5\prime$',
M5p_2: '$M_5\prime$',
M5p_3: '$M_5\prime$',
M5p_4: '$M_5\prime$',
M5p_5: '$M_5\prime$',
M5p_6: '$M_5\prime$',
}
# R point
G2p = nmode._make([0.0] * 15)
G2p = G2p._replace(O1z=1, O2x=1, O3y=1)
G12p_1 = nmode._make([0.0] * 15)
G12p_1 = G12p_1._replace(O1z=1, O3y=1, O2x=-2)
G12p_2 = nmode._make([0.0] * 15)
G12p_2 = G12p_2._replace(O1z=1, O3y=-1)
G25_1 = nmode._make([0.0] * 15)
G25_1 = G25_1._replace(O1y=1, O3z=-1)
G25_2 = nmode._make([0.0] * 15)
G25_2 = G25_2._replace(O1x=1, O2z=-1)
G25_3 = nmode._make([0.0] * 15)
G25_3 = G25_3._replace(O3x=1, O2y=-1)
G25p_1 = nmode._make([0.0] * 15)
G25p_1 = G25p_1._replace(Bx=1)
G25p_2 = nmode._make([0.0] * 15)
G25p_2 = G25p_2._replace(By=1)
G25p_3 = nmode._make([0.0] * 15)
G25p_3 = G25p_3._replace(Bz=1)
G15_1 = nmode._make([0.0] * 15)
G15_1 = G15_1._replace(Ax=1)
G15_2 = nmode._make([0.0] * 15)
G15_2 = G15_2._replace(Ay=1)
G15_3 = nmode._make([0.0] * 15)
G15_3 = G15_3._replace(Az=1)
G15_4 = nmode._make([0.0] * 15)
G15_4 = G15_4._replace(O1y=1, O3z=1)
G15_5 = nmode._make([0.0] * 15)
G15_5 = G15_5._replace(O1x=1, O2z=1)
G15_6 = nmode._make([0.0] * 15)
G15_6 = G15_6._replace(O3x=1, O2y=1)
IR_dict['R'] = {
G2p: r'$\Gamma_2\prime$',
G12p_1: r'$\Gamma_{12}\prime$',
G12p_2: r'$\Gamma_{12}\prime$',
G25_1: r'$\Gamma_{25}$',
G25_2: r'$\Gamma_{25}$',
G25_3: r'$\Gamma_{25}$',
G25p_1: r'$\Gamma_{25}\prime$',
G25p_2: r'$\Gamma_{25}\prime$',
G25p_3: r'$\Gamma_{25}\prime$',
G15_1: r'$\Gamma_{15}$',
G15_2: r'$\Gamma_{15}$',
G15_3: r'$\Gamma_{15}$',
G15_4: r'$\Gamma_{15}$',
G15_5: r'$\Gamma_{15}$',
G15_6: r'$\Gamma_{15}$',
}
evec = np.array(phdisp) * np.sqrt(np.kron(masses, [1, 1, 1]))
evec = np.real(evec) / np.linalg.norm(evec)
mode = None
for m in IR_dict[qname]:
#print m
mvec = np.real(m)
mvec = mvec / np.linalg.norm(mvec)
#print mvec
p = np.abs(np.dot(np.real(evec), mvec))
#print p
if p > 0.5: #1.0 / np.sqrt(2):
print("-------------")
print("Found! p= %s" % p)
print("eigen vector: ", nmode._make(mvec))
if notation == 'Cowley':
mode = IR_dict[qname][m]
else:
print(IR_translation[qname])
mode = IR_translation[qname][IR_dict[qname][m]]
print("mode: ", mode, m)
#return IR_dict[m]
if mode is None:
print("==============")
print("eigen vector: ", nmode._make(evec))
#return None
return mode
def fix_gamma(qpoints, phfreqs):
for i, qpt in enumerate(qpoints):
if np.isclose(qpt, [0.0, 0.0, 0.0], rtol=1e-5, atol=1e-3).all():
print("Fix")
if i == 0:
phfreqs[i] = phfreqs[i + 1]
else:
phfreqs[i] = phfreqs[i - 1]
return phfreqs
def get_weight(disp, masses):
ms = np.kron(masses, [1, 1, 1])
disp = np.array(disp)
disp = disp[:, 0] + disp[:, 1] * 1j
w = np.real(disp.conj() * disp * ms)
wA = sum(w[0:3])
wB = sum(w[3:6])
wC = sum(w[6:])
s = sum(w)
wA, wB, wC = wA / s, wB / s, wC / s
return wA, wB, wC
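# Minimal sanity-check sketch for get_weight (not part of the original
# script): a displacement living purely on the first atom gives wA == 1.
# disp = np.zeros((15, 2)); disp[0, 0] = 1.0
# get_weight(disp, masses=[1.0] * 5)  # -> (1.0, 0.0, 0.0)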
def test_read_freq_nc():
fname = 'BaTiO3/abinit_ifc.out_PHBST.nc'
    plot_phon_from_nc(fname)
def plot_band_weight(kslist,
ekslist,
wkslist=None,
efermi=0,
yrange=None,
output=None,
style='alpha',
color='blue',
axis=None,
width=2,
xticks=None,
title=None):
if axis is None:
fig, a = plt.subplots()
plt.tight_layout(pad=2.19)
plt.axis('tight')
plt.gcf().subplots_adjust(left=0.17)
else:
a = axis
if title is not None:
a.set_title(title)
xmax = max(kslist[0])
if yrange is None:
yrange = (np.array(ekslist).flatten().min() - 66,
np.array(ekslist).flatten().max() + 66)
if wkslist is not None:
for i in range(len(kslist)):
x = kslist[i]
y = ekslist[i]
lwidths = np.array(wkslist[i]) * width
#lwidths=np.ones(len(x))
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
if style == 'width':
lc = LineCollection(segments, linewidths=lwidths, colors=color)
elif style == 'alpha':
lc = LineCollection(
segments,
linewidths=[2] * len(x),
colors=[
colorConverter.to_rgba(
color, alpha=np.abs(lwidth / (width + 0.001)))
for lwidth in lwidths
])
a.add_collection(lc)
plt.ylabel('Frequency (cm$^{-1}$)')
if axis is None:
for ks, eks in zip(kslist, ekslist):
plt.plot(ks, eks, color='gray', linewidth=0.001)
a.set_xlim(0, xmax)
a.set_ylim(yrange)
if xticks is not None:
plt.xticks(xticks[1], xticks[0])
for x in xticks[1]:
plt.axvline(x, color='gray', linewidth=0.5)
if efermi is not None:
plt.axhline(linestyle='--', color='black')
return a
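# Illustrative sketch of calling plot_band_weight directly (not part of the
# original script); `ks`, `eks` and `wks` are assumed to be precomputed lists
# with one entry per band, and xticks pairs tick labels with their positions:
# ax = plot_band_weight([ks] * nbands, eks, wks, color='red', style='alpha',
#                       xticks=[['$\Gamma$', 'X'], [0.0, 0.5]])
# plt.show()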
#plot_phon_from_nc(
# 'BaTiO3/abinit_ifc.out_PHBST.nc',
# title='BaTiO3',
# output_filename='phonon.png')
|
mailhexu/pyDFTutils
|
pyDFTutils/phonon/plotphon.py
|
Python
|
lgpl-3.0
| 14,805
|
[
"ASE"
] |
4681c823b59f07f64dd4013da5b574a2f6fe7b7e0a5b6c0646454d6c3bbea1ed
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
The Brew Shop
10 Pound Stout
Used by permission of The Brew Shop. All rights reserved.
You can purchase this kit at their store:
- http://thebrewshopbend.com/
Original Stats:
OG:
FG:
ADF:
IBU:
Color:
Alcohol:
Boil: 60 min
Pre-Boil Volume:
Pre-Boil Gravity:
""" # noqa
import os
from brew.parsers import JSONDataLoader
from brew.parsers import parse_recipe
from brew.utilities.efficiency import calculate_brew_house_yield # noqa
def main():
recipe = {
u"name": u"10 Pound Stout (Extract)",
u"start_volume": 4.0,
u"final_volume": 5.0,
u"grains": [
{u"name": u"Amber Liquid Extract", u"weight": 7.0, u"grain_type": u"lme"},
{u"name": u"Dark Dry Extract", u"weight": 3.0, u"grain_type": u"dme"},
{
u"name": u"Caramel Crystal Malt 120l",
u"weight": 1.0,
u"grain_type": u"specialty",
},
{
u"name": u"Black Barley Stout",
u"weight": 0.5,
u"grain_type": u"specialty",
},
{u"name": u"Roasted Barley", u"weight": 0.5, u"grain_type": u"specialty"},
],
u"hops": [
{u"name": u"Columbus", u"weight": 1.0, u"boil_time": 60.0},
{u"name": u"Columbus", u"weight": 1.0, u"boil_time": 5.0},
],
u"yeast": {u"name": u"Wyeast 1084"},
u"data": {u"brew_house_yield": 0.80, u"units": u"imperial"},
}
data_dir = os.path.abspath(os.path.join(os.getcwd(), "data/"))
loader = JSONDataLoader(data_dir)
beer = parse_recipe(recipe, loader)
print(beer.format())
if __name__ == "__main__":
main()
|
chrisgilmerproj/brewday
|
examples/the_brew_shop/10_pound_stout.py
|
Python
|
mit
| 1,738
|
[
"ADF",
"Amber",
"COLUMBUS",
"CRYSTAL"
] |
4d6b6ba54b66ef20e96acf6f69fa5bd50ddb29890af3a7bc054e136603edab30
|
from __future__ import print_function
from .lib import TestBase, FileCreator
from smmap.mman import (
SlidingWindowMapManager,
StaticWindowMapManager
)
from smmap.buf import SlidingWindowMapBuffer
from random import randint
from time import time
import sys
import os
man_optimal = SlidingWindowMapManager()
man_worst_case = SlidingWindowMapManager(
window_size=TestBase.k_window_test_size // 100,
max_memory_size=TestBase.k_window_test_size // 3,
max_open_handles=15)
static_man = StaticWindowMapManager()
class TestBuf(TestBase):
def test_basics(self):
with FileCreator(self.k_window_test_size, "buffer_test") as fc:
# invalid paths fail upon construction
c = man_optimal.make_cursor(fc.path)
self.assertRaises(ValueError, SlidingWindowMapBuffer, type(c)()) # invalid cursor
self.assertRaises(ValueError, SlidingWindowMapBuffer, c, fc.size) # offset too large
            buf = SlidingWindowMapBuffer()  # can create uninitialized buffers
assert buf.cursor() is None
# can call end access any time
buf.end_access()
buf.end_access()
assert len(buf) == 0
# begin access can revive it, if the offset is suitable
offset = 100
assert buf.begin_access(c, fc.size) == False
assert buf.begin_access(c, offset) == True
assert len(buf) == fc.size - offset
assert buf.cursor().is_valid()
# empty begin access keeps it valid on the same path, but alters the offset
assert buf.begin_access() == True
assert len(buf) == fc.size
assert buf.cursor().is_valid()
# simple access
with open(fc.path, 'rb') as fp:
data = fp.read()
assert data[offset] == buf[0]
assert data[offset:offset * 2] == buf[0:offset]
# negative indices, partial slices
assert buf[-1] == buf[len(buf) - 1]
assert buf[-10:] == buf[len(buf) - 10:len(buf)]
# end access makes its cursor invalid
buf.end_access()
assert not buf.cursor().is_valid()
assert buf.cursor().is_associated() # but it remains associated
# an empty begin access fixes it up again
assert buf.begin_access() == True and buf.cursor().is_valid()
del(buf) # ends access automatically
del(c)
assert man_optimal.num_file_handles() == 1
# PERFORMANCE
# blast away with random access and a full mapping - we don't want to
# exaggerate the manager's overhead, but measure the buffer overhead
# We do it once with an optimal setting, and with a worse manager which
# will produce small mappings only !
max_num_accesses = 100
fd = os.open(fc.path, os.O_RDONLY)
for item in (fc.path, fd):
for manager, man_id in ((man_optimal, 'optimal'),
(man_worst_case, 'worst case'),
(static_man, 'static optimal')):
buf = SlidingWindowMapBuffer(manager.make_cursor(item))
assert manager.num_file_handles() == 1
for access_mode in range(2): # single, multi
num_accesses_left = max_num_accesses
num_bytes = 0
fsize = fc.size
st = time()
buf.begin_access()
while num_accesses_left:
num_accesses_left -= 1
if access_mode: # multi
ofs_start = randint(0, fsize)
ofs_end = randint(ofs_start, fsize)
d = buf[ofs_start:ofs_end]
assert len(d) == ofs_end - ofs_start
assert d == data[ofs_start:ofs_end]
num_bytes += len(d)
del d
else:
pos = randint(0, fsize)
assert buf[pos] == data[pos]
num_bytes += 1
# END handle mode
# END handle num accesses
buf.end_access()
assert manager.num_file_handles()
assert manager.collect()
assert manager.num_file_handles() == 0
elapsed = max(time() - st, 0.001) # prevent zero division errors on windows
mb = float(1000 * 1000)
mode_str = (access_mode and "slice") or "single byte"
print("%s: Made %i random %s accesses to buffer created from %s reading a total of %f mb in %f s (%f mb/s)"
% (man_id, max_num_accesses, mode_str, type(item), num_bytes / mb, elapsed, (num_bytes / mb) / elapsed),
file=sys.stderr)
# END handle access mode
del buf
# END for each manager
# END for each input
os.close(fd)
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/smmap/test/test_buf.py
|
Python
|
gpl-3.0
| 5,478
|
[
"BLAST"
] |
771b35f2044455facc218ab8bece8b3a5681df29fc16fcc05c84789c5d24a910
|
# this makes a running surf noise plot
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from scipy import ndimage
import signal
import sys
def signal_handler(sig, frame):
print('User Interrupt detected.')
sys.exit(0)
# Update the matplotlib configuration parameters:
plt.rcParams.update({'font.size': 20,
'font.family': 'serif',
'figure.figsize': (10, 8),
'axes.grid': True,
'grid.color': '#555555'})
signal.signal(signal.SIGINT, signal_handler)
# these are the dimensions of the jellyfish
# 8 tentacles, 64 LEDs / tentacle, 3 colors/LED
d = 2**8
Nbits = 0
cmap = 'nipy_spectral'
#cmap='gnuplot'
#cmap = 'copper'
#cmap = 'inferno'
mu = 0.0
sigma = 0.01
zz = np.random.randn(d, d)
zz = sigma*zz + mu
#zz = zz.astype(int)
fig = plt.figure()
im = plt.imshow(zz * 2**Nbits, animated = True,
interpolation = 'gaussian', aspect='equal',
cmap=cmap, vmin=0, vmax=1)
plt.xticks([])
plt.yticks([])
fig.tight_layout()
def updatefig(*args):
global zz
mu = 0.0
sigma = 1.5
# random dist w/ offset of mu
dz = sigma * np.random.randn(d, d) + mu
#dz2 = sigma * np.random.randn(d, d) + mu
#dz = np.random.lognormal(mean=mu, sigma=sigma, size = (d,d))
    # blend the previous frame with fresh noise; the commented-out
    # distributions above could be used to add non-Gaussian outliers
    z = 0.95*zz + 0.05*dz  # combination of past and present
zz = z # update the memory holder
# make sure it fits in the range
z = np.clip(z, 0, 1)
im.set_array(z * 2**Nbits)
return im,
ani = animation.FuncAnimation(fig, updatefig,
interval = 420,
blit = True)
plt.show()
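# Optional, illustrative sketch (not part of the original script): the
# animation could be written to disk instead, assuming an ffmpeg writer is
# available on the system.
# ani.save('rednoise.mp4', writer='ffmpeg', fps=24)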
|
rxa254/MoodCube
|
Plotting/rednoise.py
|
Python
|
bsd-3-clause
| 1,861
|
[
"Gaussian"
] |
775924eb5b62f68a1fec2db61740cf99cec9d06ecfffb68af8a8c284cd7f661b
|
from pylab import *
from pprint import pprint
import json
from IPython.display import HTML, display
import traitlets
from ipywidgets import *
from .show_code import show_code
from ..manifolds.torus import Torus
from ..manifolds.landmarks import Landmarks
from ..manifolds.curves import Curve, Curves
from ..models.frechet_mean import FrechetMean
from ..models.free_atlas import FreeAtlas
from ..models.free_1D_atlas import Free1DAtlas
from ..models.hypertemplate_atlas import HypertemplateAtlas
#from SharedVariables import t, M, model, Xt
global t, M, model, Xt
t = None
M = None
model = None
Xt = None
class AtlasInterface :
def __init__(self, show = False) :
self.create_manifold_interface()
self.create_start_point_interface()
self.create_data_interface()
self.create_model_interface()
self.create_training_interface()
self.create_display_interface()
self.link_widgets()
self.create_layout()
if show :
display(self.widget)
def link_widgets(self) :
self.w_manifold_type.observe(self.on_manifold_type_change, names='value')
self.w_create_manifold.on_click(self.create_manifold)
self.w_choose_hypertemplate.on_click(self.choose_hypertemplate)
self.w_choose_data.on_click(self.choose_data)
self.w_create_model.on_click(self.create_model)
self.w_train.on_click(self.train_model)
self.w_show.on_click(self.show_model)
traitlets.link((self.w_niterations, 'value'), (self.w_iterations, 'max'))
# Energy update
self.w_template_type.observe(self.update_energy_latex, names='value')
self.w_reg_hypertemplate.observe(self.update_energy_latex, names='value')
self.w_sigma_reg_hypertemplate.observe(self.update_energy_latex, names='value')
self.w_gamma_V0.observe(self.update_energy_latex, names='value')
self.w_sigma_V0.observe(self.update_energy_latex, names='value')
self.w_shooting_dim_constraint.observe(self.update_energy_latex, names='value')
self.w_gamma_V.observe(self.update_energy_latex, names='value')
self.w_frechet_exponent.observe(self.update_energy_latex, names='value')
self.w_data_attachment.observe(self.update_energy_latex, names='value')
self.w_gamma_W.observe(self.update_energy_latex, names='value')
# Algorithm update
self.w_X0_gradient_distribution.observe(self.update_algorithm_latex, names='value')
self.w_X0_descent_mode.observe(self.update_algorithm_latex, names='value')
self.w_X0_descent_speed.observe(self.update_algorithm_latex, names='value')
self.w_Xi_gradient_distribution.observe(self.update_algorithm_latex, names='value')
self.w_Xi_descent_mode.observe(self.update_algorithm_latex, names='value')
self.w_Xi_descent_speed.observe(self.update_algorithm_latex, names='value')
self.w_Ei_gradient_distribution.observe(self.update_algorithm_latex, names='value')
self.w_Ei_descent_mode.observe(self.update_algorithm_latex, names='value')
self.w_Ei_descent_speed.observe(self.update_algorithm_latex, names='value')
self.w_descent_stopping_criterion.observe(self.update_algorithm_latex, names='value')
self.w_niterations.observe(self.update_algorithm_latex, names='value')
self.w_descent_threshold.observe(self.update_algorithm_latex, names='value')
self.update_energy_latex()
self.update_algorithm_latex()
def create_manifold_interface(self):
self.w_manifold_type = Dropdown(
options={'Landmarks 2D': 'Landmarks', 'Torus': 'Torus', 'Curves 2D' : 'Curves'},
value='Torus',
description='',
disabled = False,
button_style = 'primary',
width = '200px'
)
# Landmarks param
self.w_npoints = BoundedIntText(
value = 25,
min=1,
max=400,
step=1,
description='Points :',
disabled=True,
width='148px'
)
self.w_kernel_type = Dropdown(
options={'Gaussian Kernel': 'gaussian'},
value='gaussian',
description='',
disabled = True,
width = '148px'
)
self.w_kernel_size = BoundedFloatText(
value=0.5,
min=0.01,
max=10.0,
description='Size :',
disabled=True,
width='148px'
)
# Torus param
self.w_donut_radius = BoundedFloatText(
value=2,
min=0,
max=4,
disabled=False,
description='Radius :',
width='148px'
)
self.w_donut_section = BoundedFloatText(
value=1,
min=0,
max=4,
disabled=False,
description='Section :',
width='148px'
)
self.w_create_manifold = Button(
description="Create Manifold",
button_style = 'success',
width = '148px',
height = '68px',
disabled = False)
def update_hypertemplate_field(self, i) :
def curryfied(x) :
self.w_hypertemplate.value = self.hypertemplate_buttons[self.w_manifold_type.value][i]['code']
return curryfied
def create_start_point_interface(self) :
self.w_hypertemplate = Text(
value='',
placeholder='Type something or click on one of the above buttons',
description='',
disabled=False,
width = '450px',
font_family = 'monospace',
font_size = 16
)
item_layout = Layout(height='68px', width='71px')
self.hypertemplate_buttons = {
'Torus' : [
{ 'text' : 'zero', 'code' : '(0,0)' } ,
{ 'text' : 'rand', 'code' : '(rand(),rand())' } ,
{ 'text' : '', 'code' : '' } ,
{ 'text' : '', 'code' : '' } ,
{ 'text' : '', 'code' : '' } ,
{ 'text' : '', 'code' : '' } ] ,
'Landmarks' : [
{ 'text' : 'circle', 'code' : '(cos(t), sin(t))'},
{ 'text' : 'square', 'code' : '( minimum(maximum( (4/pi)*abs(t - .75*pi) - 2 , -1) , 1) , minimum(maximum( (4/pi)*abs(t - 1.25*pi) - 2 , -1) , 1) )'},
{ 'text' : 'segment', 'code' : '(0*t, (t/pi) - 1)'},
{ 'text' : '', 'code' : ''},
{ 'text' : '', 'code' : ''},
{ 'text' : '', 'code' : ''} ] ,
'Curves' : [
{ 'text' : 'circle', 'code' : '(cos(t), sin(t))'},
{ 'text' : 'square', 'code' : '( minimum(maximum( (4/pi)*abs(t - .75*pi) - 2 , -1) , 1) , minimum(maximum( (4/pi)*abs(t - 1.25*pi) - 2 , -1) , 1) )'},
{ 'text' : 'segment', 'code' : '(0*t, (t/pi) - 1)'},
{ 'text' : 'skull', 'code' : "'data/skulls_2D/skull.vtk'"},
{ 'text' : '', 'code' : ''},
{ 'text' : '', 'code' : ''} ]
}
self.w_options_hypertemplate_buttons = [Button(description = self.hypertemplate_buttons[self.w_manifold_type.value][i]['text'], layout=item_layout, button_style='') for i in range(6)]
for i in range(6) :
self.w_options_hypertemplate_buttons[i].on_click(self.update_hypertemplate_field(i))
carousel_layout = Layout(
width='450px',
height='',
flex_direction='row',
display='flex')
self.w_options_hypertemplate = HBox(children=self.w_options_hypertemplate_buttons, layout=carousel_layout)
self.w_choose_hypertemplate = Button(
description="Choose HT",
button_style = 'success',
width = '148px',
height = '104px',
disabled = True)
def update_data_field(self, i) :
def curryfied(x) :
self.w_data.value = self.data_buttons[self.w_manifold_type.value][i]['code']
return curryfied
def create_data_interface(self) :
self.w_data = Text(
value='',
placeholder='Type something or click on one of the above buttons',
description='',
disabled=False,
width = '450px',
font_family = 'monospace',
font_size = 16
)
item_layout = Layout(height='68px', width='71px')
self.data_buttons = {
'Torus' : [
{ 'text' : 'line', 'code' : '[ (R*cos(theta), R*sin(theta), 2 * ((theta)/pi - .5) + 0.2*randn() ) for '+
' (R,theta) in zip( 3 + 0.2*randn(10), pi*rand(10) ) ]' } ,
{ 'text' : 'randn', 'code' : '[ (.8*randn(), .8*randn(), .8*randn()) for i in range(20) ]' } ,
{ 'text' : '', 'code' : '' } ,
{ 'text' : '', 'code' : '' } ,
{ 'text' : '', 'code' : '' } ,
{ 'text' : '', 'code' : '' } ] ,
'Landmarks' : [
{ 'text' : 'chips', 'code' : '[(2*cos(t) + sin(t), sin(t) + randn() *sin(2*t) ) for i in range(2)]'},
{ 'text' : 'tricky', 'code' : '[((.5+rand())*cos(t) + randn(), (.5+rand())*sin(t) + randn()) for i in range(2)]'},
{ 'text' : 'easy', 'code' : '[((.5+rand())*cos(t) + (rand()-.5), (.5+rand())*sin(t) + (rand()-.5) ) for i in range(2)]'},
{ 'text' : 'segments', 'code' : '[(.7*randn()*((t/pi)-1) + .2*randn(), .7*randn()*((t/pi)-1) + .2*randn() ) for i in range(2)]'},
{ 'text' : '', 'code' : ''},
{ 'text' : '', 'code' : ''} ] ,
'Curves' : [
{ 'text' : 'chips', 'code' : '[(2*cos(t) + sin(t), sin(t) + randn() *sin(2*t) ) for i in range(2)]'},
{ 'text' : 'tricky', 'code' : '[((.5+rand())*cos(t) + randn(), (.5+rand())*sin(t) + randn()) for i in range(2)]'},
{ 'text' : 'easy', 'code' : '[((.5+rand())*cos(t) + (rand()-.5), (.5+rand())*sin(t) + (rand()-.5) ) for i in range(2)]'},
{ 'text' : 'segments', 'code' : '[(.7*randn()*((t/pi)-1) + .2*randn(), .7*randn()*((t/pi)-1) + .2*randn() ) for i in range(2)]'},
{ 'text' : 'skulls', 'code' : "[ 'data/skulls_2D/skull_australopithecus.vtk', 'data/skulls_2D/skull_erectus.vtk', 'data/skulls_2D/skull_habilis.vtk', 'data/skulls_2D/skull_neandertalis.vtk', 'data/skulls_2D/skull_sapiens.vtk' ]"},
{ 'text' : '', 'code' : ''} ]
}
self.w_options_data_buttons = [Button(description = self.data_buttons[self.w_manifold_type.value][i]['text'], layout=item_layout, button_style='') for i in range(6)]
for i in range(6) :
self.w_options_data_buttons[i].on_click(self.update_data_field(i))
carousel_layout = Layout(
width='450px',
height='',
flex_direction='row',
display='flex')
self.w_options_data = HBox(children=self.w_options_data_buttons, layout=carousel_layout)
self.w_choose_data = Button(
description="Choose Data",
button_style = 'success',
width = '148px',
height = '104px',
disabled = True)
def create_model_interface(self) :
self.w_template_type = Dropdown(
options=['Free', 'Shooted'],
value='Free',
description='',
button_style = 'info',
width = '148px'
)
self.w_reg_hypertemplate = Dropdown(
options = ['No reglztion', 'Gaussian reglztion'],
value = 'No reglztion',
description = '',
info = 'Prevents a Free Template from becoming edgy',
width = '148px'
)
self.w_sigma_reg_hypertemplate = BoundedFloatText(
value = 0.5,
min=0,
max=2,
description='$\\sigma_{\\text{reg}}$',
width='148px'
)
self.w_gamma_V0 = FloatText(
value=0.01,
description='$\\gamma_{V_0}$',
width='148px'
)
self.w_sigma_V0 = BoundedFloatText(
value = 0.5,
min=0.1,
max=2,
description='$\\sigma_{V_0}$',
width='148px'
)
self.w_shooting_dim_constraint = Dropdown(
options = ['No shooting', 'rank(p) = 1', 'rank(p) = 2', 'No dim constr'],
value = 'No dim constr',
description = '',
button_style = 'info',
width = '148px'
)
self.w_gamma_V = FloatText(
value=0.05,
description='$\\gamma_{V}$',
disabled=False,
width='148px'
)
self.w_frechet_exponent = BoundedIntText(
value = 2,
min=1,
max=2,
step=1,
description='Frechet :',
disabled=False,
width='148px'
)
self.data_attachment_options = {
'Torus' : {
'options' : {
'Squared L2': 'squared_distance',
'L2': 'distance'},
'value' : 'squared_distance'
},
'Landmarks' : {
'options' : {
'Squared L2': 'squared_distance',
'L2': 'distance',
'Gaussian Kernel' : 'kernel_matching',
'Optimal Transport' : 'sinkhorn_matching'},
'value' : 'squared_distance'
},
'Curves' : {
'options' : {
'Currents' : 'currents',
'Varifolds' : 'varifolds',
'Gaussian Kernel' : 'kernel_matching',
'Normal Cycles' : 'normal_cycles',
'Optimal Transport' : 'sinkhorn_matching'},
'value' : 'kernel_matching'
},
}
self.w_data_attachment = Dropdown(
options=self.data_attachment_options[self.w_manifold_type.value]['options'],
value=self.data_attachment_options[self.w_manifold_type.value]['value'],
description='',
disabled = False,
button_style = 'info',
width = '148px'
)
self.w_gamma_W = FloatText(
value=1,
description='$\\gamma_{W}$',
disabled=False,
width='148px',
)
self.w_sigma_W_start = BoundedFloatText(
value = 2,
min=0.01,
max=5,
description='$\\sigma_{W}^{\\text{start}}$',
width='148px'
)
self.w_sigma_W_end = BoundedFloatText(
value = .1,
min=0.01,
max=5,
description='$\\sigma_{W}^{\\text{end}}$',
width='148px'
)
self.w_energy_latex = Label('$$E = 0$$', width = '450px')
self.w_create_model = Button(
description="Choose Model",
button_style = 'success',
width = '148px',
height = '350px',
disabled = True)
def get_descent_parameters(self) :
descent_parameters = {
'template' : {
'distribution' : self.w_X0_gradient_distribution.value,
'mode' : self.w_X0_descent_mode.value,
'speed' : self.w_X0_descent_speed.value
},
'models' : {
'distribution' : self.w_Xi_gradient_distribution.value,
'mode' : self.w_Xi_descent_mode.value,
'speed' : self.w_Xi_descent_speed.value
},
'directions' : {
'distribution' : self.w_Ei_gradient_distribution.value,
'mode' : self.w_Ei_descent_mode.value,
'speed' : self.w_Ei_descent_speed.value
},
'scheme' : {
'direction' : 'gradient',
'line search' : 'naive',
'direction memory' : 5
}
}
return descent_parameters
def create_training_interface(self) :
# Descent parameters for X0
self.w_X0_gradient_distribution = Dropdown(
options = ['Mean'],
value = 'Mean',
width = '148px'
)
self.w_X0_descent_mode = Dropdown(
#options = ['Fixed Stepsize', '1/nit', 'Gradient', 'Gauss-Newton'],
options = ['Fixed Stepsize', '1/nit', 'Gradient'],
value = 'Gradient',
width = '148px'
)
self.w_X0_descent_speed = FloatText(
value=1,
description = 'Speed :',
width='148px'
)
# Descent parameters for Xi / lambda_i
self.w_Xi_gradient_distribution = Dropdown(
options = ['Mean'],
value = 'Mean',
width = '148px'
)
self.w_Xi_descent_mode = Dropdown(
#options = ['Fixed Stepsize', '1/nit', 'Gradient', 'Gauss-Newton'],
options = ['Fixed Stepsize', '1/nit', 'Gradient'],
value = 'Gradient',
width = '148px'
)
self.w_Xi_descent_speed = FloatText(
value=1,
description = 'Speed :',
width='148px'
)
# Descent parameters for Ei
self.w_Ei_gradient_distribution = Dropdown(
options = ['lambda', 'sign(lambda)', '1/lambda'],
value = 'sign(lambda)',
width = '148px'
)
self.w_Ei_descent_mode = Dropdown(
#options = ['Fixed Stepsize', '1/nit', 'Gradient', 'Gauss-Newton'],
options = ['Fixed Stepsize', '1/nit', 'Gradient'],
value = 'Fixed Stepsize',
width = '148px'
)
self.w_Ei_descent_speed = FloatText(
value=0.01,
description = 'Speed :',
width='148px'
)
# Stopping criterion
self.w_descent_stopping_criterion = Dropdown(
#options = ['nits > ...', 'dE < ...'],
options = ['nits > ...'],
value = 'nits > ...',
width = '148px'
)
self.w_niterations = BoundedIntText(
value = 100,
min=1,
max=1000,
step=1,
description='',
disabled=False,
width='148px'
)
self.w_descent_threshold = FloatText(
value = 0,
width = '148px'
)
# Latex cell
self.w_descent_algorithm_latex = Label(
'',
width = '450px',
height = '100px')
# Train button
self.w_train = Button(
description="Train",
button_style = 'success',
width='148px',
height='250px',
disabled = True)
def create_display_interface(self) :
self.w_iterations = IntProgress(
value=0,
min=0,
max=self.w_niterations.value,
step=1,
description='',
bar_style='success', # 'success', 'info', 'warning', 'danger' or ''
orientation='horizontal',
width = '450px'
)
self.w_show = Button(
description="Show",
button_style = 'success',
width='148px',
disabled = True)
def on_manifold_type_change(self, change):
if change['new'] == 'Torus' :
self.w_npoints.disabled = True
self.w_kernel_type.disabled = True
self.w_kernel_size.disabled = True
self.w_donut_radius.disabled = False
self.w_donut_section.disabled = False
elif change['new'] == 'Landmarks' :
self.w_npoints.disabled = False
self.w_kernel_type.disabled = False
self.w_kernel_size.disabled = False
self.w_donut_radius.disabled = True
self.w_donut_section.disabled = True
elif change['new'] == 'Curves' :
self.w_npoints.disabled = False
self.w_kernel_type.disabled = False
self.w_kernel_size.disabled = False
self.w_donut_radius.disabled = True
self.w_donut_section.disabled = True
for i in range(len(self.w_options_data_buttons)) :
self.w_options_data_buttons[i].description = self.data_buttons[self.w_manifold_type.value][i]['text']
for i in range(len(self.w_options_hypertemplate_buttons)) :
self.w_options_hypertemplate_buttons[i].description = self.hypertemplate_buttons[self.w_manifold_type.value][i]['text']
self.w_data_attachment.options = self.data_attachment_options[self.w_manifold_type.value]['options']
self.w_data_attachment.value = self.data_attachment_options[self.w_manifold_type.value]['value']
def update_energy_latex(self, nobodycares=None) :
if self.w_frechet_exponent.value == 1 :
frechet_str = '^{1/2}'
elif self.w_frechet_exponent.value == 2 :
frechet_str = ''
if self.w_template_type.value == 'Free' :
XHT_X0 = '0~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\text{(flat template prior)}'
elif self.w_template_type.value == 'Shooted' :
XHT_X0 = '\\frac{\\gamma_{V_0}}{2}\\big(p_0, K_{\\sigma_{V_0},X_{\\text{HT}}} p_0 \\big)~~~~~~~~~~\\text{(template prior)}'
if self.w_data_attachment.value == 'squared_distance' :
attachment = '\\big|X_i - \\widetilde{X_i}\\big|^2_{\\text{pointwise}}'
elif self.w_data_attachment.value == 'distance' :
attachment = '\\big|X_i - \\widetilde{X_i}\\big|_{\\text{pointwise}}'
elif self.w_data_attachment.value == 'monge_kantorovitch' :
attachment = '\\big|X_i - \\widetilde{X_i}\\big|^2_{\\text{transport}}'
elif self.w_data_attachment.value == 'currents' :
attachment = '\\big|\\omega(X_i) - \\omega(\\widetilde{X_i})\\big|^2_{\\sigma_W}'
elif self.w_data_attachment.value == 'varifolds' :
attachment = '\\big|X_i - \\widetilde{X_i}\\big|^2_{\\text{varifolds}}'
elif self.w_data_attachment.value == 'kernel_matching' :
attachment = '\\big|\\mu_{X_i} - \\mu_{\\widetilde{X_i}}\\big|^{\\star 2}_{\\sigma_W}'
elif self.w_data_attachment.value == 'sinkhorn_matching' :
attachment = '\\big|\\mu_{X_i} - \\mu_{\\widetilde{X_i}}\\big|^{\\star 2}_{\\text{Wasserstein}}'
elif self.w_data_attachment.value == 'normal_cycles' :
attachment = '\\big|N_{X_i} - N_{\\widetilde{X_i}}\\big|^{2}_{\\sigma_W}'
X0_Xi = '\\frac{\\gamma_{V}}{2 \cdot n_{\\text{obs} }} \\sum_{i=1}^{n_{\\text{obs}}} \\big(p_i, K_{X_0} p_i \\big)' + frechet_str + '~~~~\\text{(shooting cost)}'
Xi_Xt = '\\frac{\\gamma_{W}}{2 \cdot n_{\\text{obs} }} \\sum_{i=1}^{n_{\\text{obs}}}' + attachment + '~~~~\\text{(data attachment)}'
pad = '~~~~~~~~~~~~'
code = pad + 'C = ' + XHT_X0 \
+ '\\\\' +pad+ '\\,~~+' + X0_Xi \
+ '\\\\' +pad+ '\\,~~+' + Xi_Xt
self.w_energy_latex.value = '$$' + code + '$$'
def create_manifold(self, b):
global M
if self.w_manifold_type.value == "Torus" :
code = '''M = Torus( a = {rad},
b = {sec} )'''.format(
rad = self.w_donut_radius.value,
sec = self.w_donut_section.value
)
M = Torus( a = self.w_donut_radius.value, b = self.w_donut_section.value )
elif self.w_manifold_type.value == "Landmarks" :
code = '''M = Landmarks( npoints = {npoints},
kernel = {kernel} )'''.format(
npoints = self.w_npoints.value,
kernel = (self.w_kernel_type.value, self.w_kernel_size.value)
)
M = Landmarks(npoints = self.w_npoints.value,
kernel = (self.w_kernel_type.value, self.w_kernel_size.value))
elif self.w_manifold_type.value == "Curves" :
code = '''M = Curves( connectivity = array([ [i, i+1] for i in range(self.w_npoints.value - 1) ]),
npoints = {npoints},
kernel = {kernel} )'''.format(
npoints = self.w_npoints.value,
kernel = (self.w_kernel_type.value, self.w_kernel_size.value)
)
M = Curves( connectivity = array([ [i, i+1] for i in range(self.w_npoints.value - 1) ]),
npoints = self.w_npoints.value,
kernel = (self.w_kernel_type.value, self.w_kernel_size.value))
show_code(code)
self.w_create_manifold.disabled = True
self.w_choose_hypertemplate.disabled = False
def choose_hypertemplate(self, b):
if self.w_hypertemplate.value != '' :
global t
t = linspace(0, 2*pi, self.w_npoints.value, endpoint=False)
if self.w_manifold_type.value == "Torus" :
code = "q0 = array({ht})".format(
ht = self.w_hypertemplate.value
)
elif self.w_manifold_type.value == "Landmarks" :
code = "q0 = (vstack({ht}).T).ravel()".format(
ht = self.w_hypertemplate.value
)
elif self.w_manifold_type.value == "Curves" :
obj = eval(self.w_hypertemplate.value)
if type(obj) is str :
code = "q0 = Curve.from_file('"+obj+"', offset = [.3, -.7])\n"
else :
code = """q0 = Curve( (vstack({ht}).T).ravel(),
array([ [i, i+1] for i in range(len(t) - 1) ]),
2
)""".format(
ht = self.w_hypertemplate.value
)
show_code(code)
exec('global q0; ' + code)
self.w_choose_hypertemplate.disabled = True
self.w_choose_data.disabled = False
def choose_data(self, b):
if self.w_data.value != '' :
global t
t = linspace(0, 2*pi, self.w_npoints.value, endpoint=False)
if self.w_manifold_type.value == "Torus" :
code = "Xt = vstack( {ht} )".format(
ht = self.w_data.value
)
elif self.w_manifold_type.value == "Landmarks" :
code = """Xt = vstack( tuple( (vstack(x).T).ravel()
for x in
({ht})
) )""".format(
ht = self.w_data.value
)
elif self.w_manifold_type.value == "Curves" :
            obj = eval(self.w_data.value)
if type(obj[0]) is str :
code = """Xt = [ Curve.from_file(f) for f in
({ht})
] """.format(
ht = self.w_data.value
)
else :
code = """Xt = [ Curve( (vstack(x).T).ravel() ,
array([ [i, i+1] for i in range(len(t) - 1) ]),
2
)
for x in
({ht})
] """.format(
ht = self.w_data.value
)
show_code(code)
exec('global Xt; ' + code)
self.w_choose_data.disabled = True
self.w_create_model.disabled = False
def create_model(self, b):
global model
if hasattr(model, 'training_widget') :
model.training_widget.close()
reg_template_gradient_str = ''
if self.w_template_type.value == 'Free' :
atlas_type = ' FreeAtlas'
if self.w_reg_hypertemplate.value == 'Gaussian reglztion' :
reg_template_gradient_str = " reg_template_gradient = ('gaussian', {s})\n ".format( s = self.w_sigma_reg_hypertemplate.value )
elif self.w_template_type.value == 'Shooted' :
atlas_type = 'HypertemplateAtlas'
if self.w_data_attachment.value == 'kernel_matching' :
data_attachment = 'kernel_matchings(start_scale = {ss}, end_scale = {es} )'.format(ss = self.w_sigma_W_start.value, es = self.w_sigma_W_end.value)
elif self.w_data_attachment.value == 'sinkhorn_matching' :
data_attachment = 'sinkhorn_matchings( sinkhorn_options = None )'
elif self.w_data_attachment.value == 'currents' :
data_attachment = 'current_matchings(start_scale = {ss}, end_scale = {es} )'.format(ss = self.w_sigma_W_start.value, es = self.w_sigma_W_end.value)
elif self.w_data_attachment.value == 'varifolds' :
data_attachment = 'varifold_matchings(start_scale = {ss}, end_scale = {es} )'.format(ss = self.w_sigma_W_start.value, es = self.w_sigma_W_end.value)
elif self.w_data_attachment.value == 'normal_cycles' :
data_attachment = 'normalcycles_matchings(start_scale = {ss}, end_scale = {es} )'.format(ss = self.w_sigma_W_start.value, es = self.w_sigma_W_end.value)
else :
data_attachment = self.w_data_attachment.value
code = ('model = ' + atlas_type + '''( Manifold = M,
DataAttachment = M.{dist},
FrechetExponent = {frechexp},
weights = ({g_V0}, {g_V}, {g_W}),
nobs = {nobs},
Q0 = q0
{reg_template_gradient})'''
).format(
dist = data_attachment,
frechexp = self.w_frechet_exponent.value,
g_V0 = self.w_gamma_V0.value,
g_V = self.w_gamma_V.value,
g_W = self.w_gamma_W.value,
nobs = len(Xt),
reg_template_gradient = reg_template_gradient_str
)
show_code(code)
exec('global model; ' + code)
self.w_create_model.disabled = True
self.w_train.disabled = False
def update_algorithm_latex(self, nobodycares=None) :
txt = lambda x : '\\text{' + x + '}'
nline = '\\\\~~~~~~~~~~~~~~'
grad_X0 = ''
if self.w_X0_descent_mode.value == 'Fixed Stepsize' :
grad_X0 = '\\nabla_{X_0} C ~\\big/~| \\nabla_{X_0} C |'
elif self.w_X0_descent_mode.value == '1/nit' :
grad_X0 = '\\nabla_{X_0} C ~\\big/~| n_{\\text{it}} \\cdot \\nabla_{X_0} C |'
elif self.w_X0_descent_mode.value == 'Gradient' :
grad_X0 = '\\nabla_{X_0} C'
grad_Xi = ''
if self.w_Xi_descent_mode.value == 'Fixed Stepsize' :
grad_Xi = '\\nabla_{p_i} C ~\\big/~| \\nabla_{p_i} C |'
elif self.w_Xi_descent_mode.value == '1/nit' :
grad_Xi = '\\nabla_{p_i} C ~\\big/~| n_{\\text{it}} \\cdot \\nabla_{p_i} C |'
elif self.w_Xi_descent_mode.value == 'Gradient' :
grad_Xi = '\\nabla_{p_i} C'
code = txt('While $n_{\\text{it}} < ' + str(self.w_niterations.value) + '~$:') \
+ nline + 'X_0 \\leftarrow X_0 + ' + '{0:.2f}'.format(self.w_X0_descent_speed.value) + '\\cdot' + grad_X0 \
+ nline + 'p_i ~\\leftarrow p_i ~+ ' + '{0:.2f}'.format(self.w_Xi_descent_speed.value) + '\\cdot' + grad_Xi
self.w_descent_algorithm_latex.value = '$$' + code + '$$'
def train_model(self, b):
descent_parameters = self.get_descent_parameters()
global model
code = '''model.train( Xt,
descent_parameters = {desc},
nits = {nits}
)'''.format(
desc = ' '.join(json.dumps(descent_parameters, sort_keys=False, indent=4).splitlines(True)),
nits = self.w_niterations.value)
show_code(code)
model.train(Xt, descent_parameters, nits = self.w_niterations.value, progressbar = self.w_iterations)
self.w_train.disabled = True
self.w_show.disabled = False
def show_model(self, b):
global model
code = 'model.show()\n'
show_code(code)
model.show()
self.w_show.disabled = True
self.w_create_manifold.disabled = False
def create_layout(self) :
spaceright = '50px'
b_manifold = HBox([self.w_manifold_type, VBox(
[ HBox([self.w_donut_radius, self.w_donut_section ,
Label('', width=self.w_kernel_size.width)]),
HBox([self.w_npoints, self.w_kernel_type, self.w_kernel_size]) ]
), Label('', width=spaceright), self.w_create_manifold] ,
layout=Layout(width='100%', justify_content='center')
)
b_start_point = HBox([Label('Hypertemplate $X_{\\text{HT}}$ :', width='202px'),
VBox([self.w_options_hypertemplate, self.w_hypertemplate]),
Label('', width=spaceright),
self.w_choose_hypertemplate],
layout=Layout(width='100%', justify_content='center')
)
b_data = HBox([Label('Dataset $\\widetilde{X_i}$ :', width='202px'),
VBox([self.w_options_data, self.w_data]),
Label('', width=spaceright),
self.w_choose_data],
layout=Layout(width='100%', justify_content='center')
)
b_HT_X0 = HBox( [ Label('Template constr $X_{\\text{HT}} \\rightarrow X_0$ :', width='202px', padding='22px 0px 0px 0px'),
self.w_template_type,
VBox([HBox([self.w_reg_hypertemplate,
self.w_sigma_reg_hypertemplate]),
HBox([self.w_gamma_V0,
self.w_sigma_V0]) ]) ] )
b_X0_X = HBox( [ Label('Models shooting $X_{0} \\rightarrow X_i$ :', width='202px', padding='5px 0px 0px 0px'),
self.w_shooting_dim_constraint,
self.w_gamma_V,
self.w_frechet_exponent ] )
b_X_Xt = VBox([
HBox( [ Label('Data attachment $X_{i} \\rightarrow \\widetilde{X_i}$ :', width='202px', padding='5px 0px 0px 0px'),
self.w_data_attachment,
self.w_gamma_W,
Label('', width='148px', padding='5px 0px 0px 0px') ] ),
HBox( [ Label('', width='202px', padding='5px 0px 0px 0px'),
Label('Coarse to fine scheme :', width='148px', padding='5px 0px 0px 0px'),
self.w_sigma_W_start,
self.w_sigma_W_end ] )
])
b_energy_latex = HBox( [Label('', width='202px'), self.w_energy_latex ] )
b_model = HBox( [VBox([b_HT_X0, b_X0_X, b_X_Xt, b_energy_latex]),
Label('', width=spaceright),
self.w_create_model
],
layout = Layout(width='100%', justify_content='center'))
# Descent parameters
b_descent_X0 = HBox([ Label('Update of the template $X_0$', width='202px', padding='6px 0px 0px 0px'),
self.w_X0_gradient_distribution,
self.w_X0_descent_mode,
self.w_X0_descent_speed ])
b_descent_Xi = HBox([ Label('Update of the models $X_i$', width='202px', padding='6px 0px 0px 0px'),
self.w_Xi_gradient_distribution,
self.w_Xi_descent_mode,
self.w_Xi_descent_speed ])
b_descent_Ei = HBox([ Label('Update of the directions $E_j$', width='202px', padding='6px 0px 0px 0px'),
self.w_Ei_gradient_distribution,
self.w_Ei_descent_mode,
self.w_Ei_descent_speed ])
b_descent_stopping = HBox([ Label('Stopping criterion', width='202px', padding='6px 0px 0px 0px'),
self.w_descent_stopping_criterion,
self.w_niterations,
self.w_descent_threshold ])
b_train = HBox([VBox([b_descent_X0, b_descent_Xi, b_descent_Ei, b_descent_stopping,
HBox([Label('', width='202px'), self.w_descent_algorithm_latex]) ]),
Label('', width=spaceright),
self.w_train],
layout = Layout(width='100%', justify_content='center'))
b_display = HBox([Label('Progress :', width='202px', padding='5px 0px 0px 0px'),
self.w_iterations,
Label('', width=spaceright),
self.w_show],
layout = Layout(width='100%', justify_content='center'))
self.widget = VBox([b_manifold, b_start_point, b_data, b_model, b_train, b_display])
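# Illustrative usage sketch (not part of the original module): inside a
# Jupyter notebook the interface is typically created and displayed with
# interface = AtlasInterface(show=True)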
|
jeanfeydy/lddmm-ot
|
LDDMM_Python/lddmm_python/modules/io/interactive_riemannian_geometry.py
|
Python
|
mit
| 30,519
|
[
"Gaussian",
"VTK"
] |
17668229a1ecdc80725fa54855215002b505793616240bc0d70095939a28c218
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel
"""Utility to compile CoreML models"""
import os
import shutil
import tvm._ffi
from ...relay.expr_functor import ExprVisitor
from .. import xcode, coreml_runtime
def _convert_add(builder, name, inputs, outputs, args, attrs):
builder.add_elementwise(name=name, input_names=inputs, output_name=outputs[0], mode="ADD")
def _convert_multiply(builder, name, inputs, outputs, args, attrs):
builder.add_elementwise(name=name, input_names=inputs, output_name=outputs[0], mode="MULTIPLY")
def _convert_clip(builder, name, inputs, outputs, args, attrs):
builder.add_clip(
name=name,
input_name=inputs[0],
output_name=outputs[0],
min_value=attrs.a_min,
max_value=attrs.a_max,
)
def _convert_batch_flatten(builder, name, inputs, outputs, args, attrs):
builder.add_flatten_to_2d(name=name, input_name=inputs[0], output_name=outputs[0])
def _convert_expand_dims(builder, name, inputs, outputs, args, attrs):
if attrs.axis >= 0:
axes = list(range(attrs.axis, attrs.axis + attrs.num_newaxis))
else:
axes = list(range(attrs.axis - attrs.num_newaxis + 1, attrs.axis + 1))
builder.add_expand_dims(name=name, input_name=inputs[0], output_name=outputs[0], axes=axes)
def _convert_relu(builder, name, inputs, outputs, args, attrs):
builder.add_activation(
name=name, non_linearity="RELU", input_name=inputs[0], output_name=outputs[0]
)
def _convert_softmax(builder, name, inputs, outputs, args, attrs):
builder.add_softmax_nd(
name=name, input_name=inputs[0], output_name=outputs[0], axis=int(attrs["axis"])
)
def _convert_conv2d(builder, name, inputs, outputs, args, attrs):
weight = args[1].data.asnumpy()
if attrs["kernel_layout"] == "OIHW":
# convert to 'HWIO'
weight = weight.transpose([2, 3, 1, 0])
kh, kw, kc, oc = weight.shape
builder.add_convolution(
name=name,
kernel_channels=kc,
output_channels=oc,
height=kh,
width=kw,
stride_height=int(attrs["strides"][0]),
        stride_width=int(attrs["strides"][1]),
border_mode="valid",
groups=int(attrs["groups"]),
W=weight,
b=None,
has_bias=False,
input_name=inputs[0],
output_name=outputs[0],
dilation_factors=[int(v) for v in attrs["dilation"]],
padding_top=int(attrs["padding"][0]),
padding_bottom=int(attrs["padding"][2]),
padding_left=int(attrs["padding"][1]),
padding_right=int(attrs["padding"][3]),
)
def _convert_global_avg_pool2d(builder, name, inputs, outputs, args, attrs):
builder.add_pooling(
name=name,
height=1,
width=1,
stride_height=1,
stride_width=1,
layer_type="AVERAGE",
padding_type="VALID",
input_name=inputs[0],
output_name=outputs[0],
is_global=True,
)
_convert_map = {
"add": _convert_add,
"multiply": _convert_multiply,
"clip": _convert_clip,
"expand_dims": _convert_expand_dims,
"nn.relu": _convert_relu,
"nn.batch_flatten": _convert_batch_flatten,
"nn.softmax": _convert_softmax,
"nn.conv2d": _convert_conv2d,
"nn.global_avg_pool2d": _convert_global_avg_pool2d,
}
class CodegenCoreML(ExprVisitor):
"""
A visitor to traverse subgraphs and build Core ML models.
"""
def __init__(self, model_name, function):
import coremltools
from coremltools.models.neural_network import NeuralNetworkBuilder
ExprVisitor.__init__(self)
self.model_name = model_name
self.function = function
self.out_map = {}
self.model_inputs_ = []
self.buf_idx_ = 0
# Update inputs and outputs after we visit all the nodes.
# Set dummy values for now.
# TODO: support multiple outputs
inputs = [
(
"",
coremltools.models.datatypes.Array(
1,
),
)
for _ in self.function.params
]
outputs = [
(
"",
coremltools.models.datatypes.Array(
1,
),
)
]
self.builder = NeuralNetworkBuilder(inputs, outputs, disable_rank5_shape_mapping=True)
def visit_constant(self, const):
output = "buf_" + str(self.buf_idx_)
self.builder.add_load_constant_nd(
name=output,
output_name=output,
constant_value=const.data.asnumpy(),
shape=const.data.shape,
)
self.buf_idx_ = self.buf_idx_ + 1
self.out_map[const] = [output]
def visit_var(self, var):
name = var.name_hint
shape = [int(n) for n in var.type_annotation.shape]
dtype = var.type_annotation.dtype
self.model_inputs_.append((name, shape, dtype))
self.out_map[var] = [name]
def visit_call(self, call):
inputs = []
for arg in call.args:
super().visit(arg)
for out in self.out_map[arg]:
inputs.append(out)
outputs = ["buf_" + str(self.buf_idx_)]
op_name = call.op.name
layer_name = op_name + "_" + str(self.buf_idx_)
assert op_name in _convert_map, "{} is not supported".format(op_name)
_convert_map[op_name](self.builder, layer_name, inputs, outputs, call.args, call.attrs)
self.buf_idx_ = self.buf_idx_ + 1
self.out_map[call] = outputs
def compile(self, out_dir):
"""
Build a Core ML model and compile it with Xcode toolchain.
"""
import coremltools
from coremltools.proto.Model_pb2 import ArrayFeatureType
FEATURE_TYPE_MAP = {
"float32": ArrayFeatureType.FLOAT32,
"float64": ArrayFeatureType.DOUBLE,
"int32": ArrayFeatureType.INT32,
}
input_names, input_dims, input_dtypes = zip(*self.model_inputs_)
self.builder.set_input(input_names, input_dims)
for i, dtype in enumerate(input_dtypes):
assert dtype in FEATURE_TYPE_MAP
input_desc = self.builder.spec.description.input
input_desc[i].type.multiArrayType.dataType = FEATURE_TYPE_MAP[dtype]
output_dim = [int(n) for n in self.function.ret_type.shape]
self.builder.set_output(self.out_map[self.function.body], [output_dim])
for i, dtype in enumerate([self.function.ret_type.dtype]):
assert dtype in FEATURE_TYPE_MAP
output_desc = self.builder.spec.description.output
output_desc[i].type.multiArrayType.dataType = FEATURE_TYPE_MAP[dtype]
model = coremltools.models.MLModel(self.builder.spec)
xcode.compile_coreml(model, self.model_name, out_dir)
@tvm._ffi.register_func("relay.ext.coremlcompiler")
def coreml_compiler(func):
"""
Create a CoreML runtime from a Relay module.
"""
assert isinstance(func, tvm.relay.function.Function)
model_dir = os.getcwd()
name = str(func.attrs.global_symbol)
builder = CodegenCoreML(name, func)
builder.visit(func.body)
mlmodelc_path = "{}/{}.mlmodelc".format(model_dir, name)
if os.path.exists(mlmodelc_path):
shutil.rmtree(mlmodelc_path)
builder.compile(model_dir)
ctx = tvm.cpu(0)
return coreml_runtime.create(name, mlmodelc_path, ctx).module
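# --- Added illustration (not part of the upstream module) -------------------
# A minimal sketch of how this codegen is usually reached: Relay's generic
# BYOC passes annotate supported operators with the "coremlcompiler" target
# and partition them into external functions, which relay.build() then hands
# to the registered "relay.ext.coremlcompiler" hook above. The pass names are
# the standard Relay transforms; treat this as an assumption to adapt to the
# TVM version in use (operator support must also be registered, e.g. via
# tvm.relay.op.contrib).
def _example_offload_to_coreml(mod, params=None):
    """Hypothetical helper showing one way to drive the CoreML codegen."""
    from tvm import relay
    from tvm.relay import transform

    seq = tvm.transform.Sequential(
        [
            transform.AnnotateTarget("coremlcompiler"),
            transform.MergeCompilerRegions(),
            transform.PartitionGraph(),
        ]
    )
    mod = seq(mod)
    # Building triggers coreml_compiler() for every partitioned function.
    return relay.build(mod, target="llvm", params=params)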
|
tqchen/tvm
|
python/tvm/contrib/target/coreml.py
|
Python
|
apache-2.0
| 8,292
|
[
"VisIt"
] |
e6256c59028710a6ecc6b715c4db9dac2099b94a45964e50214cf1cbd987af36
|
# ----------------------------------------------------------------------------
# Copyright (c) 2015--, micronota development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from tempfile import mkdtemp
from shutil import rmtree
from os import getcwd
from os.path import join
from unittest import TestCase, main
from skbio.util import get_data_path
from burrito.util import ApplicationError
from micronota.util import _get_named_data_path
from micronota.bfillings.diamond import (
DiamondMakeDB, make_db, FeatureAnnt)
class DiamondTests(TestCase):
def setUp(self):
self.tmp_dir = mkdtemp()
self.db_fa = _get_named_data_path('db.faa')
self.db = _get_named_data_path('db.dmnd')
self.neg_fp = [get_data_path(i) for i in
['empty', 'whitespace_only']]
def tearDown(self):
rmtree(self.tmp_dir)
class DiamondMakeDBTests(DiamondTests):
def test_base_command(self):
c = DiamondMakeDB()
self.assertEqual(
c.BaseCommand,
'cd "%s/"; %s' % (getcwd(), c._command))
def test_make_db(self):
fp = join(self.tmp_dir, 'db.dmnd')
make_db(self.db_fa, fp)
with open(fp, 'rb') as obs, open(self.db, 'rb') as exp:
self.assertEqual(obs.read(), exp.read())
def test_make_db_wrong_input(self):
fp = join(self.tmp_dir, 'db.dmnd')
for i in self.neg_fp:
with self.assertRaisesRegex(
ApplicationError,
r'(Error reading file)|(Invalid input file format)'):
make_db(i, fp)
class DiamondBlastTests(DiamondTests):
def setUp(self):
super().setUp()
tests = [('blastp', 'WP_009885814.faa'),
('blastx', 'WP_009885814.fna')]
self.blast = [
(i[0], get_data_path(i[1]),
_get_named_data_path('%s.diamond' % i[1]))
for i in tests]
def test_blast(self):
for aligner, query, exp_fp in self.blast:
pred = FeatureAnnt([self.db], mkdtemp(dir=self.tmp_dir))
obs = pred(query, aligner=aligner)
exp = pred.parse_tabular(exp_fp)
self.assertTrue(exp.equals(obs))
def test_blast_wrong_input(self):
pred = FeatureAnnt([self.db], self.tmp_dir)
for i in self.neg_fp:
for aligner in ['blastp', 'blastx']:
with self.assertRaisesRegex(
ApplicationError,
r'(Error reading file)|(Invalid input file format)'):
pred(i, aligner=aligner)
if __name__ == '__main__':
main()
|
tkosciol/micronota
|
micronota/bfillings/tests/test_diamond.py
|
Python
|
bsd-3-clause
| 2,809
|
[
"BLAST"
] |
2719ce4d50a559a151bbe782b4e688e95480084d9b7cc0d013b91de4be84b0fa
|
from datetime import datetime
from turbogears.database import PackageHub
from sqlobject import *
from turbogears import identity
#added for iskonline
from mx.DateTime import now
import string
hub = PackageHub('infoshopkeeperonline')
__connection__ = hub
# class YourDataClass(SQLObject):
# pass
# identity models.
class Visit(SQLObject):
"""
A visit to your site
"""
class sqlmeta:
table = 'visit'
visit_key = StringCol(length=40, alternateID=True,
alternateMethodName='by_visit_key')
created = DateTimeCol(default=datetime.now)
expiry = DateTimeCol()
def lookup_visit(cls, visit_key):
try:
return cls.by_visit_key(visit_key)
except SQLObjectNotFound:
return None
lookup_visit = classmethod(lookup_visit)
class VisitIdentity(SQLObject):
"""
A Visit that is link to a User object
"""
visit_key = StringCol(length=40, alternateID=True,
alternateMethodName='by_visit_key')
user_id = IntCol()
class Group(SQLObject):
"""
An ultra-simple group definition.
"""
# names like "Group", "Order" and "User" are reserved words in SQL
# so we set the name to something safe for SQL
class sqlmeta:
table = 'tg_group'
group_name = UnicodeCol(length=16, alternateID=True,
alternateMethodName='by_group_name')
display_name = UnicodeCol(length=255)
created = DateTimeCol(default=datetime.now)
# collection of all users belonging to this group
users = RelatedJoin('User', intermediateTable='user_group',
joinColumn='group_id', otherColumn='user_id')
# collection of all permissions for this group
permissions = RelatedJoin('Permission', joinColumn='group_id',
intermediateTable='group_permission',
otherColumn='permission_id')
class User(SQLObject):
"""
Reasonably basic User definition.
Probably would want additional attributes.
"""
# names like "Group", "Order" and "User" are reserved words in SQL
# so we set the name to something safe for SQL
class sqlmeta:
table = 'tg_user'
user_name = UnicodeCol(length=16, alternateID=True,
alternateMethodName='by_user_name')
email_address = UnicodeCol(length=255, alternateID=True,
alternateMethodName='by_email_address')
display_name = UnicodeCol(length=255)
password = UnicodeCol(length=40)
created = DateTimeCol(default=datetime.now)
# groups this user belongs to
groups = RelatedJoin('Group', intermediateTable='user_group',
joinColumn='user_id', otherColumn='group_id')
def _get_permissions(self):
perms = set()
for g in self.groups:
perms = perms | set(g.permissions)
return perms
def _set_password(self, cleartext_password):
"Runs cleartext_password through the hash algorithm before saving."
password_hash = identity.encrypt_password(cleartext_password)
self._SO_set_password(password_hash)
def set_password_raw(self, password):
"Saves the password as-is to the database."
self._SO_set_password(password)
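    # Added usage note (illustration only, not in the original model): because
    # `password` is an SQLObject column with the custom setter above, plain
    # attribute assignment stores the hash, e.g. (hypothetical values)
    #   u = User(user_name='alice', email_address='a@example.org',
    #            display_name='Alice', password='secret')
    #   u.password = 'new-secret'   # hashed via identity.encrypt_password()
    # whereas set_password_raw() stores an already-hashed value verbatim.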
class Permission(SQLObject):
"""
A relationship that determines what each Group can do
"""
permission_name = UnicodeCol(length=16, alternateID=True,
alternateMethodName='by_permission_name')
description = UnicodeCol(length=255)
groups = RelatedJoin('Group',
intermediateTable='group_permission',
joinColumn='permission_id',
otherColumn='group_id')
#class Rating(SQLObject):
# _fromDatabase = True
# title = ForeignKey('Title')
|
johm/infoshopkeeper
|
infoshopkeeperonline/infoshopkeeperonline/model.py
|
Python
|
gpl-2.0
| 3,893
|
[
"VisIt"
] |
770d5e91f060cc9414eea13a5f85fbcc0f4f8c06ff1c90f88e0f168c3527d297
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("icecreamratings_project.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these URLs in a browser to see what these error pages look like.
urlpatterns += [
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
]
|
vainotuisk/icecreamratings
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,245
|
[
"VisIt"
] |
56a97709fb53d3d5327149139f1e7e54766109291345da094d793d7b81a253ad
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to handle the calculation of the IR spectra
This implementation is adapted from Abipy
https://github.com/abinit/abipy
where it was originally done by Guido Petretto and Matteo Giantomassi
"""
import numpy as np
from monty.json import MSONable
from pymatgen.core.spectrum import Spectrum
from pymatgen.core.structure import Structure
from pymatgen.util.plotting import add_fig_kwargs
from pymatgen.vis.plotters import SpectrumPlotter
__author__ = "Henrique Miranda, Guido Petretto, Matteo Giantomassi"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Henrique Miranda"
__email__ = "miranda.henrique@gmail.com"
__date__ = "Oct 31, 2018"
class IRDielectricTensor(MSONable):
"""
Class to handle the Ionic Dielectric Tensor
The implementation is adapted from Abipy
See the definitions Eq.(53-54) in :cite:`Gonze1997` PRB55, 10355 (1997).
"""
def __init__(self, oscillator_strength, ph_freqs_gamma, epsilon_infinity, structure):
"""
Args:
            oscillator_strength: IR oscillator strengths as defined
in Eq. 54 in :cite:`Gonze1997` PRB55, 10355 (1997).
ph_freqs_gamma: Phonon frequencies at the Gamma point
epsilon_infinity: electronic susceptibility as defined in Eq. 29.
structure: A Structure object corresponding to the structure used for the calculation.
"""
self.structure = structure
self.oscillator_strength = np.array(oscillator_strength).real
self.ph_freqs_gamma = np.array(ph_freqs_gamma)
self.epsilon_infinity = np.array(epsilon_infinity)
@classmethod
def from_dict(cls, d):
"""
Returns IRDielectricTensor from dict representation
"""
structure = Structure.from_dict(d["structure"])
oscillator_strength = d["oscillator_strength"]
ph_freqs_gamma = d["ph_freqs_gamma"]
epsilon_infinity = d["epsilon_infinity"]
return cls(oscillator_strength, ph_freqs_gamma, epsilon_infinity, structure)
@property
def max_phfreq(self):
"""Maximum phonon frequency"""
return max(self.ph_freqs_gamma)
@property
def nph_freqs(self):
"""Number of phonon frequencies"""
return len(self.ph_freqs_gamma)
def as_dict(self):
"""
Json-serializable dict representation of IRDielectricTensor.
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"oscillator_strength": self.oscillator_strength.tolist(),
"ph_freqs_gamma": self.ph_freqs_gamma.tolist(),
"structure": self.structure.as_dict(),
"epsilon_infinity": self.epsilon_infinity.tolist(),
}
def write_json(self, filename):
"""
Save a json file with this data
"""
import json
with open(filename, "w") as f:
json.dump(self.as_dict(), f)
def get_ir_spectra(self, broad=0.00005, emin=0, emax=None, divs=500):
"""
The IR spectra is obtained for the different directions
Args:
broad: a list of broadenings or a single broadening for the phonon peaks
emin, emax: minimum and maximum energy in which to obtain the spectra
divs: number of frequency samples between emin and emax
Returns:
frequencies: divs array with the frequencies at which the
dielectric tensor is calculated
dielectric_tensor: divsx3x3 numpy array with the dielectric tensor
for the range of frequencies
"""
if isinstance(broad, float):
broad = [broad] * self.nph_freqs
if isinstance(broad, list) and len(broad) != self.nph_freqs:
raise ValueError("The number of elements in the broad_list " "is not the same as the number of frequencies")
if emax is None:
emax = self.max_phfreq + max(broad) * 20
frequencies = np.linspace(emin, emax, divs)
na = np.newaxis
dielectric_tensor = np.zeros((divs, 3, 3), dtype=complex)
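        # Added note: the loop below evaluates the Lorentz-oscillator sum
        #   eps_ab(w) = eps_inf_ab + sum_i S_i,ab / (w_i**2 - w**2 - 1j*gamma_i)
        # with gamma_i = broad[i] * w_i, skipping the three acoustic modes
        # (indices 0-2).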
for i in range(3, len(self.ph_freqs_gamma)):
g = broad[i] * self.ph_freqs_gamma[i]
num = self.oscillator_strength[i, :, :]
den = self.ph_freqs_gamma[i] ** 2 - frequencies[:, na, na] ** 2 - 1j * g
dielectric_tensor += num / den
dielectric_tensor += self.epsilon_infinity[na, :, :]
return frequencies, dielectric_tensor
@add_fig_kwargs
def plot(self, components=("xx",), reim="reim", show_phonon_frequencies=True, xlim=None, ylim=None, **kwargs):
"""
Helper function to generate the Spectrum plotter and directly plot the results
Arguments:
components: A list with the components of the dielectric tensor to plot.
Can be either two indexes or a string like 'xx' to plot the (0,0) component
reim: If 're' (im) is present in the string plots the real (imaginary) part of the dielectric tensor
show_phonon_frequencies: plot a dot where the phonon frequencies are to help identify IR inactive modes
"""
plotter = self.get_plotter(components=components, reim=reim, **kwargs)
plt = plotter.get_plot(xlim=xlim, ylim=ylim)
if show_phonon_frequencies:
ph_freqs_gamma = self.ph_freqs_gamma[3:]
plt.scatter(ph_freqs_gamma * 1000, np.zeros_like(ph_freqs_gamma))
plt.xlabel(r"$\epsilon(\omega)$")
plt.xlabel(r"Frequency (meV)")
return plt
def get_spectrum(self, component, reim, broad=0.00005, emin=0, emax=None, divs=500, label=None):
"""
component: either two indexes or a string like 'xx' to plot the (0,0) component
reim: only "re" or "im"
broad: a list of broadenings or a single broadening for the phonon peaks
"""
# some check on component and reim value? but not really necessary maybe
directions_map = {"x": 0, "y": 1, "z": 2, 0: 0, 1: 1, 2: 2}
functions_map = {"re": lambda x: x.real, "im": lambda x: x.imag}
reim_label = {"re": "Re", "im": "Im"}
i, j = [directions_map[direction] for direction in component]
label = r"%s{$\epsilon_{%s%s}$}" % (reim_label[reim], "xyz"[i], "xyz"[j])
frequencies, dielectric_tensor = self.get_ir_spectra(broad=broad, emin=emin, emax=emax, divs=divs)
y = functions_map[reim](dielectric_tensor[:, i, j])
return Spectrum(frequencies * 1000, y, label=label)
def get_plotter(self, components=("xx",), reim="reim", broad=0.00005, emin=0, emax=None, divs=500, **kwargs):
"""
Return an instance of the Spectrum plotter containing the different requested components
Arguments:
components: A list with the components of the dielectric tensor to plot.
Can be either two indexes or a string like 'xx' to plot the (0,0) component
reim: If 're' (im) is present in the string plots the real (imaginary) part of the dielectric tensor
emin, emax: minimum and maximum energy in which to obtain the spectra
divs: number of frequency samples between emin and emax
"""
directions_map = {"x": 0, "y": 1, "z": 2, 0: 0, 1: 1, 2: 2}
reim_label = {"re": "Re", "im": "Im"}
plotter = SpectrumPlotter()
for component in components:
i, j = [directions_map[direction] for direction in component]
for fstr in ("re", "im"):
if fstr in reim:
label = r"%s{$\epsilon_{%s%s}$}" % (
reim_label[fstr],
"xyz"[i],
"xyz"[j],
)
spectrum = self.get_spectrum(component, fstr, broad=broad, emin=emin, emax=emax, divs=divs)
spectrum.XLABEL = r"Frequency (meV)"
spectrum.YLABEL = r"$\epsilon(\omega)$"
plotter.add_spectrum(label, spectrum)
return plotter
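# --- Added usage sketch (not part of the original module) -------------------
# Assuming `ir_dict` is the dict produced by IRDielectricTensor.as_dict()
# (for instance loaded back from a file written with write_json()):
#
#   ir = IRDielectricTensor.from_dict(ir_dict)
#   freqs, eps = ir.get_ir_spectra(broad=5e-5, divs=500)  # eps.shape == (500, 3, 3)
#   ir.plot(components=("xx", "yy"), reim="im", show_phonon_frequencies=True)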
|
gmatteo/pymatgen
|
pymatgen/phonon/ir_spectra.py
|
Python
|
mit
| 8,319
|
[
"ABINIT",
"pymatgen"
] |
824b70589484dc9951f294ed7fbbd91be5aacf1ebeb8cb808e768f8a70feb80a
|
#!/bin/env python
"""
List the number of requests in the caches of all the ReqProxies
Usage:
dirac-rms-list-req-cache [options]
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
@DIRACScript()
def main():
Script.registerSwitch('', 'Full', ' Print full list of requests')
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
fullPrint = False
for switch in Script.getUnprocessedSwitches():
if switch[0] == 'Full':
fullPrint = True
reqClient = ReqClient()
for server, rpcClient in reqClient.requestProxies().items():
DIRAC.gLogger.always("Checking request cache at %s" % server)
reqCache = rpcClient.listCacheDir()
if not reqCache['OK']:
DIRAC.gLogger.error("Cannot list request cache", reqCache)
continue
reqCache = reqCache['Value']
if fullPrint:
DIRAC.gLogger.always("List of requests", reqCache)
else:
DIRAC.gLogger.always("Number of requests in the cache", len(reqCache))
DIRAC.exit(0)
if __name__ == "__main__":
main()
|
yujikato/DIRAC
|
src/DIRAC/RequestManagementSystem/scripts/dirac_rms_list_req_cache.py
|
Python
|
gpl-3.0
| 1,293
|
[
"DIRAC"
] |
1cc04b9fd449769fb2cd98d537003ab87b8aed24ff71f6ba996807d44a704bef
|
"""
Copyright Government of Canada 2015-2020
Written by: National Microbiology Laboratory, Public Health Agency of Canada
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this work except in compliance with the License. You may obtain a copy of the
License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from __future__ import absolute_import
import argparse
import os
import sys
import logging
from irida_import.config import Config
"""
From the command line, pass JSON files to IridaImport, and set up the logger
"""
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-p', '--json_parameter_file', dest='json_parameter_file',
default='sample.dat',
help='A JSON formatted parameter file from Galaxy.',
metavar='json_parameter_file')
parser.add_argument(
'-l', '--log-file', dest='log', default='log_file',
help="The file to which the tool will output the log.", metavar='log')
parser.add_argument(
'-t', '--token', dest='token',
help='The tool can use a supplied access token instead of querying '
+ 'IRIDA.', metavar='token')
parser.add_argument(
'-c', '--config', dest='config', default=Config.DEFAULT_CONFIG_PATH,
help='The tool requires a config file to run. '
+ 'By default this is config.ini in the main irida_import folder.')
parser.add_argument(
'-g', '--generate_xml', action='store_true', default=False, dest='generate_xml',
help='The tool must generate a galaxy tool xml file before Galaxy can be started. '
+ 'Use this option to do so.')
parser.add_argument(
'-i', '--history-id', dest='hist_id', default=False,
help='The tool requires a History ID.')
args = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
log_format = "%(levelname)s: %(message)s"
logging.basicConfig(filename=args.log,
format=log_format,
level=logging.ERROR,
filemode="w")
if os.path.isfile(args.config):
config = Config(args.config)
else:
message = "Error: {} does not exist!".format(args.config)
        logging.error(message)
sys.exit(message)
logging.debug("Reading from passed file")
if args.generate_xml:
config.generate_xml()
message = 'Successfully generated the XML file!'
logging.info(message)
print(message)
else:
# importing here prevents a user from needing all libs when only performing a config,
# after the tool xml is generated galaxy will install all the required dependencies
from irida_import.irida_import import IridaImport
importer = IridaImport(config)
# otherwise start looking at the input file
try:
file_to_open = args.json_parameter_file
importer.import_to_galaxy(file_to_open, args.log, args.hist_id,
token=args.token)
except Exception:
logging.exception('')
importer.print_summary(failed=True)
raise
|
phac-nml/irida-galaxy-importer
|
irida_import/main.py
|
Python
|
apache-2.0
| 3,543
|
[
"Galaxy"
] |
7d2a96e7dd42bb6628ff05d5d873896bd137ed8f95a1fed4223a70baf558dfe9
|
"""Implementation of the WebSocket protocol.
`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
communication between the browser and server.
WebSockets are supported in the current versions of all major browsers,
although older versions that do not support WebSockets are still in use
(refer to http://caniuse.com/websockets for details).
This module implements the final version of the WebSocket protocol as
defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_. Certain
browser versions (notably Safari 5.x) implemented an earlier draft of
the protocol (known as "draft 76") and are not compatible with this module.
.. versionchanged:: 4.0
Removed support for the draft 76 protocol version.
"""
from __future__ import absolute_import, division, print_function, with_statement
# Author: Jacob Kristhammar, 2010
import base64
import collections
import hashlib
import os
import struct
import tornado.escape
import tornado.web
import zlib
from tornado.concurrent import TracebackFuture
from tornado.escape import utf8, native_str, to_unicode
from tornado import httpclient, httputil
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.log import gen_log, app_log
from tornado import simple_httpclient
from tornado.tcpclient import TCPClient
from tornado.util import _websocket_mask, PY3
if PY3:
    from urllib.parse import urlparse  # py3
xrange = range
else:
    from urlparse import urlparse  # py2
class WebSocketError(Exception):
pass
class WebSocketClosedError(WebSocketError):
"""Raised by operations on a closed connection.
.. versionadded:: 3.2
"""
pass
class WebSocketHandler(tornado.web.RequestHandler):
"""Subclass this class to create a basic WebSocket handler.
Override `on_message` to handle incoming messages, and use
`write_message` to send messages to the client. You can also
override `open` and `on_close` to handle opened and closed
connections.
See http://dev.w3.org/html5/websockets/ for details on the
JavaScript interface. The protocol is specified at
http://tools.ietf.org/html/rfc6455.
    Here is an example WebSocket handler that echoes all received messages
    back to the client:
.. testcode::
class EchoWebSocket(tornado.websocket.WebSocketHandler):
def open(self):
print("WebSocket opened")
def on_message(self, message):
self.write_message(u"You said: " + message)
def on_close(self):
print("WebSocket closed")
.. testoutput::
:hide:
WebSockets are not standard HTTP connections. The "handshake" is
HTTP, but after the handshake, the protocol is
message-based. Consequently, most of the Tornado HTTP facilities
are not available in handlers of this type. The only communication
methods available to you are `write_message()`, `ping()`, and
`close()`. Likewise, your request handler class should implement
`open()` method rather than ``get()`` or ``post()``.
If you map the handler above to ``/websocket`` in your application, you can
invoke it in JavaScript with::
var ws = new WebSocket("ws://localhost:8888/websocket");
ws.onopen = function() {
ws.send("Hello, world");
};
ws.onmessage = function (evt) {
alert(evt.data);
};
This script pops up an alert box that says "You said: Hello, world".
Web browsers allow any site to open a websocket connection to any other,
instead of using the same-origin policy that governs other network
access from javascript. This can be surprising and is a potential
security hole, so since Tornado 4.0 `WebSocketHandler` requires
applications that wish to receive cross-origin websockets to opt in
by overriding the `~WebSocketHandler.check_origin` method (see that
method's docs for details). Failure to do so is the most likely
cause of 403 errors when making a websocket connection.
When using a secure websocket connection (``wss://``) with a self-signed
certificate, the connection from a browser may fail because it wants
to show the "accept this certificate" dialog but has nowhere to show it.
You must first visit a regular HTML page using the same certificate
to accept it before the websocket connection will succeed.
"""
def __init__(self, application, request, **kwargs):
super(WebSocketHandler, self).__init__(application, request, **kwargs)
self.ws_connection = None
self.close_code = None
self.close_reason = None
self.stream = None
self._on_close_called = False
@tornado.web.asynchronous
def get(self, *args, **kwargs):
self.open_args = args
self.open_kwargs = kwargs
# Upgrade header should be present and should be equal to WebSocket
if self.request.headers.get("Upgrade", "").lower() != 'websocket':
self.clear()
self.set_status(400)
log_msg = "Can \"Upgrade\" only to \"WebSocket\"."
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Connection header should be upgrade.
# Some proxy servers/load balancers
# might mess with it.
headers = self.request.headers
connection = map(lambda s: s.strip().lower(),
headers.get("Connection", "").split(","))
if 'upgrade' not in connection:
self.clear()
self.set_status(400)
log_msg = "\"Connection\" must be \"Upgrade\"."
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Handle WebSocket Origin naming convention differences
# The difference between version 8 and 13 is that in 8 the
# client sends a "Sec-Websocket-Origin" header and in 13 it's
# simply "Origin".
if "Origin" in self.request.headers:
origin = self.request.headers.get("Origin")
else:
origin = self.request.headers.get("Sec-Websocket-Origin", None)
# If there was an origin header, check to make sure it matches
# according to check_origin. When the origin is None, we assume it
# did not come from a browser and that it can be passed on.
if origin is not None and not self.check_origin(origin):
self.clear()
self.set_status(403)
log_msg = "Cross origin websockets not allowed"
self.finish(log_msg)
gen_log.debug(log_msg)
return
self.stream = self.request.connection.detach()
self.stream.set_close_callback(self.on_connection_close)
self.ws_connection = self.get_websocket_protocol()
if self.ws_connection:
self.clear_header('Content-Type')
self.ws_connection.accept_connection()
else:
if not self.stream.closed():
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 426 Upgrade Required\r\n"
"Sec-WebSocket-Version: 7, 8, 13\r\n\r\n"))
self.stream.close()
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket.
The message may be either a string or a dict (which will be
encoded as json). If the ``binary`` argument is false, the
message will be sent as utf8; in binary mode any byte string
is allowed.
If the connection is already closed, raises `WebSocketClosedError`.
.. versionchanged:: 3.2
`WebSocketClosedError` was added (previously a closed connection
would raise an `AttributeError`)
.. versionchanged:: 4.3
Returns a `.Future` which can be used for flow control.
"""
if self.ws_connection is None:
raise WebSocketClosedError()
if isinstance(message, dict):
message = tornado.escape.json_encode(message)
return self.ws_connection.write_message(message, binary=binary)
def select_subprotocol(self, subprotocols):
"""Invoked when a new WebSocket requests specific subprotocols.
``subprotocols`` is a list of strings identifying the
subprotocols proposed by the client. This method may be
overridden to return one of those strings to select it, or
``None`` to not select a subprotocol. Failure to select a
subprotocol does not automatically abort the connection,
although clients may close the connection if none of their
proposed subprotocols was selected.
"""
return None
def get_compression_options(self):
"""Override to return compression options for the connection.
If this method returns None (the default), compression will
be disabled. If it returns a dict (even an empty one), it
will be enabled. The contents of the dict may be used to
control the memory and CPU usage of the compression,
but no such options are currently implemented.
.. versionadded:: 4.1
"""
return None
def open(self, *args, **kwargs):
"""Invoked when a new WebSocket is opened.
The arguments to `open` are extracted from the `tornado.web.URLSpec`
regular expression, just like the arguments to
`tornado.web.RequestHandler.get`.
"""
pass
def on_message(self, message):
"""Handle incoming messages on the WebSocket
This method must be overridden.
"""
raise NotImplementedError
def ping(self, data):
"""Send ping frame to the remote end."""
if self.ws_connection is None:
raise WebSocketClosedError()
self.ws_connection.write_ping(data)
def on_pong(self, data):
"""Invoked when the response to a ping frame is received."""
pass
def on_close(self):
"""Invoked when the WebSocket is closed.
If the connection was closed cleanly and a status code or reason
phrase was supplied, these values will be available as the attributes
``self.close_code`` and ``self.close_reason``.
.. versionchanged:: 4.0
Added ``close_code`` and ``close_reason`` attributes.
"""
pass
def close(self, code=None, reason=None):
"""Closes this Web Socket.
Once the close handshake is successful the socket will be closed.
``code`` may be a numeric status code, taken from the values
defined in `RFC 6455 section 7.4.1
<https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
``reason`` may be a textual message about why the connection is
closing. These values are made available to the client, but are
not otherwise interpreted by the websocket protocol.
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.ws_connection:
self.ws_connection.close(code, reason)
self.ws_connection = None
def check_origin(self, origin):
"""Override to enable support for allowing alternate origins.
The ``origin`` argument is the value of the ``Origin`` HTTP
header, the url responsible for initiating this request. This
method is not called for clients that do not send this header;
such requests are always allowed (because all browsers that
implement WebSockets support this header, and non-browser
clients do not have the same cross-site security concerns).
Should return True to accept the request or False to reject it.
By default, rejects all requests with an origin on a host other
than this one.
This is a security protection against cross site scripting attacks on
browsers, since WebSockets are allowed to bypass the usual same-origin
policies and don't use CORS headers.
To accept all cross-origin traffic (which was the default prior to
Tornado 4.0), simply override this method to always return true::
def check_origin(self, origin):
return True
To allow connections from any subdomain of your site, you might
do something like::
def check_origin(self, origin):
parsed_origin = urllib.parse.urlparse(origin)
return parsed_origin.netloc.endswith(".mydomain.com")
.. versionadded:: 4.0
"""
parsed_origin = urlparse(origin)
origin = parsed_origin.netloc
origin = origin.lower()
host = self.request.headers.get("Host")
# Check to see that origin matches host directly, including ports
return origin == host
def set_nodelay(self, value):
"""Set the no-delay flag for this stream.
By default, small messages may be delayed and/or combined to minimize
the number of packets sent. This can sometimes cause 200-500ms delays
due to the interaction between Nagle's algorithm and TCP delayed
ACKs. To reduce this delay (at the expense of possibly increasing
bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
connection is established.
See `.BaseIOStream.set_nodelay` for additional details.
.. versionadded:: 3.1
"""
self.stream.set_nodelay(value)
def on_connection_close(self):
if self.ws_connection:
self.ws_connection.on_connection_close()
self.ws_connection = None
if not self._on_close_called:
self._on_close_called = True
self.on_close()
def send_error(self, *args, **kwargs):
if self.stream is None:
super(WebSocketHandler, self).send_error(*args, **kwargs)
else:
# If we get an uncaught exception during the handshake,
# we have no choice but to abruptly close the connection.
# TODO: for uncaught exceptions after the handshake,
# we can close the connection more gracefully.
self.stream.close()
def get_websocket_protocol(self):
websocket_version = self.request.headers.get("Sec-WebSocket-Version")
if websocket_version in ("7", "8", "13"):
return WebSocketProtocol13(
self, compression_options=self.get_compression_options(),
response_headers=self._headers)
def _wrap_method(method):
def _disallow_for_websocket(self, *args, **kwargs):
if self.stream is None:
method(self, *args, **kwargs)
else:
raise RuntimeError("Method not supported for Web Sockets")
return _disallow_for_websocket
for method in ["write", "redirect", "set_header", "set_cookie",
"set_status", "flush", "finish"]:
setattr(WebSocketHandler, method,
_wrap_method(getattr(WebSocketHandler, method)))
class WebSocketProtocol(object):
"""Base class for WebSocket protocol versions.
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.stream = handler.stream
self.client_terminated = False
self.server_terminated = False
def _run_callback(self, callback, *args, **kwargs):
"""Runs the given callback with exception handling.
On error, aborts the websocket connection and returns False.
"""
try:
callback(*args, **kwargs)
except Exception:
app_log.error("Uncaught exception in %s",
self.request.path, exc_info=True)
self._abort()
def on_connection_close(self):
self._abort()
def _abort(self):
"""Instantly aborts the WebSocket connection by closing the socket"""
self.client_terminated = True
self.server_terminated = True
self.stream.close() # forcibly tear down the connection
self.close() # let the subclass cleanup
class _PerMessageDeflateCompressor(object):
def __init__(self, persistent, max_wbits):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
# There is no symbolic constant for the minimum wbits value.
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if persistent:
self._compressor = self._create_compressor()
else:
self._compressor = None
def _create_compressor(self):
return zlib.compressobj(tornado.web.GZipContentEncoding.GZIP_LEVEL,
zlib.DEFLATED, -self._max_wbits)
def compress(self, data):
compressor = self._compressor or self._create_compressor()
data = (compressor.compress(data) +
compressor.flush(zlib.Z_SYNC_FLUSH))
assert data.endswith(b'\x00\x00\xff\xff')
return data[:-4]
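    # Added note: per RFC 7692 ("permessage-deflate"), flushing with
    # Z_SYNC_FLUSH always ends the output with the 0x00 0x00 0xff 0xff
    # trailer; it is stripped here and re-appended by the decompressor
    # below before inflating.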
class _PerMessageDeflateDecompressor(object):
def __init__(self, persistent, max_wbits):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if persistent:
self._decompressor = self._create_decompressor()
else:
self._decompressor = None
def _create_decompressor(self):
return zlib.decompressobj(-self._max_wbits)
def decompress(self, data):
decompressor = self._decompressor or self._create_decompressor()
return decompressor.decompress(data + b'\x00\x00\xff\xff')
class WebSocketProtocol13(WebSocketProtocol):
"""Implementation of the WebSocket protocol from RFC 6455.
This class supports versions 7 and 8 of the protocol in addition to the
final version 13.
"""
# Bit masks for the first byte of a frame.
FIN = 0x80
RSV1 = 0x40
RSV2 = 0x20
RSV3 = 0x10
RSV_MASK = RSV1 | RSV2 | RSV3
OPCODE_MASK = 0x0f
def __init__(self, handler, mask_outgoing=False,
compression_options=None, response_headers=None):
WebSocketProtocol.__init__(self, handler)
self._response_headers = response_headers
self.mask_outgoing = mask_outgoing
self._final_frame = False
self._frame_opcode = None
self._masked_frame = None
self._frame_mask = None
self._frame_length = None
self._fragmented_message_buffer = None
self._fragmented_message_opcode = None
self._waiting = None
self._compression_options = compression_options
self._decompressor = None
self._compressor = None
self._frame_compressed = None
# The total uncompressed size of all messages received or sent.
# Unicode messages are encoded to utf8.
# Only for testing; subject to change.
self._message_bytes_in = 0
self._message_bytes_out = 0
# The total size of all packets received or sent. Includes
# the effect of compression, frame overhead, and control frames.
self._wire_bytes_in = 0
self._wire_bytes_out = 0
def accept_connection(self):
try:
self._handle_websocket_headers()
self._accept_connection()
except ValueError:
gen_log.debug("Malformed WebSocket request received",
exc_info=True)
self._abort()
return
def _handle_websocket_headers(self):
"""Verifies all invariant- and required headers
If a header is missing or have an incorrect value ValueError will be
raised
"""
fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
if not all(map(lambda f: self.request.headers.get(f), fields)):
raise ValueError("Missing/Invalid WebSocket headers")
@staticmethod
def compute_accept_value(key):
"""Computes the value for the Sec-WebSocket-Accept header,
given the value for Sec-WebSocket-Key.
"""
sha1 = hashlib.sha1()
sha1.update(utf8(key))
sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11") # Magic value
return native_str(base64.b64encode(sha1.digest()))
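    # Added note: RFC 6455 section 1.3 gives the canonical test vector for
    # this computation: the client key "dGhlIHNhbXBsZSBub25jZQ==" must yield
    # the accept value "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=".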
def _challenge_response(self):
return WebSocketProtocol13.compute_accept_value(
self.request.headers.get("Sec-Websocket-Key"))
def _accept_connection(self):
subprotocol_header = ''
subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '')
subprotocols = [s.strip() for s in subprotocols.split(',')]
if subprotocols:
selected = self.handler.select_subprotocol(subprotocols)
if selected:
assert selected in subprotocols
subprotocol_header = ("Sec-WebSocket-Protocol: %s\r\n"
% selected)
extension_header = ''
extensions = self._parse_extensions_header(self.request.headers)
for ext in extensions:
if (ext[0] == 'permessage-deflate' and
self._compression_options is not None):
# TODO: negotiate parameters if compression_options
# specifies limits.
self._create_compressors('server', ext[1])
if ('client_max_window_bits' in ext[1] and
ext[1]['client_max_window_bits'] is None):
# Don't echo an offered client_max_window_bits
# parameter with no value.
del ext[1]['client_max_window_bits']
extension_header = ('Sec-WebSocket-Extensions: %s\r\n' %
httputil._encode_header(
'permessage-deflate', ext[1]))
break
response_headers = ''
if self._response_headers is not None:
for header_name, header_value in self._response_headers.get_all():
response_headers += '%s: %s\r\n' % (header_name, header_value)
if self.stream.closed():
self._abort()
return
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 101 Switching Protocols\r\n"
"Upgrade: websocket\r\n"
"Connection: Upgrade\r\n"
"Sec-WebSocket-Accept: %s\r\n"
"%s%s%s"
"\r\n" % (self._challenge_response(), subprotocol_header,
extension_header, response_headers)))
self._run_callback(self.handler.open, *self.handler.open_args,
**self.handler.open_kwargs)
self._receive_frame()
def _parse_extensions_header(self, headers):
extensions = headers.get("Sec-WebSocket-Extensions", '')
if extensions:
return [httputil._parse_header(e.strip())
for e in extensions.split(',')]
return []
def _process_server_headers(self, key, headers):
"""Process the headers sent by the server to this client connection.
'key' is the websocket handshake challenge/response key.
"""
assert headers['Upgrade'].lower() == 'websocket'
assert headers['Connection'].lower() == 'upgrade'
accept = self.compute_accept_value(key)
assert headers['Sec-Websocket-Accept'] == accept
extensions = self._parse_extensions_header(headers)
for ext in extensions:
if (ext[0] == 'permessage-deflate' and
self._compression_options is not None):
self._create_compressors('client', ext[1])
else:
raise ValueError("unsupported extension %r", ext)
def _get_compressor_options(self, side, agreed_parameters):
"""Converts a websocket agreed_parameters set to keyword arguments
for our compressor objects.
"""
options = dict(
persistent=(side + '_no_context_takeover') not in agreed_parameters)
wbits_header = agreed_parameters.get(side + '_max_window_bits', None)
if wbits_header is None:
options['max_wbits'] = zlib.MAX_WBITS
else:
options['max_wbits'] = int(wbits_header)
return options
def _create_compressors(self, side, agreed_parameters):
# TODO: handle invalid parameters gracefully
allowed_keys = set(['server_no_context_takeover',
'client_no_context_takeover',
'server_max_window_bits',
'client_max_window_bits'])
for key in agreed_parameters:
if key not in allowed_keys:
raise ValueError("unsupported compression parameter %r" % key)
other_side = 'client' if (side == 'server') else 'server'
self._compressor = _PerMessageDeflateCompressor(
**self._get_compressor_options(side, agreed_parameters))
self._decompressor = _PerMessageDeflateDecompressor(
**self._get_compressor_options(other_side, agreed_parameters))
def _write_frame(self, fin, opcode, data, flags=0):
if fin:
finbit = self.FIN
else:
finbit = 0
frame = struct.pack("B", finbit | opcode | flags)
l = len(data)
if self.mask_outgoing:
mask_bit = 0x80
else:
mask_bit = 0
if l < 126:
frame += struct.pack("B", l | mask_bit)
elif l <= 0xFFFF:
frame += struct.pack("!BH", 126 | mask_bit, l)
else:
frame += struct.pack("!BQ", 127 | mask_bit, l)
if self.mask_outgoing:
mask = os.urandom(4)
data = mask + _websocket_mask(mask, data)
frame += data
self._wire_bytes_out += len(frame)
try:
return self.stream.write(frame)
except StreamClosedError:
self._abort()
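    # Added note: for a short unmasked server-to-client text frame the header
    # built above is two bytes; e.g. the 5-byte payload b"Hello" is framed as
    # 0x81 (FIN | text opcode 0x1), 0x05 (no mask bit, length 5), then the
    # payload itself (RFC 6455, section 5.7).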
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket."""
if binary:
opcode = 0x2
else:
opcode = 0x1
message = tornado.escape.utf8(message)
assert isinstance(message, bytes)
self._message_bytes_out += len(message)
flags = 0
if self._compressor:
message = self._compressor.compress(message)
flags |= self.RSV1
return self._write_frame(True, opcode, message, flags=flags)
def write_ping(self, data):
"""Send ping frame."""
assert isinstance(data, bytes)
self._write_frame(True, 0x9, data)
def _receive_frame(self):
try:
self.stream.read_bytes(2, self._on_frame_start)
except StreamClosedError:
self._abort()
def _on_frame_start(self, data):
self._wire_bytes_in += len(data)
header, payloadlen = struct.unpack("BB", data)
self._final_frame = header & self.FIN
reserved_bits = header & self.RSV_MASK
self._frame_opcode = header & self.OPCODE_MASK
self._frame_opcode_is_control = self._frame_opcode & 0x8
if self._decompressor is not None and self._frame_opcode != 0:
self._frame_compressed = bool(reserved_bits & self.RSV1)
reserved_bits &= ~self.RSV1
if reserved_bits:
# client is using as-yet-undefined extensions; abort
self._abort()
return
self._masked_frame = bool(payloadlen & 0x80)
payloadlen = payloadlen & 0x7f
if self._frame_opcode_is_control and payloadlen >= 126:
# control frames must have payload < 126
self._abort()
return
try:
if payloadlen < 126:
self._frame_length = payloadlen
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length,
self._on_frame_data)
elif payloadlen == 126:
self.stream.read_bytes(2, self._on_frame_length_16)
elif payloadlen == 127:
self.stream.read_bytes(8, self._on_frame_length_64)
except StreamClosedError:
self._abort()
def _on_frame_length_16(self, data):
self._wire_bytes_in += len(data)
self._frame_length = struct.unpack("!H", data)[0]
try:
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length, self._on_frame_data)
except StreamClosedError:
self._abort()
def _on_frame_length_64(self, data):
self._wire_bytes_in += len(data)
self._frame_length = struct.unpack("!Q", data)[0]
try:
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length, self._on_frame_data)
except StreamClosedError:
self._abort()
def _on_masking_key(self, data):
self._wire_bytes_in += len(data)
self._frame_mask = data
try:
self.stream.read_bytes(self._frame_length,
self._on_masked_frame_data)
except StreamClosedError:
self._abort()
def _on_masked_frame_data(self, data):
# Don't touch _wire_bytes_in; we'll do it in _on_frame_data.
self._on_frame_data(_websocket_mask(self._frame_mask, data))
def _on_frame_data(self, data):
self._wire_bytes_in += len(data)
if self._frame_opcode_is_control:
# control frames may be interleaved with a series of fragmented
# data frames, so control frames must not interact with
# self._fragmented_*
if not self._final_frame:
# control frames must not be fragmented
self._abort()
return
opcode = self._frame_opcode
elif self._frame_opcode == 0: # continuation frame
if self._fragmented_message_buffer is None:
# nothing to continue
self._abort()
return
self._fragmented_message_buffer += data
if self._final_frame:
opcode = self._fragmented_message_opcode
data = self._fragmented_message_buffer
self._fragmented_message_buffer = None
else: # start of new data message
if self._fragmented_message_buffer is not None:
# can't start new message until the old one is finished
self._abort()
return
if self._final_frame:
opcode = self._frame_opcode
else:
self._fragmented_message_opcode = self._frame_opcode
self._fragmented_message_buffer = data
if self._final_frame:
self._handle_message(opcode, data)
if not self.client_terminated:
self._receive_frame()
def _handle_message(self, opcode, data):
if self.client_terminated:
return
if self._frame_compressed:
data = self._decompressor.decompress(data)
if opcode == 0x1:
# UTF-8 data
self._message_bytes_in += len(data)
try:
decoded = data.decode("utf-8")
except UnicodeDecodeError:
self._abort()
return
self._run_callback(self.handler.on_message, decoded)
elif opcode == 0x2:
# Binary data
self._message_bytes_in += len(data)
self._run_callback(self.handler.on_message, data)
elif opcode == 0x8:
# Close
self.client_terminated = True
if len(data) >= 2:
self.handler.close_code = struct.unpack('>H', data[:2])[0]
if len(data) > 2:
self.handler.close_reason = to_unicode(data[2:])
# Echo the received close code, if any (RFC 6455 section 5.5.1).
self.close(self.handler.close_code)
elif opcode == 0x9:
# Ping
self._write_frame(True, 0xA, data)
elif opcode == 0xA:
# Pong
self._run_callback(self.handler.on_pong, data)
else:
self._abort()
def close(self, code=None, reason=None):
"""Closes the WebSocket connection."""
if not self.server_terminated:
if not self.stream.closed():
if code is None and reason is not None:
code = 1000 # "normal closure" status code
if code is None:
close_data = b''
else:
close_data = struct.pack('>H', code)
if reason is not None:
close_data += utf8(reason)
self._write_frame(True, 0x8, close_data)
self.server_terminated = True
if self.client_terminated:
if self._waiting is not None:
self.stream.io_loop.remove_timeout(self._waiting)
self._waiting = None
self.stream.close()
elif self._waiting is None:
# Give the client a few seconds to complete a clean shutdown,
# otherwise just close the connection.
self._waiting = self.stream.io_loop.add_timeout(
self.stream.io_loop.time() + 5, self._abort)
class WebSocketClientConnection(simple_httpclient._HTTPConnection):
"""WebSocket client connection.
This class should not be instantiated directly; use the
`websocket_connect` function instead.
"""
def __init__(self, io_loop, request, on_message_callback=None,
compression_options=None):
self.compression_options = compression_options
self.connect_future = TracebackFuture()
self.protocol = None
self.read_future = None
self.read_queue = collections.deque()
self.key = base64.b64encode(os.urandom(16))
self._on_message_callback = on_message_callback
self.close_code = self.close_reason = None
scheme, sep, rest = request.url.partition(':')
scheme = {'ws': 'http', 'wss': 'https'}[scheme]
request.url = scheme + sep + rest
request.headers.update({
'Upgrade': 'websocket',
'Connection': 'Upgrade',
'Sec-WebSocket-Key': self.key,
'Sec-WebSocket-Version': '13',
})
if self.compression_options is not None:
# Always offer to let the server set our max_wbits (and even though
# we don't offer it, we will accept a client_no_context_takeover
# from the server).
# TODO: set server parameters for deflate extension
# if requested in self.compression_options.
request.headers['Sec-WebSocket-Extensions'] = (
'permessage-deflate; client_max_window_bits')
self.tcp_client = TCPClient(io_loop=io_loop)
super(WebSocketClientConnection, self).__init__(
io_loop, None, request, lambda: None, self._on_http_response,
104857600, self.tcp_client, 65536, 104857600)
def close(self, code=None, reason=None):
"""Closes the websocket connection.
``code`` and ``reason`` are documented under
`WebSocketHandler.close`.
.. versionadded:: 3.2
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.protocol is not None:
self.protocol.close(code, reason)
self.protocol = None
def on_connection_close(self):
if not self.connect_future.done():
self.connect_future.set_exception(StreamClosedError())
self.on_message(None)
self.tcp_client.close()
super(WebSocketClientConnection, self).on_connection_close()
def _on_http_response(self, response):
if not self.connect_future.done():
if response.error:
self.connect_future.set_exception(response.error)
else:
self.connect_future.set_exception(WebSocketError(
"Non-websocket response"))
def headers_received(self, start_line, headers):
if start_line.code != 101:
return super(WebSocketClientConnection, self).headers_received(
start_line, headers)
self.headers = headers
self.protocol = self.get_websocket_protocol()
self.protocol._process_server_headers(self.key, self.headers)
self.protocol._receive_frame()
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
self.stream = self.connection.detach()
self.stream.set_close_callback(self.on_connection_close)
# Once we've taken over the connection, clear the final callback
# we set on the http request. This deactivates the error handling
# in simple_httpclient that would otherwise interfere with our
# ability to see exceptions.
self.final_callback = None
self.connect_future.set_result(self)
def write_message(self, message, binary=False):
"""Sends a message to the WebSocket server."""
return self.protocol.write_message(message, binary)
def read_message(self, callback=None):
"""Reads a message from the WebSocket server.
If on_message_callback was specified at WebSocket
initialization, this function will never return messages
Returns a future whose result is the message, or None
if the connection is closed. If a callback argument
is given it will be called with the future when it is
ready.
"""
assert self.read_future is None
future = TracebackFuture()
if self.read_queue:
future.set_result(self.read_queue.popleft())
else:
self.read_future = future
if callback is not None:
self.io_loop.add_future(future, callback)
return future
def on_message(self, message):
if self._on_message_callback:
self._on_message_callback(message)
elif self.read_future is not None:
self.read_future.set_result(message)
self.read_future = None
else:
self.read_queue.append(message)
def on_pong(self, data):
pass
def get_websocket_protocol(self):
return WebSocketProtocol13(self, mask_outgoing=True,
compression_options=self.compression_options)
def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None,
on_message_callback=None, compression_options=None):
"""Client-side websocket support.
Takes a url and returns a Future whose result is a
`WebSocketClientConnection`.
``compression_options`` is interpreted in the same way as the
return value of `.WebSocketHandler.get_compression_options`.
The connection supports two styles of operation. In the coroutine
style, the application typically calls
`~.WebSocketClientConnection.read_message` in a loop::
conn = yield websocket_connect(url)
while True:
msg = yield conn.read_message()
if msg is None: break
# Do something with msg
In the callback style, pass an ``on_message_callback`` to
``websocket_connect``. In both styles, a message of ``None``
indicates that the connection has been closed.
.. versionchanged:: 3.2
Also accepts ``HTTPRequest`` objects in place of urls.
.. versionchanged:: 4.1
Added ``compression_options`` and ``on_message_callback``.
The ``io_loop`` argument is deprecated.
"""
if io_loop is None:
io_loop = IOLoop.current()
if isinstance(url, httpclient.HTTPRequest):
assert connect_timeout is None
request = url
# Copy and convert the headers dict/object (see comments in
# AsyncHTTPClient.fetch)
request.headers = httputil.HTTPHeaders(request.headers)
else:
request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
request = httpclient._RequestProxy(
request, httpclient.HTTPRequest._DEFAULTS)
conn = WebSocketClientConnection(io_loop, request,
on_message_callback=on_message_callback,
compression_options=compression_options)
if callback is not None:
io_loop.add_future(conn.connect_future, callback)
return conn.connect_future
|
obsh/tornado
|
tornado/websocket.py
|
Python
|
apache-2.0
| 41,216
|
[
"VisIt"
] |
2ac985e8412bcf06d1cf6f846faf8b43b3a4ac3bafe3fcf5f0973834d0e819fe
|
######!/usr/bin/python3
###### # -*- coding: utf-8 -*-
import smbus as smbus
import subprocess
import time
import sys
import quick2wire.i2c as i2clib
from quick2wire.i2c import I2CMaster, writing_bytes, reading
cof = 32768 #crystal constant
class tea5767:
def __init__(self):
self.i2c = smbus.SMBus(1)
self.bus = i2clib.I2CMaster()
self.add = 0x60 # I2C address circuit
self.freq = 101.9
print("FM Radio Module TEA5767")
def getFreq(self):
# getReady()
frequency = 0.0
results = self.bus.transaction(
reading(self.add, 5)
)
frequency = ((results[0][0]&0x3F) << 8) + results[0][1];
        # Convert the 14-bit PLL word back to MHz using the high side injection
        # formula (the inverse of the calculation done in writeFrequency below)
frequency = round(frequency * 32768 / 4 - 225000) / 1000000;
# print(frequency)
return round(frequency,2)
def calculateFrequency(self):
"""calculate the station frequency based upon the upper and lower bits read from the device"""
repeat = 0
f =0.0
with i2clib.I2CMaster() as b:
results = b.transaction(
reading(self.add, 5)
)
uF = results[0][0]&0x3F
lF = results[0][1]
# this is probably not the best way of doing this but I was having issues with the
# frequency being off by as much as 1.5 MHz
current_freq = round((float(round(int(((int(uF)<<8)+int(lF))*cof/4-22500)/100000)/10)-.2)*10)/10
return current_freq
#script to get ready
def getReady(self):
readyFlag = 0
i = False
attempt = 0
results=[]
standbyFlag = 0
sys.stdout.flush()
time.sleep(0.1)
print("Getting ready ", end ="")
while (i==False):
results = self.bus.transaction(
reading(self.add, 5)
)
readyFlag = 1 if (results[0][0]&0x80)==128 else 0
standbyFlag = 1 if (results[0][3]+0x40)!=319 else 0
#print("result search mode:" , results[0][0]+0x40)
#s = results[0][3]+0x40
sys.stdout.flush()
time.sleep(0.9)
print(".", end = "")
# print("Soft mute ", results[0][3]&0x08)
#print(results[0][3]+0x40)
i=standbyFlag*readyFlag
attempt+=1
if(attempt>10):
break
if(i==True):
print("Ready! (",attempt,")")
# print("Raw output ", results[0])
else:
self.i2c.read_byte(self.add)
print("Not ready!")
def writeFrequency(self,f, mute):
freq = f # desired frequency in MHz (at 101.1 popular music station in Melbourne)
cof = 32768
i=False
attempt = 0
# Frequency distribution for two bytes (according to the data sheet)
freq14bit = int (4 * (freq * 1000000 + 225000) / cof)
freqH = freq14bit >>8
freqL = freq14bit & 0xFF
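        # Worked example (comment added for clarity; not in the original script):
        # for freq = 101.9 MHz with the 32.768 kHz crystal,
        #   freq14bit = int(4 * (101.9e6 + 225000) / 32768) = 12466
        #   freqH     = 12466 >> 8   = 48   (0x30)
        #   freqL     = 12466 & 0xFF = 178  (0xB2)
        # Reading the same word back gives 12466 * 32768 / 4 - 225000 = 101896472 Hz,
        # i.e. roughly 101.9 MHz, which is how getFreq() recovers the tuned station.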
data = [0 for i in range(4)]
# Descriptions of individual bits in a byte - viz. catalog sheets
if(mute==0):
init = freqH&0x3F# freqH # 1.byte (MUTE bit; Frequency H) // MUTE is 0x80 disable mute and search mode & 0x3F
else:
init = freqH&0x7F
data[0] = freqL # 2.byte (frequency L)
if(mute==0):
data[1] = 0b10010000 # 3.byte (SUD; SSL1, SSL2; HLSI, MS, MR, ML; SWP1)
else:
data[1] = 0b00010110
data[2] = 0b00010010 # 4.byte (SWP2; STBY, BL; XTAL; smut; HCC, SNC, SI)
data[3] = 0b00000000 # 5.byte (PLREFF; DTC; 0; 0; 0; 0; 0; 0)
#data[1]=0xB0; #3 byte (0xB0): high side LO injection is on,.
#data[2]=0x10; #4 byte (0x10) : Xtal is 32.768 kHz
#data[3]=0x00; #5 byte0x00)
while (i==False):
try:
self.i2c.write_i2c_block_data (self.add, init, data) # Setting a new frequency to the circuit
except IOError as e :
i = False
attempt +=1
if attempt > 100000:
break
except Exception as e:
print("I/O error: {0}".format(e))
else:
i = True
cf = self.calculateFrequency()
gf = self.getFreq()
averageF =round((cf+gf)/2,2)
def scan(self,direction):
i=False
self.freq = self.getFreq()
fadd = 0
while (i==False):
if(direction==1):
fadd+=0.05
else:
fadd-=0.05
self.freq = self.getFreq() #round((self.calculateFrequency()+self.getFreq())/2,2)
if(self.freq<87.5):
self.freq=108
elif(self.freq>108):
self.freq=87.5
self.writeFrequency(self.freq+fadd,1)
time.sleep(0.1)
results = self.bus.transaction(
reading(self.add, 5)
)
readyFlag = 1 if (results[0][0]&0x80)==128 else 0
level = results[0][3]>>4
#print(results[0][0]&0x80 , " " , results[0][3]>>4)
if(readyFlag and level>9):
i=True
print("Frequency tuned: ",self.calculateFrequency(), "FM (Strong Signal: ",level,")")
else:
i=False
print("Station skipped: ",self.calculateFrequency(), "FM (Weak Signal: ",level,")")
self.writeFrequency(self.calculateFrequency(),0)
def off(self):
print("Radio off: Goodbye now!")
self.writeFrequency(self.calculateFrequency(), 1)
radio = tea5767()
radio.getReady()
radio.scan(1)
time.sleep(10)
radio.scan(1)
time.sleep(10)
radio.scan(0)
time.sleep(10)
radio.scan(0)
time.sleep(10)
radio.off()
|
LinuxCircle/tea5767
|
radio-smbus-tea5767-class.py
|
Python
|
mit
| 4,972
|
[
"CRYSTAL"
] |
379ee51486e8d61374701911f7229322c3e27928a300eca98e80ab6952282cb3
|
# -*- coding: utf-8 -*-
''' Used for processing FPCA requests'''
#
# Copyright © 2008 Ricky Zhou
# Copyright © 2008-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details. You should have
# received a copy of the GNU General Public License along with this program;
# if not, write to the Free Software Foundation, Inc., 51 Franklin Street,
# Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat trademarks that are
# incorporated in the source code or documentation are not subject to the GNU
# General Public License and may only be used or replicated with the express
# permission of Red Hat, Inc.
#
# Author(s): Ricky Zhou <ricky@fedoraproject.org>
# Mike McGrath <mmcgrath@redhat.com>
# Toshio Kuratomi <toshio@redhat.com>
#
import turbogears
from turbogears import controllers, expose, identity, config
from turbogears.database import session
import cherrypy
from sqlalchemy.exc import DBAPIError
from datetime import datetime
import GeoIP
from genshi.template.plugin import TextTemplateEnginePlugin
import fas.fedmsgshim
from fedora.tg.utils import request_format
from fas.model import People, Groups, Log
from fas.auth import is_admin, standard_cla_done, undeprecated_cla_done
from fas.util import send_mail
import fas
class FPCA(controllers.Controller):
''' Processes FPCA workflow '''
# Group name for people having signed the FPCA
CLAGROUPNAME = config.get('cla_standard_group')
# Meta group for everyone who has satisfied the requirements of the FPCA
    # (By signing or having a corporate signature, etc.)
CLAMETAGROUPNAME = config.get('cla_done_group')
# Values legal in phone numbers
PHONEDIGITS = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '+',
'-', ')' ,'(', ' ')
def __init__(self):
'''Create a FPCA Controller.'''
@identity.require(turbogears.identity.not_anonymous())
@expose(template="fas.templates.fpca.index")
def index(self):
'''Display the FPCAs (and accept/do not accept buttons)'''
show = {}
show['show_postal_address'] = config.get('show_postal_address')
username = turbogears.identity.current.user_name
person = People.by_username(username)
try:
code_len = len(person.country_code)
except TypeError:
code_len = 0
if show['show_postal_address']:
contactInfo = person.telephone or person.postal_address
if person.country_code == 'O1' and not person.telephone:
turbogears.flash(_('A telephone number is required to ' + \
'complete the FPCA. Please fill out below.'))
elif not person.country_code or not person.human_name \
or not contactInfo:
turbogears.flash(_('A valid country and telephone number ' + \
'or postal address is required to complete the FPCA. ' + \
'Please fill them out below.'))
else:
if not person.telephone or code_len != 2 or \
person.country_code == ' ':
turbogears.flash(_('A valid country and telephone number are' +
' required to complete the FPCA. Please fill them ' +
'out below.'))
(cla, undeprecated_cla) = undeprecated_cla_done(person)
person = person.filter_private()
return dict(cla=undeprecated_cla, person=person, date=datetime.utcnow().ctime(),
show=show)
def _cla_dependent(self, group):
'''
Check whether a group has the cla in its prerequisite chain.
Arguments:
:group: group to check
        Returns: True if the group requires the cla_group_name, otherwise False
'''
if group.name in (self.CLAGROUPNAME, self.CLAMETAGROUPNAME):
return True
if group.prerequisite_id:
return self._cla_dependent(group.prerequisite)
return False
def json_request(self):
''' Helps define if json is being used for this request
:returns: 1 or 0 depending on if request is json or not
'''
return 'tg_format' in cherrypy.request.params and \
cherrypy.request.params['tg_format'] == 'json'
@expose(template="fas.templates.error")
def error(self, tg_errors=None):
'''Show a friendly error message'''
if not tg_errors:
turbogears.redirect('/')
return dict(tg_errors=tg_errors)
@identity.require(turbogears.identity.not_anonymous())
@expose(template = "genshi-text:fas.templates.fpca.fpca", format = "text",
content_type = 'text/plain; charset=utf-8')
def text(self):
'''View FPCA as text'''
username = turbogears.identity.current.user_name
person = People.by_username(username)
person = person.filter_private()
return dict(person=person, date=datetime.utcnow().ctime())
@identity.require(turbogears.identity.not_anonymous())
@expose(template = "genshi-text:fas.templates.fpca.fpca", format = "text",
content_type = 'text/plain; charset=utf-8')
def download(self):
'''Download FPCA'''
username = turbogears.identity.current.user_name
person = People.by_username(username)
person = person.filter_private()
return dict(person=person, date=datetime.utcnow().ctime())
@identity.require(turbogears.identity.not_anonymous())
@expose(template="fas.templates.user.view", allow_json=True)
def reject(self, person_name):
'''Reject a user's FPCA.
This method will remove a user from the FPCA group and any other groups
that they are in that require the FPCA. It is used when a person has
to fulfill some more legal requirements before having a valid FPCA.
Arguments
:person_name: Name of the person to reject.
'''
show = {}
show['show_postal_address'] = config.get('show_postal_address')
exc = None
user = People.by_username(turbogears.identity.current.user_name)
if not is_admin(user):
# Only admins can use this
turbogears.flash(_('You are not allowed to reject FPCAs.'))
exc = 'NotAuthorized'
else:
# Unapprove the cla and all dependent groups
person = People.by_username(person_name)
for role in person.roles:
if self._cla_dependent(role.group):
role.role_status = 'unapproved'
try:
session.flush()
except DBAPIError, error:
turbogears.flash(_('Error removing cla and dependent groups' \
' for %(person)s\n Error was: %(error)s') %
{'person': person_name, 'error': str(error)})
exc = 'DBAPIError'
if not exc:
# Send a message that the ICLA has been revoked
date_time = datetime.utcnow()
Log(author_id=user.id, description='Revoked %s FPCA' %
person.username, changetime=date_time)
revoke_subject = _('Fedora ICLA Revoked', person.locale)
i18n_revoke_text = _('''
Hello %(human_name)s,
We're sorry to bother you but we had to reject your FPCA for now because
information you provided has been deemed incorrect. The most common cause
of this is people abbreviating their name like "B L Couper" instead of
providing their actual full name "Bill Lay Couper". Other causes of this
include using a country or phone number that isn't accurate [1]_.
If you could edit your account [2]_ to fix any of these problems and resubmit
the FPCA we would appreciate it.
.. [1]: Why does it matter that we have your real name and phone
number? It's because the FPCA is a legal document and should we ever
need to contact you about one of your contributions (as an example,
because someone contacts *us* claiming that it was really they who
own the copyright to the contribution) we might need to contact you
for more information about what's going on.
.. [2]: Edit your account by logging in at this URL:
https://admin.fedoraproject.org/accounts/user/edit/%(username)s
If you have questions about what specifically might be the problem with your
account, please contact us at accounts@fedoraproject.org.
Thanks!
''' % {'username': person.username, 'human_name': person.human_name}, person.locale)
            #TODO: Look at a better way to handle one text for multiple usage.
# while dealing with pot files.
std_revoke_text = '''
English version:
Hello %(human_name)s,
We're sorry to bother you but we had to reject your FPCA for now because
information you provided has been deemed incorrect. The most common cause
of this is people abbreviating their name like "B L Couper" instead of
providing their actual full name "Bill Lay Couper". Other causes of this
include using a country or phone number that isn't accurate [1]_.
If you could edit your account [2]_ to fix any of these problems and resubmit
the FPCA we would appreciate it.
.. [1]: Why does it matter that we have your real name and phone
number? It's because the FPCA is a legal document and should we ever
need to contact you about one of your contributions (as an example,
because someone contacts *us* claiming that it was really they who
own the copyright to the contribution) we might need to contact you
for more information about what's going on.
.. [2]: Edit your account by logging in at this URL:
https://admin.fedoraproject.org/accounts/user/edit/%(username)s
If you have questions about what specifically might be the problem with your
account, please contact us at accounts@fedoraproject.org.
Thanks!
''' % {'username': person.username, 'human_name': person.human_name}
revoke_text = i18n_revoke_text + std_revoke_text
send_mail(person.email, revoke_subject, revoke_text)
# Yay, sweet success!
turbogears.flash(_('FPCA Successfully Removed.'))
# and now we're done
if request_format() == 'json':
return_val = {}
if exc:
return_val['exc'] = exc
return return_val
else:
turbogears.redirect('/user/view/%s' % person_name)
@identity.require(turbogears.identity.not_anonymous())
@expose(template="fas.templates.fpca.index")
def send(self, human_name, telephone, country_code, postal_address=None,
confirm=False, agree=False):
'''Send FPCA'''
# TO DO: Pull show_postal_address in at the class level
# as it's used in three methods now
show = {}
show['show_postal_address'] = config.get('show_postal_address')
username = turbogears.identity.current.user_name
person = People.by_username(username)
if standard_cla_done(person):
turbogears.flash(_('You have already completed the FPCA.'))
turbogears.redirect('/fpca/')
return dict()
if not agree:
turbogears.flash(_("You have not completed the FPCA."))
turbogears.redirect('/user/view/%s' % person.username)
if not confirm:
turbogears.flash(_(
'You must confirm that your personal information is accurate.'
))
turbogears.redirect('/fpca/')
# Compare old information to new to see if any changes have been made
if human_name and person.human_name != human_name:
person.human_name = human_name
if telephone and person.telephone != telephone:
person.telephone = telephone
if postal_address and person.postal_address != postal_address:
person.postal_address = postal_address
if country_code and person.country_code != country_code:
person.country_code = country_code
# Save it to the database
try:
session.flush()
except Exception:
turbogears.flash(_("Your updated information could not be saved."))
turbogears.redirect('/fpca/')
return dict()
# Heuristics to detect bad data
if show['show_postal_address']:
contactInfo = person.telephone or person.postal_address
if person.country_code == 'O1':
if not person.human_name or not person.telephone:
# Message implemented on index
turbogears.redirect('/fpca/')
else:
if not person.country_code or not person.human_name \
or not contactInfo:
# Message implemented on index
turbogears.redirect('/fpca/')
else:
if not person.telephone or \
not person.human_name or \
not person.country_code:
turbogears.flash(_('To complete the FPCA, we must have your ' + \
                    'name, telephone number, and country. Please ensure they ' + \
'have been filled out.'))
turbogears.redirect('/fpca/')
blacklist = config.get('country_blacklist', [])
country_codes = [c for c in GeoIP.country_codes if c not in blacklist]
if person.country_code not in country_codes:
turbogears.flash(_('To complete the FPCA, a valid country code' + \
' must be specified. Please select one now.'))
turbogears.redirect('/fpca/')
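        # (Comment added for clarity.) The list comprehension below yields a
        # non-empty (truthy) list as soon as the telephone number contains any
        # character outside PHONEDIGITS, so the branch fires for invalid input.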
if [True for char in person.telephone if char not in self.PHONEDIGITS]:
turbogears.flash(_('Telephone numbers can only consist of' + \
' numbers, "-", "+", "(", ")", or " ". Please reenter using' +\
' only those characters.'))
turbogears.redirect('/fpca/')
group = Groups.by_name(self.CLAGROUPNAME)
try:
# Everything is correct.
person.apply(group, person) # Apply for the new group
session.flush()
fas.fedmsgshim.send_message(topic="group.member.apply", msg={
'agent': person.username,
'user': person.username,
'group': group.name,
})
except fas.ApplyError:
# This just means the user already is a member (probably
# unapproved) of this group
pass
except Exception:
turbogears.flash(_("You could not be added to the '%s' group.") %
group.name)
turbogears.redirect('/fpca/')
return dict()
try:
# Everything is correct.
person.sponsor(group, person) # Sponsor!
session.flush()
except fas.SponsorError:
turbogears.flash(_("You are already a part of the '%s' group.") %
group.name)
turbogears.redirect('/fpca/')
except:
turbogears.flash(_("You could not be added to the '%s' group.") %
group.name)
turbogears.redirect('/fpca/')
date_time = datetime.utcnow()
Log(author_id = person.id, description = 'Completed FPCA',
changetime = date_time)
cla_subject = \
'Fedora ICLA completed for %(human_name)s (%(username)s)' % \
{'username': person.username, 'human_name': person.human_name}
cla_text = '''
Fedora user %(username)s has completed an ICLA (below).
Username: %(username)s
Email: %(email)s
Date: %(date)s
If you need to revoke it, please visit this link:
https://admin.fedoraproject.org/accounts/fpca/reject/%(username)s
=== FPCA ===
''' % {'username': person.username,
'email': person.email,
'date': date_time.ctime(),}
# Sigh.. if only there were a nicer way.
plugin = TextTemplateEnginePlugin()
cla_text += plugin.transform(dict(person=person),
'fas.templates.fpca.fpca').render(method='text',
encoding=None)
send_mail(config.get('legal_cla_email'), cla_subject, cla_text)
fas.fedmsgshim.send_message(topic="group.member.sponsor", msg={
'agent': person.username,
'user': person.username,
'group': group.name,
})
turbogears.flash(_("You have successfully completed the FPCA. You " + \
"are now in the '%s' group.") % group.name)
turbogears.redirect('/user/view/%s' % person.username)
return dict()
|
cydrobolt/fas
|
fas/fpca.py
|
Python
|
gpl-2.0
| 17,099
|
[
"VisIt"
] |
5ec7b6661fe0ce7703dd772114fb58e6c0e86f9ec3a3e65f45b5e1e5570f46df
|
# -*- coding: utf-8 -*-
import asyncio
import discord
import random
import math
from discord.ext import commands
from libraries.library import *
from libraries import casino
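# (Comment added for clarity.) Each entry below combines an ASCII digit with
# U+20E3 COMBINING ENCLOSING KEYCAP, which Discord renders as the keycap emoji
# 0..9 used as reaction buttons for picking a bet.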
emotes = [
u"\u0030\N{COMBINING ENCLOSING KEYCAP}", #0
u"\u0031\N{COMBINING ENCLOSING KEYCAP}", #1
u"\u0032\N{COMBINING ENCLOSING KEYCAP}", #2
u"\u0033\N{COMBINING ENCLOSING KEYCAP}", #3
u"\u0034\N{COMBINING ENCLOSING KEYCAP}", #4
u"\u0035\N{COMBINING ENCLOSING KEYCAP}", #5
u"\u0036\N{COMBINING ENCLOSING KEYCAP}", #6
u"\u0037\N{COMBINING ENCLOSING KEYCAP}", #7
u"\u0038\N{COMBINING ENCLOSING KEYCAP}", #8
u"\u0039\N{COMBINING ENCLOSING KEYCAP}" #9
]
class Jeux:
"""Jeux proposés par le bot"""
def __init__(self,bot):
self.bot = bot
@commands.command(pass_context=True, no_pm=False)
async def casino(self, ctx):
"""Bienvenue au ZCasino !
Misez une somme sur un nombre, si c'est le bon nombre vous récupérez
trois fois votre mise, si le nombre est de la même couleur vous
récupérez la moitié de votre mise ! Bonne chance !"""
data = casino.start()
server = ctx.message.server
user = ctx.message.author
chan = ctx.message.channel
await self.bot.delete_message(ctx.message)
res_embed = discord.Embed()
res_embed.colour = 0x3498db
res_embed.title = "Choisissez votre mise (0-9)"
res = await self.bot.say(embed = res_embed)
for x in emotes:
await self.bot.add_reaction(res, x)
await asyncio.sleep(1)
result = await self.bot.wait_for_reaction(emotes, message = res)
react = result.reaction.emoji
await self.bot.clear_reactions(res)
await self.bot.delete_message(res)
nb = emotes.index(react)
money = casino.get(server, user, data)
if money == 0:
tmp = await self.bot.say("Votre cagnotte est vide, adressez-vous à Sakiut pour la remplir.")
await asyncio.sleep(2)
await self.bot.delete_message(tmp)
return
mon_embed = discord.Embed()
mon_embed.colour = 0x3498db
mon_embed.title = "Vous avez une cagnotte de {}$".format(money)
mise_embed = discord.Embed()
mise_embed.colour = 0x3498db
mise_embed.title = "Combien souhaitez-vous miser ?"
mise_embed.set_footer(text = "Répondre par message")
mon = await self.bot.say(embed = mon_embed)
mis = await self.bot.say(embed = mise_embed)
ok = False
async def send_tmp(msg):
tmp = await self.bot.say("Vous devez envoyer un nombre entier inférieur ou égal au \
montant de votre cagnotte et supérieur à 0.")
await self.bot.delete_message(msg)
await asyncio.sleep(5)
await self.bot.delete_message(tmp)
while ok is not True:
ans = await self.bot.wait_for_message(author = user, channel = chan)
try:
mise = int(ans.content)
if mise > money or mise <= 0:
await send_tmp(ans)
continue
else:
ok = True
break
except ValueError:
await send_tmp(ans)
continue
await self.bot.delete_message(mon)
await self.bot.delete_message(mis)
await self.bot.delete_message(ans)
cas_embed = discord.Embed()
cas_embed.title = "Bienvenue au ZCasino !"
cas_embed.add_field(name = "Mise", value = str(mise))
cas_embed.add_field(name = "Nombre", value = str(nb))
cas = await self.bot.say(embed = cas_embed)
await asyncio.sleep(2)
result = random.randrange(0, 10)
if nb % 2 == 0:
nbpair = True
nb_icon = "🔴"
else:
nbpair = False
nb_icon = "🔵"
if result % 2 == 0:
pair = True
res_icon = "🔴"
else:
pair = False
res_icon = "🔵"
rand_embed = discord.Embed()
rand_embed.title = "Et le résultat est..."
rand_embed.add_field(name = "Nombre départ", value = str(nb_icon) + " " + str(nb))
rand_embed.add_field(name = "Résultat", value = str(res_icon) + " " + str(result))
rand_embed.set_footer(text = "Joueur : {0}#{1}".format(user.name, user.discriminator), icon_url = ctx.message.author.avatar_url)
rand = await self.bot.say(embed = rand_embed)
await asyncio.sleep(0.5)
await self.bot.delete_message(cas)
if result == nb:
win = mise * 3
rand_embed.add_field(name = "_ _", value = "Vous avez gagné ! Vous récupérez " + str(win) + "$")
elif nbpair is pair:
win = mise / 2
win = math.ceil(win)
rand_embed.add_field(name = "_ _", value = "La couleur des nombres correspond, vous récupérez " + str(win) + "$.")
else:
win = 0
rand_embed.add_field(name = "_ _", value = "Vous avez perdu, vous pouvez recommencer ! 😉")
await self.bot.edit_message(rand, embed = rand_embed)
money = money - mise + win
casino.post(server, user, data, money)
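        # Hedged summary of the payout rule implemented above (comment added for
        # clarity, not in the original file): betting 10 on number 3 when the
        # wheel lands on 7 is a "same colour" case (both odd), so the player
        # gets back ceil(10 / 2) = 5 and the new balance is money - 10 + 5.
        # An exact match would instead return 3 * 10 = 30; any other outcome
        # returns nothing.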
@commands.command(pass_context=True, no_pm=True)
async def money(self, ctx, *, user:discord.Member=None):
"""Affiche votre cagnotte"""
data = casino.start()
server = ctx.message.server
if not user:
user = ctx.message.author
chan = ctx.message.channel
await self.bot.delete_message(ctx.message)
money = casino.get(server, user, data)
mon_embed = discord.Embed()
mon_embed.colour = 0x3498db
mon_embed.title = "Cagnotte : {}$".format(str(money))
mon_embed.set_footer(text = "{0}#{1}".format(user.name, user.discriminator), icon_url = user.avatar_url)
await self.bot.say(embed = mon_embed)
@commands.command(pass_context=True, no_pm=True)
async def add_money(self, ctx, *, user:discord.Member=None):
"""Ajoute de l'argent à la cagnotte d'un utilisateur
Bot Master uniquement"""
data = casino.start()
server = ctx.message.server
if not user:
user = ctx.message.author
chan = ctx.message.channel
if ctx.message.author.id == "187565415512276993":
await self.bot.delete_message(ctx.message)
tmp = await self.bot.say("Combien voulez-vous ajouter à la cagnotte de {} ?".format(user.name))
ans = await self.bot.wait_for_message(author = ctx.message.author, channel = chan)
money = int(ans.content)
await self.bot.delete_message(ans)
casino.post(server, user, data, money)
await self.bot.delete_message(tmp)
tmp = await self.bot.say("Done")
await asyncio.sleep(3)
await self.bot.delete_message(tmp)
else:
return
|
Sakiut/FalltoSkyBot
|
cogs/jeux.py
|
Python
|
agpl-3.0
| 7,246
|
[
"CASINO"
] |
a075da63ea379126d6173d85d3c1ba318e7ce348628cd7d258753812f18d61ec
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import scipy.linalg
from pyscf.pbc.gto import Cell
from pyscf.pbc.tools import k2gamma
from pyscf.pbc.scf import rsjk
cell = Cell().build(
a = np.eye(3)*1.8,
atom = '''He 0. 0. 0.
He 0.4917 0.4917 0.4917''',
basis = {'He': [[0, [2.5, 1]]]})
cell1 = Cell().build(
a = np.eye(3)*2.6,
atom = '''He 0.4917 0.4917 0.4917''',
basis = {'He': [[0, [4.8, 1, -.1],
[1.1, .3, .5],
[0.15, .2, .8]],
[1, [0.8, 1]],]})
def tearDownModule():
global cell, cell1
del cell, cell1
class KnowValues(unittest.TestCase):
def test_get_jk(self):
kpts = cell.make_kpts([3,1,1])
np.random.seed(1)
dm = (np.random.rand(len(kpts), cell.nao, cell.nao) +
np.random.rand(len(kpts), cell.nao, cell.nao) * 1j)
dm = dm + dm.transpose(0,2,1).conj()
kmesh = k2gamma.kpts_to_kmesh(cell, kpts)
phase = k2gamma.get_phase(cell, kpts, kmesh)[1]
dm = np.einsum('Rk,kuv,Sk->RSuv', phase.conj().T, dm, phase.T)
dm = np.einsum('Rk,RSuv,Sk->kuv', phase, dm.real, phase.conj())
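        # (Comment added for clarity.) The two einsum calls above transform the
        # random k-space density matrix to the supercell (real-space) basis and
        # back, keeping only the real part in between; this projects onto a
        # physically consistent, Hermitian set of k-point density matrices so
        # the reference and RSJK builders can be compared meaningfully.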
mf = cell.KRHF(kpts=kpts)
jref, kref = mf.get_jk(cell, dm, kpts=kpts)
ej = np.einsum('kij,kji->', jref, dm)
ek = np.einsum('kij,kji->', kref, dm) * .5
jk_builder = rsjk.RangeSeparationJKBuilder(cell, kpts)
jk_builder.omega = 0.5
vj, vk = jk_builder.get_jk(dm, kpts=kpts, exxdiv=mf.exxdiv)
self.assertAlmostEqual(abs(vj - jref).max(), 0, 7)
self.assertAlmostEqual(abs(vk - kref).max(), 0, 7)
vj, vk = jk_builder.get_jk(dm, kpts=kpts, exxdiv=mf.exxdiv, with_k=False)
self.assertAlmostEqual(abs(vj - jref).max(), 0, 7)
vj, vk = jk_builder.get_jk(dm, kpts=kpts, exxdiv=mf.exxdiv, with_j=False)
self.assertAlmostEqual(abs(vk - kref).max(), 0, 7)
vj, vk = jk_builder.get_jk(dm, hermi=0, kpts=kpts, exxdiv=mf.exxdiv)
self.assertAlmostEqual(abs(vj - jref).max(), 0, 7)
self.assertAlmostEqual(abs(vk - kref).max(), 0, 7)
vj, vk = jk_builder.get_jk(dm, hermi=0, kpts=kpts, exxdiv=mf.exxdiv, with_k=False)
self.assertAlmostEqual(abs(vj - jref).max(), 0, 7)
vj, vk = jk_builder.get_jk(dm, hermi=0, kpts=kpts, exxdiv=mf.exxdiv, with_j=False)
self.assertAlmostEqual(abs(vk - kref).max(), 0, 7)
vj, vk = mf.jk_method('RS').get_jk(cell, dm)
self.assertAlmostEqual(abs(vj - jref).max(), 0, 7)
self.assertAlmostEqual(abs(vk - kref).max(), 0, 7)
mf = cell.KUHF(kpts=kpts)
jref, kref = mf.get_jk(cell, np.array([dm, dm]))
vj, vk = mf.jk_method('RS').get_jk(cell, np.array([dm, dm]))
self.assertAlmostEqual(abs(vj - jref).max(), 0, 7)
self.assertAlmostEqual(abs(vk - kref).max(), 0, 7)
mf = cell.KROHF(kpts=kpts)
jref, kref = mf.get_jk(cell, dm)
vj, vk = mf.jk_method('RS').get_jk(cell, dm)
self.assertAlmostEqual(abs(vj - jref).max(), 0, 7)
self.assertAlmostEqual(abs(vk - kref).max(), 0, 7)
mf = cell.RHF(kpt=kpts[0])
jref, kref = mf.get_jk(cell, dm[0])
vj, vk = mf.jk_method('RS').get_jk(cell, dm[0])
self.assertAlmostEqual(abs(vj - jref).max(), 0, 7)
self.assertAlmostEqual(abs(vk - kref).max(), 0, 7)
mf = cell.UHF(kpt=kpts[0])
jref, kref = mf.get_jk(cell, dm[[0,0]])
vj, vk = mf.jk_method('RS').get_jk(cell, dm[[0,0]])
self.assertAlmostEqual(abs(vj - jref).max(), 0, 7)
self.assertAlmostEqual(abs(vk - kref).max(), 0, 7)
mf = cell.ROHF(kpt=kpts[0])
jref, kref = mf.get_jk(cell, dm[0])
vj, vk = mf.jk_method('RS').get_jk(cell, dm[0])
self.assertAlmostEqual(abs(vj - jref).max(), 0, 7)
self.assertAlmostEqual(abs(vk - kref).max(), 0, 7)
def test_get_jk_high_cost(self):
kpts = cell1.make_kpts([3,1,1])
np.random.seed(1)
dm = (np.random.rand(len(kpts), cell1.nao, cell1.nao) +
np.random.rand(len(kpts), cell1.nao, cell1.nao) * 1j)
dm = dm + dm.transpose(0,2,1).conj()
kmesh = k2gamma.kpts_to_kmesh(cell1, kpts)
phase = k2gamma.get_phase(cell1, kpts, kmesh)[1]
dm = np.einsum('Rk,kuv,Sk->RSuv', phase.conj().T, dm, phase.T)
dm = np.einsum('Rk,RSuv,Sk->kuv', phase, dm.real, phase.conj())
mf = cell1.KRHF(kpts=kpts)
jref, kref = mf.get_jk(cell1, dm, kpts=kpts)
ej = np.einsum('kij,kji->', jref, dm)
ek = np.einsum('kij,kji->', kref, dm) * .5
jk_builder = rsjk.RangeSeparationJKBuilder(cell1, kpts)
jk_builder.omega = 0.5
vj, vk = jk_builder.get_jk(dm, kpts=kpts, exxdiv=mf.exxdiv)
self.assertAlmostEqual(abs(vj - jref).max(), 0, 7)
self.assertAlmostEqual(abs(vk - kref).max(), 0, 7)
vj, vk = jk_builder.get_jk(dm, kpts=kpts, exxdiv=mf.exxdiv, with_k=False)
self.assertAlmostEqual(abs(vj - jref).max(), 0, 7)
vj, vk = jk_builder.get_jk(dm, kpts=kpts, exxdiv=mf.exxdiv, with_j=False)
self.assertAlmostEqual(abs(vk - kref).max(), 0, 7)
vj, vk = jk_builder.get_jk(dm, hermi=0, kpts=kpts, exxdiv=mf.exxdiv)
self.assertAlmostEqual(abs(vj - jref).max(), 0, 7)
self.assertAlmostEqual(abs(vk - kref).max(), 0, 7)
vj, vk = jk_builder.get_jk(dm, hermi=0, kpts=kpts, exxdiv=mf.exxdiv, with_k=False)
self.assertAlmostEqual(abs(vj - jref).max(), 0, 7)
vj, vk = jk_builder.get_jk(dm, hermi=0, kpts=kpts, exxdiv=mf.exxdiv, with_j=False)
self.assertAlmostEqual(abs(vk - kref).max(), 0, 7)
if __name__ == '__main__':
print("Full Tests for rsjk")
unittest.main()
|
sunqm/pyscf
|
pyscf/pbc/scf/test/test_rsjk.py
|
Python
|
apache-2.0
| 6,424
|
[
"PySCF"
] |
89a729baedeb6bc28babe5566c374f3170efdeedf4665fa98d18b02b9277f4a6
|
# Copyright: (c) 2013, James Cammarata <jcammarata@ansible.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import re
import shutil
import textwrap
import time
import yaml
from yaml.error import YAMLError
import ansible.constants as C
from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy, get_collections_galaxy_meta_info
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.collection import (
build_collection,
CollectionRequirement,
download_collections,
find_existing_collections,
install_collections,
publish_collection,
validate_collection_name,
validate_collection_path,
verify_collections
)
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import BasicAuthToken, GalaxyToken, KeycloakToken, NoTokenSentinel
from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.common.collections import is_iterable
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils import six
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.playbook.role.requirement import RoleRequirement
from ansible.template import Templar
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_versioned_doclink
display = Display()
urlparse = six.moves.urllib.parse.urlparse
def _display_header(path, h1, h2, w1=10, w2=7):
display.display('\n# {0}\n{1:{cwidth}} {2:{vwidth}}\n{3} {4}\n'.format(
path,
h1,
h2,
'-' * max([len(h1), w1]), # Make sure that the number of dashes is at least the width of the header
'-' * max([len(h2), w2]),
cwidth=w1,
vwidth=w2,
))
def _display_role(gr):
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (gr.name, version))
def _display_collection(collection, cwidth=10, vwidth=7, min_cwidth=10, min_vwidth=7):
display.display('{fqcn:{cwidth}} {version:{vwidth}}'.format(
fqcn=to_text(collection),
version=collection.latest_version,
cwidth=max(cwidth, min_cwidth), # Make sure the width isn't smaller than the header
vwidth=max(vwidth, min_vwidth)
))
def _get_collection_widths(collections):
if is_iterable(collections):
fqcn_set = set(to_text(c) for c in collections)
version_set = set(to_text(c.latest_version) for c in collections)
else:
fqcn_set = set([to_text(collections)])
version_set = set([collections.latest_version])
fqcn_length = len(max(fqcn_set, key=len))
version_length = len(max(version_set, key=len))
return fqcn_length, version_length
class GalaxyCLI(CLI):
'''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
def __init__(self, args):
# Inject role into sys.argv[1] as a backwards compatibility step
if len(args) > 1 and args[1] not in ['-h', '--help', '--version'] and 'role' not in args and 'collection' not in args:
# TODO: Should we add a warning here and eventually deprecate the implicit role subcommand choice
# Remove this in Ansible 2.13 when we also remove -v as an option on the root parser for ansible-galaxy.
idx = 2 if args[1].startswith('-v') else 1
args.insert(idx, 'role')
self.api_servers = []
self.galaxy = None
super(GalaxyCLI, self).__init__(args)
def init_parser(self):
''' create an options parser for bin/ansible '''
super(GalaxyCLI, self).init_parser(
desc="Perform various Role and Collection related operations.",
)
# Common arguments that apply to more than 1 action
common = opt_help.argparse.ArgumentParser(add_help=False)
common.add_argument('-s', '--server', dest='api_server', help='The Galaxy API server URL')
common.add_argument('--token', '--api-key', dest='api_key',
help='The Ansible Galaxy API key which can be found at '
'https://galaxy.ansible.com/me/preferences. You can also use ansible-galaxy login to '
'retrieve this key or set the token for the GALAXY_SERVER_LIST entry.')
common.add_argument('-c', '--ignore-certs', action='store_true', dest='ignore_certs',
default=C.GALAXY_IGNORE_CERTS, help='Ignore SSL certificate validation errors.')
opt_help.add_verbosity_options(common)
force = opt_help.argparse.ArgumentParser(add_help=False)
force.add_argument('-f', '--force', dest='force', action='store_true', default=False,
help='Force overwriting an existing role or collection')
github = opt_help.argparse.ArgumentParser(add_help=False)
github.add_argument('github_user', help='GitHub username')
github.add_argument('github_repo', help='GitHub repository')
offline = opt_help.argparse.ArgumentParser(add_help=False)
offline.add_argument('--offline', dest='offline', default=False, action='store_true',
help="Don't query the galaxy API when creating roles")
default_roles_path = C.config.get_configuration_definition('DEFAULT_ROLES_PATH').get('default', '')
roles_path = opt_help.argparse.ArgumentParser(add_help=False)
roles_path.add_argument('-p', '--roles-path', dest='roles_path', type=opt_help.unfrack_path(pathsep=True),
default=C.DEFAULT_ROLES_PATH, action=opt_help.PrependListAction,
help='The path to the directory containing your roles. The default is the first '
'writable one configured via DEFAULT_ROLES_PATH: %s ' % default_roles_path)
collections_path = opt_help.argparse.ArgumentParser(add_help=False)
collections_path.add_argument('-p', '--collection-path', dest='collections_path', type=opt_help.unfrack_path(pathsep=True),
default=C.COLLECTIONS_PATHS, action=opt_help.PrependListAction,
help="One or more directories to search for collections in addition "
"to the default COLLECTIONS_PATHS. Separate multiple paths "
"with '{0}'.".format(os.path.pathsep))
# Add sub parser for the Galaxy role type (role or collection)
type_parser = self.parser.add_subparsers(metavar='TYPE', dest='type')
type_parser.required = True
# Add sub parser for the Galaxy collection actions
collection = type_parser.add_parser('collection', help='Manage an Ansible Galaxy collection.')
collection_parser = collection.add_subparsers(metavar='COLLECTION_ACTION', dest='action')
collection_parser.required = True
self.add_download_options(collection_parser, parents=[common])
self.add_init_options(collection_parser, parents=[common, force])
self.add_build_options(collection_parser, parents=[common, force])
self.add_publish_options(collection_parser, parents=[common])
self.add_install_options(collection_parser, parents=[common, force])
self.add_list_options(collection_parser, parents=[common, collections_path])
self.add_verify_options(collection_parser, parents=[common, collections_path])
# Add sub parser for the Galaxy role actions
role = type_parser.add_parser('role', help='Manage an Ansible Galaxy role.')
role_parser = role.add_subparsers(metavar='ROLE_ACTION', dest='action')
role_parser.required = True
self.add_init_options(role_parser, parents=[common, force, offline])
self.add_remove_options(role_parser, parents=[common, roles_path])
self.add_delete_options(role_parser, parents=[common, github])
self.add_list_options(role_parser, parents=[common, roles_path])
self.add_search_options(role_parser, parents=[common])
self.add_import_options(role_parser, parents=[common, github])
self.add_setup_options(role_parser, parents=[common, roles_path])
self.add_login_options(role_parser, parents=[common])
self.add_info_options(role_parser, parents=[common, roles_path, offline])
self.add_install_options(role_parser, parents=[common, force, roles_path])
def add_download_options(self, parser, parents=None):
download_parser = parser.add_parser('download', parents=parents,
help='Download collections and their dependencies as a tarball for an '
'offline install.')
download_parser.set_defaults(func=self.execute_download)
download_parser.add_argument('args', help='Collection(s)', metavar='collection', nargs='*')
download_parser.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help="Don't download collection(s) listed as dependencies.")
download_parser.add_argument('-p', '--download-path', dest='download_path',
default='./collections',
help='The directory to download the collections to.')
download_parser.add_argument('-r', '--requirements-file', dest='requirements',
help='A file containing a list of collections to be downloaded.')
download_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
def add_init_options(self, parser, parents=None):
galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
init_parser = parser.add_parser('init', parents=parents,
help='Initialize new {0} with the base structure of a '
'{0}.'.format(galaxy_type))
init_parser.set_defaults(func=self.execute_init)
init_parser.add_argument('--init-path', dest='init_path', default='./',
help='The path in which the skeleton {0} will be created. The default is the '
'current working directory.'.format(galaxy_type))
init_parser.add_argument('--{0}-skeleton'.format(galaxy_type), dest='{0}_skeleton'.format(galaxy_type),
default=C.GALAXY_ROLE_SKELETON,
help='The path to a {0} skeleton that the new {0} should be based '
'upon.'.format(galaxy_type))
obj_name_kwargs = {}
if galaxy_type == 'collection':
obj_name_kwargs['type'] = validate_collection_name
init_parser.add_argument('{0}_name'.format(galaxy_type), help='{0} name'.format(galaxy_type.capitalize()),
**obj_name_kwargs)
if galaxy_type == 'role':
init_parser.add_argument('--type', dest='role_type', action='store', default='default',
help="Initialize using an alternate role type. Valid types include: 'container', "
"'apb' and 'network'.")
def add_remove_options(self, parser, parents=None):
remove_parser = parser.add_parser('remove', parents=parents, help='Delete roles from roles_path.')
remove_parser.set_defaults(func=self.execute_remove)
remove_parser.add_argument('args', help='Role(s)', metavar='role', nargs='+')
def add_delete_options(self, parser, parents=None):
delete_parser = parser.add_parser('delete', parents=parents,
help='Removes the role from Galaxy. It does not remove or alter the actual '
'GitHub repository.')
delete_parser.set_defaults(func=self.execute_delete)
def add_list_options(self, parser, parents=None):
galaxy_type = 'role'
if parser.metavar == 'COLLECTION_ACTION':
galaxy_type = 'collection'
list_parser = parser.add_parser('list', parents=parents,
help='Show the name and version of each {0} installed in the {0}s_path.'.format(galaxy_type))
list_parser.set_defaults(func=self.execute_list)
list_parser.add_argument(galaxy_type, help=galaxy_type.capitalize(), nargs='?', metavar=galaxy_type)
def add_search_options(self, parser, parents=None):
search_parser = parser.add_parser('search', parents=parents,
help='Search the Galaxy database by tags, platforms, author and multiple '
'keywords.')
search_parser.set_defaults(func=self.execute_search)
search_parser.add_argument('--platforms', dest='platforms', help='list of OS platforms to filter by')
search_parser.add_argument('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
search_parser.add_argument('--author', dest='author', help='GitHub username')
search_parser.add_argument('args', help='Search terms', metavar='searchterm', nargs='*')
def add_import_options(self, parser, parents=None):
import_parser = parser.add_parser('import', parents=parents, help='Import a role into a galaxy server')
import_parser.set_defaults(func=self.execute_import)
import_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
help="Don't wait for import results.")
import_parser.add_argument('--branch', dest='reference',
help='The name of a branch to import. Defaults to the repository\'s default branch '
'(usually master)')
import_parser.add_argument('--role-name', dest='role_name',
help='The name the role should have, if different than the repo name')
import_parser.add_argument('--status', dest='check_status', action='store_true', default=False,
help='Check the status of the most recent import request for given github_'
'user/github_repo.')
def add_setup_options(self, parser, parents=None):
setup_parser = parser.add_parser('setup', parents=parents,
help='Manage the integration between Galaxy and the given source.')
setup_parser.set_defaults(func=self.execute_setup)
setup_parser.add_argument('--remove', dest='remove_id', default=None,
help='Remove the integration matching the provided ID value. Use --list to see '
'ID values.')
setup_parser.add_argument('--list', dest="setup_list", action='store_true', default=False,
help='List all of your integrations.')
setup_parser.add_argument('source', help='Source')
setup_parser.add_argument('github_user', help='GitHub username')
setup_parser.add_argument('github_repo', help='GitHub repository')
setup_parser.add_argument('secret', help='Secret')
def add_login_options(self, parser, parents=None):
login_parser = parser.add_parser('login', parents=parents,
help="Login to api.github.com server in order to use ansible-galaxy role sub "
"command such as 'import', 'delete', 'publish', and 'setup'")
login_parser.set_defaults(func=self.execute_login)
login_parser.add_argument('--github-token', dest='token', default=None,
help='Identify with github token rather than username and password.')
def add_info_options(self, parser, parents=None):
info_parser = parser.add_parser('info', parents=parents, help='View more details about a specific role.')
info_parser.set_defaults(func=self.execute_info)
info_parser.add_argument('args', nargs='+', help='role', metavar='role_name[,version]')
def add_verify_options(self, parser, parents=None):
galaxy_type = 'collection'
verify_parser = parser.add_parser('verify', parents=parents, help='Compare checksums with the collection(s) '
'found on the server and the installed copy. This does not verify dependencies.')
verify_parser.set_defaults(func=self.execute_verify)
verify_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', help='The collection(s) name or '
'path/url to a tar.gz collection artifact. This is mutually exclusive with --requirements-file.')
verify_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors during verification and continue with the next specified collection.')
verify_parser.add_argument('-r', '--requirements-file', dest='requirements',
help='A file containing a list of collections to be verified.')
def add_install_options(self, parser, parents=None):
galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
args_kwargs = {}
if galaxy_type == 'collection':
args_kwargs['help'] = 'The collection(s) name or path/url to a tar.gz collection artifact. This is ' \
'mutually exclusive with --requirements-file.'
ignore_errors_help = 'Ignore errors during installation and continue with the next specified ' \
'collection. This will not ignore dependency conflict errors.'
else:
args_kwargs['help'] = 'Role name, URL or tar file'
ignore_errors_help = 'Ignore errors and continue with the next specified role.'
install_parser = parser.add_parser('install', parents=parents,
help='Install {0}(s) from file(s), URL(s) or Ansible '
'Galaxy'.format(galaxy_type))
install_parser.set_defaults(func=self.execute_install)
install_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', **args_kwargs)
install_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help=ignore_errors_help)
install_exclusive = install_parser.add_mutually_exclusive_group()
install_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help="Don't download {0}s listed as dependencies.".format(galaxy_type))
install_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
help="Force overwriting an existing {0} and its "
"dependencies.".format(galaxy_type))
if galaxy_type == 'collection':
install_parser.add_argument('-p', '--collections-path', dest='collections_path',
default=C.COLLECTIONS_PATHS[0],
help='The path to the directory containing your collections.')
install_parser.add_argument('-r', '--requirements-file', dest='requirements',
help='A file containing a list of collections to be installed.')
install_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
else:
install_parser.add_argument('-r', '--role-file', dest='role_file',
help='A file containing a list of roles to be imported.')
install_parser.add_argument('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
default=False,
help='Use tar instead of the scm archive option when packaging the role.')
def add_build_options(self, parser, parents=None):
build_parser = parser.add_parser('build', parents=parents,
                                         help='Build an Ansible collection artifact that can be published to Ansible '
'Galaxy.')
build_parser.set_defaults(func=self.execute_build)
build_parser.add_argument('args', metavar='collection', nargs='*', default=('.',),
help='Path to the collection(s) directory to build. This should be the directory '
'that contains the galaxy.yml file. The default is the current working '
'directory.')
build_parser.add_argument('--output-path', dest='output_path', default='./',
help='The path in which the collection is built to. The default is the current '
'working directory.')
def add_publish_options(self, parser, parents=None):
publish_parser = parser.add_parser('publish', parents=parents,
help='Publish a collection artifact to Ansible Galaxy.')
publish_parser.set_defaults(func=self.execute_publish)
publish_parser.add_argument('args', metavar='collection_path',
help='The path to the collection tarball to publish.')
publish_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
help="Don't wait for import validation results.")
publish_parser.add_argument('--import-timeout', dest='import_timeout', type=int, default=0,
help="The time to wait for the collection import process to finish.")
def post_process_args(self, options):
options = super(GalaxyCLI, self).post_process_args(options)
display.verbosity = options.verbosity
return options
def run(self):
super(GalaxyCLI, self).run()
self.galaxy = Galaxy()
def server_config_def(section, key, required):
return {
'description': 'The %s of the %s Galaxy server' % (key, section),
'ini': [
{
'section': 'galaxy_server.%s' % section,
'key': key,
}
],
'env': [
{'name': 'ANSIBLE_GALAXY_SERVER_%s_%s' % (section.upper(), key.upper())},
],
'required': required,
}
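        # Hedged illustration (comment added; not in the original source): for a
        # GALAXY_SERVER_LIST entry named ``release_galaxy`` the definitions built
        # above resolve values from an ansible.cfg section such as
        #
        #   [galaxy_server.release_galaxy]
        #   url=https://galaxy.ansible.com/
        #   token=<api token>
        #
        # or from environment variables like ANSIBLE_GALAXY_SERVER_RELEASE_GALAXY_URL.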
server_def = [('url', True), ('username', False), ('password', False), ('token', False),
('auth_url', False)]
config_servers = []
# Need to filter out empty strings or non truthy values as an empty server list env var is equal to [''].
server_list = [s for s in C.GALAXY_SERVER_LIST or [] if s]
for server_key in server_list:
# Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the
# section [galaxy_server.<server>] for the values url, username, password, and token.
config_dict = dict((k, server_config_def(server_key, k, req)) for k, req in server_def)
defs = AnsibleLoader(yaml.safe_dump(config_dict)).get_single_data()
C.config.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs)
server_options = C.config.get_plugin_options('galaxy_server', server_key)
# auth_url is used to create the token, but not directly by GalaxyAPI, so
# it doesn't need to be passed as kwarg to GalaxyApi
auth_url = server_options.pop('auth_url', None)
token_val = server_options['token'] or NoTokenSentinel
username = server_options['username']
# default case if no auth info is provided.
server_options['token'] = None
if username:
server_options['token'] = BasicAuthToken(username,
server_options['password'])
else:
if token_val:
if auth_url:
server_options['token'] = KeycloakToken(access_token=token_val,
auth_url=auth_url,
validate_certs=not context.CLIARGS['ignore_certs'])
else:
# The galaxy v1 / github / django / 'Token'
server_options['token'] = GalaxyToken(token=token_val)
config_servers.append(GalaxyAPI(self.galaxy, server_key, **server_options))
cmd_server = context.CLIARGS['api_server']
cmd_token = GalaxyToken(token=context.CLIARGS['api_key'])
if cmd_server:
            # Cmd args take precedence over the config entry but first check if the arg was a name and use that config
# entry, otherwise create a new API entry for the server specified.
config_server = next((s for s in config_servers if s.name == cmd_server), None)
if config_server:
self.api_servers.append(config_server)
else:
self.api_servers.append(GalaxyAPI(self.galaxy, 'cmd_arg', cmd_server, token=cmd_token))
else:
self.api_servers = config_servers
# Default to C.GALAXY_SERVER if no servers were defined
if len(self.api_servers) == 0:
self.api_servers.append(GalaxyAPI(self.galaxy, 'default', C.GALAXY_SERVER, token=cmd_token))
context.CLIARGS['func']()
@property
def api(self):
return self.api_servers[0]
def _parse_requirements_file(self, requirements_file, allow_old_format=True):
"""
        Parses an Ansible requirements.yml file and returns all the roles and/or collections defined in it. There are 2
        requirements file formats:
# v1 (roles only)
- src: The source of the role, required if include is not set. Can be Galaxy role name, URL to a SCM repo or tarball.
name: Downloads the role to the specified name, defaults to Galaxy name from Galaxy or name of repo if src is a URL.
          scm: If src is a URL, specify the SCM. Only git or hg are supported and defaults to git.
version: The version of the role to download. Can also be tag, commit, or branch name and defaults to master.
include: Path to additional requirements.yml files.
# v2 (roles and collections)
---
roles:
# Same as v1 format just under the roles key
collections:
- namespace.collection
- name: namespace.collection
version: version identifier, multiple identifiers are separated by ','
source: the URL or a predefined source name that relates to C.GALAXY_SERVER_LIST
:param requirements_file: The path to the requirements file.
:param allow_old_format: Will fail if a v1 requirements file is found and this is set to False.
        :return: a dict containing the roles and collections found in the requirements file.
"""
requirements = {
'roles': [],
'collections': [],
}
b_requirements_file = to_bytes(requirements_file, errors='surrogate_or_strict')
if not os.path.exists(b_requirements_file):
raise AnsibleError("The requirements file '%s' does not exist." % to_native(requirements_file))
display.vvv("Reading requirement file at '%s'" % requirements_file)
with open(b_requirements_file, 'rb') as req_obj:
try:
file_requirements = yaml.safe_load(req_obj)
except YAMLError as err:
raise AnsibleError(
"Failed to parse the requirements yml at '%s' with the following error:\n%s"
% (to_native(requirements_file), to_native(err)))
if file_requirements is None:
raise AnsibleError("No requirements found in file '%s'" % to_native(requirements_file))
def parse_role_req(requirement):
if "include" not in requirement:
role = RoleRequirement.role_yaml_parse(requirement)
display.vvv("found role %s in yaml file" % to_text(role))
if "name" not in role and "src" not in role:
raise AnsibleError("Must specify name or src for role")
return [GalaxyRole(self.galaxy, self.api, **role)]
else:
b_include_path = to_bytes(requirement["include"], errors="surrogate_or_strict")
if not os.path.isfile(b_include_path):
raise AnsibleError("Failed to find include requirements file '%s' in '%s'"
% (to_native(b_include_path), to_native(requirements_file)))
with open(b_include_path, 'rb') as f_include:
try:
return [GalaxyRole(self.galaxy, self.api, **r) for r in
(RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))]
except Exception as e:
raise AnsibleError("Unable to load data from include requirements file: %s %s"
% (to_native(requirements_file), to_native(e)))
if isinstance(file_requirements, list):
# Older format that contains only roles
if not allow_old_format:
raise AnsibleError("Expecting requirements file to be a dict with the key 'collections' that contains "
"a list of collections to install")
for role_req in file_requirements:
requirements['roles'] += parse_role_req(role_req)
else:
# Newer format with a collections and/or roles key
extra_keys = set(file_requirements.keys()).difference(set(['roles', 'collections']))
if extra_keys:
raise AnsibleError("Expecting only 'roles' and/or 'collections' as base keys in the requirements "
"file. Found: %s" % (to_native(", ".join(extra_keys))))
for role_req in file_requirements.get('roles', []):
requirements['roles'] += parse_role_req(role_req)
for collection_req in file_requirements.get('collections', []):
if isinstance(collection_req, dict):
req_name = collection_req.get('name', None)
if req_name is None:
raise AnsibleError("Collections requirement entry should contain the key name.")
req_version = collection_req.get('version', '*')
req_source = collection_req.get('source', None)
if req_source:
# Try and match up the requirement source with our list of Galaxy API servers defined in the
# config, otherwise create a server with that URL without any auth.
req_source = next(iter([a for a in self.api_servers if req_source in [a.name, a.api_server]]),
GalaxyAPI(self.galaxy, "explicit_requirement_%s" % req_name, req_source))
requirements['collections'].append((req_name, req_version, req_source))
else:
requirements['collections'].append((collection_req, '*', None))
return requirements
@staticmethod
def exit_without_ignore(rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not context.CLIARGS['ignore_errors']:
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
@staticmethod
def _display_role_info(role_info):
text = [u"", u"Role: %s" % to_text(role_info['name'])]
# Get the top-level 'description' first, falling back to galaxy_info['galaxy_info']['description'].
galaxy_info = role_info.get('galaxy_info', {})
description = role_info.get('description', galaxy_info.get('description', ''))
text.append(u"\tdescription: %s" % description)
for k in sorted(role_info.keys()):
if k in GalaxyCLI.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in GalaxyCLI.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
return u'\n'.join(text)
@staticmethod
def _resolve_path(path):
return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
@staticmethod
def _get_skeleton_galaxy_yml(template_path, inject_data):
with open(to_bytes(template_path, errors='surrogate_or_strict'), 'rb') as template_obj:
meta_template = to_text(template_obj.read(), errors='surrogate_or_strict')
galaxy_meta = get_collections_galaxy_meta_info()
required_config = []
optional_config = []
for meta_entry in galaxy_meta:
config_list = required_config if meta_entry.get('required', False) else optional_config
value = inject_data.get(meta_entry['key'], None)
if not value:
meta_type = meta_entry.get('type', 'str')
if meta_type == 'str':
value = ''
elif meta_type == 'list':
value = []
elif meta_type == 'dict':
value = {}
meta_entry['value'] = value
config_list.append(meta_entry)
link_pattern = re.compile(r"L\(([^)]+),\s+([^)]+)\)")
const_pattern = re.compile(r"C\(([^)]+)\)")
def comment_ify(v):
if isinstance(v, list):
v = ". ".join([l.rstrip('.') for l in v])
v = link_pattern.sub(r"\1 <\2>", v)
v = const_pattern.sub(r"'\1'", v)
return textwrap.fill(v, width=117, initial_indent="# ", subsequent_indent="# ", break_on_hyphens=False)
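        # Illustrative example (input values are made up): comment_ify(["See L(the docs, https://docs.ansible.com)", "C(name) is required"])
        # returns "# See the docs <https://docs.ansible.com>. 'name' is required", wrapped to 117 columns.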
loader = DataLoader()
templar = Templar(loader, variables={'required_config': required_config, 'optional_config': optional_config})
templar.environment.filters['comment_ify'] = comment_ify
meta_value = templar.template(meta_template)
return meta_value
def _require_one_of_collections_requirements(self, collections, requirements_file):
if collections and requirements_file:
raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
elif not collections and not requirements_file:
raise AnsibleError("You must specify a collection name or a requirements file.")
elif requirements_file:
requirements_file = GalaxyCLI._resolve_path(requirements_file)
requirements = self._parse_requirements_file(requirements_file, allow_old_format=False)['collections']
else:
requirements = []
for collection_input in collections:
requirement = None
if os.path.isfile(to_bytes(collection_input, errors='surrogate_or_strict')) or \
urlparse(collection_input).scheme.lower() in ['http', 'https']:
# Arg is a file path or URL to a collection
name = collection_input
else:
name, dummy, requirement = collection_input.partition(':')
requirements.append((name, requirement or '*', None))
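        # Each requirement is a (name, version_spec, source) tuple, e.g. ('my_ns.my_collection', '*', None)
        # for a bare positional arg ('*' meaning any version; the collection name here is illustrative only).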
return requirements
############################
# execute actions
############################
def execute_role(self):
"""
Perform the action on an Ansible Galaxy role. Must be combined with a further action like delete/install/init
as listed below.
"""
# To satisfy doc build
pass
def execute_collection(self):
"""
Perform the action on an Ansible Galaxy collection. Must be combined with a further action like init/install as
listed below.
"""
# To satisfy doc build
pass
def execute_build(self):
"""
Build an Ansible Galaxy collection artifact that can be stored in a central repository like Ansible Galaxy.
By default, this command builds from the current working directory. You can optionally pass in the
collection input path (where the ``galaxy.yml`` file is).
"""
force = context.CLIARGS['force']
output_path = GalaxyCLI._resolve_path(context.CLIARGS['output_path'])
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
elif os.path.isfile(b_output_path):
raise AnsibleError("- the output collection directory %s is a file - aborting" % to_native(output_path))
for collection_path in context.CLIARGS['args']:
collection_path = GalaxyCLI._resolve_path(collection_path)
build_collection(collection_path, output_path, force)
def execute_download(self):
collections = context.CLIARGS['args']
no_deps = context.CLIARGS['no_deps']
download_path = context.CLIARGS['download_path']
ignore_certs = context.CLIARGS['ignore_certs']
requirements_file = context.CLIARGS['requirements']
if requirements_file:
requirements_file = GalaxyCLI._resolve_path(requirements_file)
requirements = self._require_one_of_collections_requirements(collections, requirements_file)
download_path = GalaxyCLI._resolve_path(download_path)
b_download_path = to_bytes(download_path, errors='surrogate_or_strict')
if not os.path.exists(b_download_path):
os.makedirs(b_download_path)
download_collections(requirements, download_path, self.api_servers, (not ignore_certs), no_deps,
context.CLIARGS['allow_pre_release'])
return 0
def execute_init(self):
"""
Creates the skeleton framework of a role or collection that complies with the Galaxy metadata format.
Requires a role or collection name. The collection name must be in the format ``<namespace>.<collection>``.
"""
galaxy_type = context.CLIARGS['type']
init_path = context.CLIARGS['init_path']
force = context.CLIARGS['force']
obj_skeleton = context.CLIARGS['{0}_skeleton'.format(galaxy_type)]
obj_name = context.CLIARGS['{0}_name'.format(galaxy_type)]
inject_data = dict(
description='your {0} description'.format(galaxy_type),
ansible_plugin_list_dir=get_versioned_doclink('plugins/plugins.html'),
)
if galaxy_type == 'role':
inject_data.update(dict(
author='your name',
company='your company (optional)',
license='license (GPL-2.0-or-later, MIT, etc)',
role_name=obj_name,
role_type=context.CLIARGS['role_type'],
issue_tracker_url='http://example.com/issue/tracker',
repository_url='http://example.com/repository',
documentation_url='http://docs.example.com',
homepage_url='http://example.com',
min_ansible_version=ansible_version[:3], # x.y
dependencies=[],
))
obj_path = os.path.join(init_path, obj_name)
elif galaxy_type == 'collection':
namespace, collection_name = obj_name.split('.', 1)
inject_data.update(dict(
namespace=namespace,
collection_name=collection_name,
version='1.0.0',
readme='README.md',
authors=['your name <example@domain.com>'],
license=['GPL-2.0-or-later'],
repository='http://example.com/repository',
documentation='http://docs.example.com',
homepage='http://example.com',
issues='http://example.com/issue/tracker',
build_ignore=[],
))
obj_path = os.path.join(init_path, namespace, collection_name)
b_obj_path = to_bytes(obj_path, errors='surrogate_or_strict')
if os.path.exists(b_obj_path):
if os.path.isfile(obj_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % to_native(obj_path))
elif not force:
raise AnsibleError("- the directory %s already exists. "
"You can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % to_native(obj_path))
if obj_skeleton is not None:
own_skeleton = False
skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
else:
own_skeleton = True
obj_skeleton = self.galaxy.default_role_skeleton_path
skeleton_ignore_expressions = ['^.*/.git_keep$']
obj_skeleton = os.path.expanduser(obj_skeleton)
skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
if not os.path.exists(obj_skeleton):
raise AnsibleError("- the skeleton path '{0}' does not exist, cannot init {1}".format(
to_native(obj_skeleton), galaxy_type)
)
loader = DataLoader()
templar = Templar(loader, variables=inject_data)
# create role directory
if not os.path.exists(b_obj_path):
os.makedirs(b_obj_path)
for root, dirs, files in os.walk(obj_skeleton, topdown=True):
rel_root = os.path.relpath(root, obj_skeleton)
rel_dirs = rel_root.split(os.sep)
rel_root_dir = rel_dirs[0]
if galaxy_type == 'collection':
# A collection can contain templates in playbooks/*/templates and roles/*/templates
in_templates_dir = rel_root_dir in ['playbooks', 'roles'] and 'templates' in rel_dirs
else:
in_templates_dir = rel_root_dir == 'templates'
dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
for f in files:
filename, ext = os.path.splitext(f)
if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
continue
if galaxy_type == 'collection' and own_skeleton and rel_root == '.' and f == 'galaxy.yml.j2':
# Special use case for galaxy.yml.j2 in our own default collection skeleton. We build the options
# dynamically which requires special options to be set.
# The templated data's keys must match the key name but the inject data contains collection_name
# instead of name. We just make a copy and change the key back to name for this file.
template_data = inject_data.copy()
template_data['name'] = template_data.pop('collection_name')
meta_value = GalaxyCLI._get_skeleton_galaxy_yml(os.path.join(root, rel_root, f), template_data)
b_dest_file = to_bytes(os.path.join(obj_path, rel_root, filename), errors='surrogate_or_strict')
with open(b_dest_file, 'wb') as galaxy_obj:
galaxy_obj.write(to_bytes(meta_value, errors='surrogate_or_strict'))
elif ext == ".j2" and not in_templates_dir:
src_template = os.path.join(root, f)
dest_file = os.path.join(obj_path, rel_root, filename)
template_data = to_text(loader._get_file_contents(src_template)[0], errors='surrogate_or_strict')
b_rendered = to_bytes(templar.template(template_data), errors='surrogate_or_strict')
with open(dest_file, 'wb') as df:
df.write(b_rendered)
else:
f_rel_path = os.path.relpath(os.path.join(root, f), obj_skeleton)
shutil.copyfile(os.path.join(root, f), os.path.join(obj_path, f_rel_path))
for d in dirs:
b_dir_path = to_bytes(os.path.join(obj_path, rel_root, d), errors='surrogate_or_strict')
if not os.path.exists(b_dir_path):
os.makedirs(b_dir_path)
display.display("- %s %s was created successfully" % (galaxy_type.title(), obj_name))
def execute_info(self):
"""
        Prints out detailed information about an installed role, as well as info available from the Galaxy API.
"""
roles_path = context.CLIARGS['roles_path']
data = ''
for role in context.CLIARGS['args']:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, self.api, role)
if not gr._exists:
data = u"- the role %s was not found" % role
break
install_info = gr.install_info
if install_info:
if 'version' in install_info:
install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = False
if not context.CLIARGS['offline']:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
role_info.update(remote_data)
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec = req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data = self._display_role_info(role_info)
self.pager(data)
def execute_verify(self):
collections = context.CLIARGS['args']
search_paths = context.CLIARGS['collections_path']
ignore_certs = context.CLIARGS['ignore_certs']
ignore_errors = context.CLIARGS['ignore_errors']
requirements_file = context.CLIARGS['requirements']
requirements = self._require_one_of_collections_requirements(collections, requirements_file)
resolved_paths = [validate_collection_path(GalaxyCLI._resolve_path(path)) for path in search_paths]
verify_collections(requirements, resolved_paths, self.api_servers, (not ignore_certs), ignore_errors,
allow_pre_release=True)
return 0
def execute_install(self):
"""
        Install one or more roles (``ansible-galaxy role install``), or one or more collections (``ansible-galaxy collection install``).
You can pass in a list (roles or collections) or use the file
option listed below (these are mutually exclusive). If you pass in a list, it
can be a name (which will be downloaded via the galaxy API and github), or it can be a local tar archive file.
"""
if context.CLIARGS['type'] == 'collection':
collections = context.CLIARGS['args']
force = context.CLIARGS['force']
output_path = context.CLIARGS['collections_path']
ignore_certs = context.CLIARGS['ignore_certs']
ignore_errors = context.CLIARGS['ignore_errors']
requirements_file = context.CLIARGS['requirements']
no_deps = context.CLIARGS['no_deps']
force_deps = context.CLIARGS['force_with_deps']
if collections and requirements_file:
raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
elif not collections and not requirements_file:
raise AnsibleError("You must specify a collection name or a requirements file.")
if requirements_file:
requirements_file = GalaxyCLI._resolve_path(requirements_file)
requirements = self._require_one_of_collections_requirements(collections, requirements_file)
output_path = GalaxyCLI._resolve_path(output_path)
collections_path = C.COLLECTIONS_PATHS
if len([p for p in collections_path if p.startswith(output_path)]) == 0:
display.warning("The specified collections path '%s' is not part of the configured Ansible "
"collections paths '%s'. The installed collection won't be picked up in an Ansible "
"run." % (to_text(output_path), to_text(":".join(collections_path))))
output_path = validate_collection_path(output_path)
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
install_collections(requirements, output_path, self.api_servers, (not ignore_certs), ignore_errors,
no_deps, force, force_deps, context.CLIARGS['allow_pre_release'])
return 0
role_file = context.CLIARGS['role_file']
if not context.CLIARGS['args'] and role_file is None:
            # the user needs to specify either --role-file or a single user/role name
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
no_deps = context.CLIARGS['no_deps']
force_deps = context.CLIARGS['force_with_deps']
force = context.CLIARGS['force'] or force_deps
roles_left = []
if role_file:
if not (role_file.endswith('.yaml') or role_file.endswith('.yml')):
raise AnsibleError("Invalid role requirements file, it must end with a .yml or .yaml extension")
roles_left = self._parse_requirements_file(role_file)['roles']
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in context.CLIARGS['args']:
role = RoleRequirement.role_yaml_parse(rname.strip())
roles_left.append(GalaxyRole(self.galaxy, self.api, **role))
for role in roles_left:
            # when a roles file and explicit role names are both given, only process the matching roles
if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
display.vvv('Skipping role %s' % role.name)
continue
display.vvv('Processing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None:
if role.install_info['version'] != role.version or force:
if force:
display.display('- changing role %s from %s to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
role.remove()
else:
display.warning('- %s (%s) is already installed - use --force to change version to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
continue
else:
if not force:
display.display('- %s is already installed, skipping.' % str(role))
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
if not role.metadata:
display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
else:
role_dependencies = (role.metadata.get('dependencies') or []) + role.requirements
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, self.api, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None:
if dep_role not in roles_left:
display.display('- adding dependency: %s' % to_text(dep_role))
roles_left.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
if dep_role.install_info['version'] != dep_role.version:
if force_deps:
                                    display.display('- changing dependent role %s from %s to %s' %
(dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified"))
dep_role.remove()
roles_left.append(dep_role)
else:
display.warning('- dependency %s (%s) from role %s differs from already installed version (%s), skipping' %
(to_text(dep_role), dep_role.version, role.name, dep_role.install_info['version']))
else:
if force_deps:
roles_left.append(dep_role)
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
        Removes the list of roles passed as arguments from the local system.
"""
if not context.CLIARGS['args']:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in context.CLIARGS['args']:
role = GalaxyRole(self.galaxy, self.api, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))
return 0
def execute_list(self):
"""
List installed collections or roles
"""
if context.CLIARGS['type'] == 'role':
self.execute_list_role()
elif context.CLIARGS['type'] == 'collection':
self.execute_list_collection()
def execute_list_role(self):
"""
List all roles installed on the local system or a specific role
"""
path_found = False
role_found = False
warnings = []
roles_search_paths = context.CLIARGS['roles_path']
role_name = context.CLIARGS['role']
for path in roles_search_paths:
role_path = GalaxyCLI._resolve_path(path)
if os.path.isdir(path):
path_found = True
else:
warnings.append("- the configured path {0} does not exist.".format(path))
continue
if role_name:
# show the requested role, if it exists
gr = GalaxyRole(self.galaxy, self.api, role_name, path=os.path.join(role_path, role_name))
if os.path.isdir(gr.path):
role_found = True
display.display('# %s' % os.path.dirname(gr.path))
_display_role(gr)
break
warnings.append("- the role %s was not found" % role_name)
else:
if not os.path.exists(role_path):
warnings.append("- the configured path %s does not exist." % role_path)
continue
if not os.path.isdir(role_path):
warnings.append("- the configured path %s, exists, but it is not a directory." % role_path)
continue
display.display('# %s' % role_path)
path_files = os.listdir(role_path)
for path_file in path_files:
gr = GalaxyRole(self.galaxy, self.api, path_file, path=path)
if gr.metadata:
_display_role(gr)
# Do not warn if the role was found in any of the search paths
if role_found and role_name:
warnings = []
for w in warnings:
display.warning(w)
if not path_found:
raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))
return 0
def execute_list_collection(self):
"""
List all collections installed on the local system
"""
collections_search_paths = set(context.CLIARGS['collections_path'])
collection_name = context.CLIARGS['collection']
default_collections_path = C.config.get_configuration_definition('COLLECTIONS_PATHS').get('default')
warnings = []
path_found = False
collection_found = False
for path in collections_search_paths:
collection_path = GalaxyCLI._resolve_path(path)
if not os.path.exists(path):
if path in default_collections_path:
# don't warn for missing default paths
continue
warnings.append("- the configured path {0} does not exist.".format(collection_path))
continue
if not os.path.isdir(collection_path):
warnings.append("- the configured path {0}, exists, but it is not a directory.".format(collection_path))
continue
path_found = True
if collection_name:
# list a specific collection
validate_collection_name(collection_name)
namespace, collection = collection_name.split('.')
collection_path = validate_collection_path(collection_path)
b_collection_path = to_bytes(os.path.join(collection_path, namespace, collection), errors='surrogate_or_strict')
if not os.path.exists(b_collection_path):
warnings.append("- unable to find {0} in collection paths".format(collection_name))
continue
if not os.path.isdir(collection_path):
warnings.append("- the configured path {0}, exists, but it is not a directory.".format(collection_path))
continue
collection_found = True
collection = CollectionRequirement.from_path(b_collection_path, False)
fqcn_width, version_width = _get_collection_widths(collection)
_display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
_display_collection(collection, fqcn_width, version_width)
else:
# list all collections
collection_path = validate_collection_path(path)
if os.path.isdir(collection_path):
display.vvv("Searching {0} for collections".format(collection_path))
collections = find_existing_collections(collection_path)
else:
# There was no 'ansible_collections/' directory in the path, so there
                    # are no collections here.
display.vvv("No 'ansible_collections' directory found at {0}".format(collection_path))
continue
if not collections:
display.vvv("No collections found at {0}".format(collection_path))
continue
# Display header
fqcn_width, version_width = _get_collection_widths(collections)
_display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
# Sort collections by the namespace and name
collections.sort(key=to_text)
for collection in collections:
_display_collection(collection, fqcn_width, version_width)
# Do not warn if the specific collection was found in any of the search paths
if collection_found and collection_name:
warnings = []
for w in warnings:
display.warning(w)
if not path_found:
raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))
return 0
def execute_publish(self):
"""
Publish a collection into Ansible Galaxy. Requires the path to the collection tarball to publish.
"""
collection_path = GalaxyCLI._resolve_path(context.CLIARGS['args'])
wait = context.CLIARGS['wait']
timeout = context.CLIARGS['import_timeout']
publish_collection(collection_path, self.api, wait, timeout)
def execute_search(self):
        ''' Searches for roles on the Ansible Galaxy server '''
page_size = 1000
search = None
if context.CLIARGS['args']:
search = '+'.join(context.CLIARGS['args'])
if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color=C.COLOR_ERROR)
return True
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return True
def execute_login(self):
"""
        Verify the user's identity via GitHub and retrieve an auth token from Ansible Galaxy.
"""
# Authenticate with github and retrieve a token
if context.CLIARGS['token'] is None:
if C.GALAXY_TOKEN:
github_token = C.GALAXY_TOKEN
else:
login = GalaxyLogin(self.galaxy)
github_token = login.create_github_token()
else:
github_token = context.CLIARGS['token']
galaxy_response = self.api.authenticate(github_token)
if context.CLIARGS['token'] is None and C.GALAXY_TOKEN is None:
# Remove the token we created
login.remove_github_token()
# Store the Galaxy token
token = GalaxyToken()
token.set(galaxy_response['token'])
display.display("Successfully logged into Galaxy as %s" % galaxy_response['username'])
return 0
def execute_import(self):
""" used to import a role into Ansible Galaxy """
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
github_user = to_text(context.CLIARGS['github_user'], errors='surrogate_or_strict')
github_repo = to_text(context.CLIARGS['github_repo'], errors='surrogate_or_strict')
if context.CLIARGS['check_status']:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo,
reference=context.CLIARGS['reference'],
role_name=context.CLIARGS['role_name'])
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
color=C.COLOR_CHANGED)
return 0
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not context.CLIARGS['wait']:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if task[0]['state'] in ['SUCCESS', 'FAILED']:
finished = True
else:
time.sleep(10)
return 0
def execute_setup(self):
""" Setup an integration from Github or Travis for Ansible Galaxy roles"""
if context.CLIARGS['setup_list']:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']), color=C.COLOR_OK)
return 0
if context.CLIARGS['remove_id']:
# Remove a secret
self.api.remove_secret(context.CLIARGS['remove_id'])
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
source = context.CLIARGS['source']
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
secret = context.CLIARGS['secret']
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
""" Delete a role from Ansible Galaxy. """
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
display.display(resp['status'])
return True
|
tonk/ansible
|
lib/ansible/cli/galaxy.py
|
Python
|
gpl-3.0
| 71,937
|
[
"Galaxy"
] |
4a09c685b8df10d25399185750dc6409ddbb7a603852409500337ef801d0e7d0
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 8 18:02:30 2017
@author: Melikşah
"""
import numpy as np
import cv2
from matplotlib import pyplot as plt
### Activate the following two lines to auto-set the working directory to the directory where the script lives
#import os
#os.chdir(os.path.dirname(os.path.realpath(__file__)))
#type image file name here
imgName = 'sudoku-original.jpg'
img = cv2.imread(imgName,0)
def gausKernelArray(size, sigma):
from numpy import pi, exp, sqrt
s, k = sigma, int(size/2) # generate a (2k+1)x(2k+1) gaussian kernel with mean=0 and sigma = s
probs = [exp(-z*z/(2*s*s))/sqrt(2*pi*s*s) for z in range(-k,k+1)]
kernel = np.outer(probs, probs)
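    # The outer product of the 1-D Gaussian samples gives a separable (2k+1)x(2k+1) 2-D kernel.
    # Note: the script below smooths with cv2.GaussianBlur instead of this helper.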
return kernel
#def CannyEdgeDetector(img, minVal, maxVal):
#global theta, gausKernel, sobelx, sobely, gradx, grady, grad, thetaQ, gradC, tq, theta2, thresEdges
#gausKernel = gausKernelArray(5,0.7) #creates a Gaussian Kernel array
gauss = cv2.GaussianBlur(img,(9,9),1)
sobelx = cv2.Sobel(gauss,cv2.CV_64F,1,0,ksize=3) # Gradient_x of Gaussian Kernel
sobely = cv2.Sobel(gauss,cv2.CV_64F,0,1,ksize=3) # Gradient_y of Gaussian Kernel
#gradx = np.float64(cv2.filter2D(img,-1,sobelx)) #filter image with Gradient_x Gaus. Kernel
#grady = np.float64(cv2.filter2D(img,-1,sobely)) #filter image with Gradient_y Gaus. Kernel
grad = np.sqrt(np.add(np.square(sobelx),np.square(sobely))) #calculate gradient magnitude
#theta2 = np.arctan2(sobely,sobelx) #calculate gradient direction (angle)
theta = np.add((np.arctan2(sobely,sobelx)* 180) / np.pi , 180)
#thetaQ = (np.round(theta * (5.0 / np.pi))+5)%5
thetaQ = theta.copy()
for i in range(len(theta)):
for j in range(len(theta[0])):
if (theta[i,j] >= 360 - 22.5 and theta[i,j] <= 360 ) or (theta[i,j] >= 0 and theta[i,j] < 0 + 22.5) or (theta[i,j] >= 180 - 22.5 and theta[i,j] < 180 + 22.5): thetaQ[i,j] = 0
elif (theta[i,j] >= 0 + 22.5 and theta[i,j] < 90 - 22.5) or (theta[i,j] >= 180 + 22.5 and theta[i,j] < 270 - 22.5): thetaQ[i,j] = 1
elif (theta[i,j] >= 90 - 22.5 and theta[i,j] < 90 + 22.5) or (theta[i,j] >= 270 - 22.5 and theta[i,j] < 270 + 22.5): thetaQ[i,j] = 2
elif (theta[i,j] >= 90 + 22.5 and theta[i,j] < 180 - 22.5) or (theta[i,j] >= 270 + 22.5 and theta[i,j] < 360 - 22.5): thetaQ[i,j] = 3
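# Non-maximum suppression: keep a pixel only if its gradient magnitude is a local maximum
# along the quantized gradient direction (thetaQ); otherwise set it to zero.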
gradC = grad.copy()
for r in range(img.shape[0]):
for c in range(img.shape[1]):
if r == 0 or r == img.shape[0]-1 or c==0 or c == img.shape[1] -1:
gradC[r,c] = 0
continue
tq = thetaQ[r, c] % 4
if tq == 0: #0-180 degree
if grad[r,c] <= grad[r,c-1] or grad[r,c] <= grad[r,c+1]:
gradC[r,c] = 0
if tq == 1: #45-225 degree
if grad[r,c] <= grad[r-1,c+1] or grad[r,c] <= grad[r+1,c-1]:
gradC[r,c] = 0
if tq == 2: #90-270 degree
if grad[r,c] <= grad[r-1,c] or grad[r,c] <= grad[r+1,c]:
gradC[r,c] = 0
if tq == 3: #135-315 degree
if grad[r,c] <= grad[r-1,c-1] or grad[r,c] <= grad[r+1,c+1]:
gradC[r,c] = 0
thresH, thresL = 100, 40  # maxVal, minVal (high and low hysteresis thresholds)
strong = (gradC > thresH)
thresEdges = np.array(strong, dtype = np.uint8) + (gradC > thresL)
final = strong.copy()
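# Hysteresis thresholding: weak edges (gradient between the low and high thresholds) are kept
# only if they connect to a strong edge; the loop below grows the strong-edge set iteratively.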
currentPixels = []
for r in range(1, img.shape[0]-1):
for c in range(1, img.shape[1]-1):
if thresEdges[r,c] != 1:
continue
local = thresEdges[r-1:r+2,c-1:c+2]
localMax = local.max()
if localMax == 2:
currentPixels.append((r,c))
final[r,c] = 1
while len(currentPixels) > 0 :
print(len(currentPixels))
newPix = []
for r,c in currentPixels:
for dr in range(-1,2):
for dc in range(-1,2):
if dr == 0 and dc == 0:
continue
r2 = r+dr
c2 = c+dc
if thresEdges[r2,c2] == 1 and final[r2,c2] == 0:
newPix.append((r2,c2))
final[r2,c2] = 1
print(len(newPix))
currentPixels = newPix
#return final
plt.subplot(121),plt.imshow(cv2.Canny(img,40,100),cmap='gray'), plt.title('Built-in function')
plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(final,cmap='gray'), plt.title('Implemented')
plt.xticks([]), plt.yticks([])
plt.show()
|
mlkshclk/ComputerVision
|
canny.py
|
Python
|
mit
| 4,414
|
[
"Gaussian"
] |
6cdc6df303431ed9458f4df6f8f58c0c3b9ee84d2de9f3633f14a3a40d0d88d5
|
#!/usr/bin/env python
import sys
import os
import logging
from optparse import OptionParser
import glob
import shutil
from utils.GenomeLoader import GenomeLoader
from utils import utils_logging
import pysam
import command_runner
def read_white_list(white_list_file):
all_consensus = []
with open(white_list_file) as open_file:
for line in open_file:
sp_line = line.strip().split()
if sp_line and len(sp_line) > 0:
all_consensus.append(sp_line[0])
return all_consensus
_all_open_bam = {}
def _get_open_bam_file(bam_file):
if _all_open_bam.has_key(bam_file):
return _all_open_bam.get(bam_file)
logging.info('open %s' % bam_file)
open_bam = pysam.Samfile(bam_file, "rb")
_all_open_bam[bam_file] = open_bam
return open_bam
def close_bam_file(bam_file):
if _all_open_bam.has_key(bam_file):
open_bam = _all_open_bam.pop(bam_file)
logging.info('close %s' % bam_file)
open_bam.close()
def get_pysam_iterator(bam_file, options, consensus):
sam_file = _get_open_bam_file(bam_file)
return sam_file.fetch(consensus)
def get_read_group_from_bam_files(bam_files):
all_read_groups = []
for bam_file in bam_files:
open_bam_file = _get_open_bam_file(bam_file)
read_groups = open_bam_file.header.get('RG')
out = ['@RG']
for read_group in read_groups:
out.append('ID:%s' % read_group.pop('ID'))
for key in read_group.keys():
out.append('%s:%s' % (key, read_group.get(key)))
all_read_groups.append('\t'.join(out))
return all_read_groups
def load_from_sites_generator2(bam_file, options='', consensus=''):
"""This function return a generator that iterates over read pair where both pairs are mapping.
@return a tuple containing read1 read2 and optionally unmatched read1"""
iter_sam = get_pysam_iterator(bam_file, options=options, consensus=consensus)
all_unmatched_read1 = {}
all_unmatched_read2 = {}
single_reads = []
count_line = 0
for align_read in iter_sam:
count_line += 1
if align_read.is_unmapped:
continue
if align_read.is_read1:
align_read_r2 = all_unmatched_read2.pop(align_read.qname, None)
if align_read_r2:
yield ((align_read, align_read_r2))
else:
all_unmatched_read1[align_read.qname] = align_read
elif align_read.is_read2:
sam_record_r1 = all_unmatched_read1.pop(align_read.qname, None)
if sam_record_r1:
yield ((sam_record_r1, align_read))
else:
all_unmatched_read2[align_read.qname] = align_read
else:
single_reads.append(align_read)
if single_reads:
logging.warning("{} single read 1".format(len(single_reads)))
for read1 in single_reads:
yield ((read1, None))
    if all_unmatched_read1:
        raise StandardError("{} read 1 records left unmatched in generator\n{}".format(len(all_unmatched_read1), ', '.join(all_unmatched_read1.keys())))
    if all_unmatched_read2:
        raise StandardError("{} read 2 records left unmatched in generator\n{}".format(len(all_unmatched_read2), ', '.join(all_unmatched_read2.keys())))
def process_one_bam_file_one_consensus(bam_file, consensus_name):
sam_pair_generator = load_from_sites_generator2(bam_file, consensus=consensus_name)
all_first_reads_for_consensus = []
all_second_reads_for_consensus = []
single_end_for_consensus = []
for aligned_read_r1, aligned_read_r2, in sam_pair_generator:
rgid = aligned_read_r1.opt('RG')
if aligned_read_r2:
all_first_reads_for_consensus.append(aligned_read_to_fastq(aligned_read_r1, rgid=rgid))
all_second_reads_for_consensus.append(aligned_read_to_fastq(aligned_read_r2, rgid=rgid))
else:
single_end_for_consensus.append(aligned_read_to_fastq(aligned_read_r1, rgid=rgid))
aligned_read_r1 = None
aligned_read_r2 = None
return all_first_reads_for_consensus, all_second_reads_for_consensus, single_end_for_consensus
def extract_reads_from_one_consensus(bam_files, output_dir, consensus_name, consensus_sequence):
all_read1_for_that_consensus = []
all_read2_for_that_consensus = []
all_single_end_for_consensus = []
for bam_file in bam_files:
all_first_reads_for_consensus, all_second_reads_for_consensus, single_end_for_consensus = process_one_bam_file_one_consensus(bam_file,
consensus_name)
all_read1_for_that_consensus.extend(all_first_reads_for_consensus)
all_read2_for_that_consensus.extend(all_second_reads_for_consensus)
all_single_end_for_consensus.extend(single_end_for_consensus)
consensus_directory = os.path.join(output_dir, consensus_name + '_dir')
if not os.path.exists(consensus_directory):
os.mkdir(consensus_directory)
read1_file = os.path.join(consensus_directory, consensus_name + "_1.fastq")
read2_file = os.path.join(consensus_directory, consensus_name + "_2.fastq")
single_read_file = os.path.join(consensus_directory, consensus_name + "_single.fastq")
read1_consensus = os.path.join(consensus_directory, consensus_name + "_1.fa")
open_file = open(read1_consensus, 'w')
open_file.write('>%s\n%s\n' % (consensus_name, consensus_sequence))
open_file.close()
open_file = open(read1_file, 'w')
open_file.write('\n'.join(all_read1_for_that_consensus))
open_file.close()
open_file = open(read2_file, 'w')
open_file.write('\n'.join(all_read2_for_that_consensus))
open_file.close()
if all_single_end_for_consensus:
open_file = open(single_read_file, 'w')
open_file.write('\n'.join(all_single_end_for_consensus))
open_file.close()
return read1_consensus, read1_file, read2_file
open_fastq_files = {}
def _get_open_fastq_files(fastq_file):
if open_fastq_files.has_key(fastq_file):
return open_fastq_files.get(fastq_file)
open_file = open(fastq_file, 'w')
open_fastq_files[fastq_file] = open_file
return open_file
def close_fastq_files():
for filename in open_fastq_files.keys():
open_fastq_files.pop(filename).close()
def extract_reads_from_one_bam_file(bam_file, output_dir, list_consensus, genome_loader):
for consensus_name in list_consensus:
consensus_name, consensus_sequence = genome_loader.get_chr(consensus_name)
all_first_reads_for_consensus, all_second_reads_for_consensus, single_end_for_consensus = process_one_bam_file_one_consensus(bam_file,
consensus_name)
consensus_directory = os.path.join(output_dir, consensus_name + '_dir')
read1_file = os.path.join(consensus_directory, consensus_name + "_1.fastq")
read2_file = os.path.join(consensus_directory, consensus_name + "_2.fastq")
single_read_file = os.path.join(consensus_directory, consensus_name + "_single.fastq")
if not os.path.exists(consensus_directory):
os.mkdir(consensus_directory)
read1_consensus = os.path.join(consensus_directory, consensus_name + "_1.fa")
open_file = open(read1_consensus, 'w')
open_file.write('>%s\n%s\n' % (consensus_name, consensus_sequence))
open_file.close()
if len(all_first_reads_for_consensus):
open_file1 = _get_open_fastq_files(read1_file)
open_file1.write('\n'.join(all_first_reads_for_consensus) + '\n')
# open_file1.close()
if all_second_reads_for_consensus:
open_file2 = _get_open_fastq_files(read2_file)
open_file2.write('\n'.join(all_second_reads_for_consensus) + '\n')
if single_end_for_consensus:
open_file_single = _get_open_fastq_files(single_read_file)
open_file_single.write('\n'.join(single_end_for_consensus) + '\n')
logging.info("Extract %s read pairs and %s single reads from %s" % (len(all_first_reads_for_consensus), len(single_end_for_consensus),
consensus_name))
close_bam_file(bam_file)
def split_whitelist(white_list_file, nb_consensus_per_dir):
list_consensus = read_white_list(white_list_file)
list_of_list_consensus = []
temp_list = []
i = 0
for consensus in list_consensus:
i += 1
temp_list.append(consensus)
if i % nb_consensus_per_dir == 0:
list_of_list_consensus.append(temp_list)
temp_list = []
if len(temp_list) > 0:
list_of_list_consensus.append(temp_list)
# print "split into %s jobs of %s consensus"%(len(list_of_list_consensus),nb_consensus_per_dir)
for i, list_of_consensus in enumerate(list_of_list_consensus):
output_dir = os.path.join(os.path.curdir, '%s_dir' % (i + 1))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
output_file = os.path.join(output_dir, 'whitelist.txt')
with open(output_file, 'w') as open_file:
open_file.write('\n'.join(list_of_consensus))
yield (output_dir, output_file)
def extract_reads_from_all_bam_files_set_of_consensus_old(bam_files, list_consensus, output_dir, genome_loader=None,
all_read1_consensus_file=None):
if genome_loader is None:
genome_loader = GenomeLoader(all_read1_consensus_file, keep_until_done=True)
for consensus_name in list_consensus:
logging.info("Extract reads from %s " % consensus_name)
consensus_name, consensus_sequence = genome_loader.get_chr(consensus_name)
extract_reads_from_one_consensus(bam_files, output_dir, consensus_name, consensus_sequence)
def extract_reads_from_all_bam_files_set_of_consensus(bam_files, list_consensus, output_dir, genome_loader=None,
all_read1_consensus_file=None):
all_previous_dir = glob.glob(os.path.join(output_dir, '*_dir'))
if len(all_previous_dir):
logging.info("cleanup previous run in %s" % output_dir)
for dir in all_previous_dir:
shutil.rmtree(dir)
if genome_loader is None:
genome_loader = GenomeLoader(all_read1_consensus_file, keep_until_done=True)
for bam_file in bam_files:
extract_reads_from_one_bam_file(bam_file, output_dir, list_consensus, genome_loader)
    # All the reads have been extracted; now close the fastq files
close_fastq_files()
def aligned_read_to_fastq(aligned_read, rgid=None):
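    # Builds a 4-line FASTQ record from a pysam aligned read. The optional read-group id is
    # appended directly to the read name (no separator), followed by a /1 or /2 mate suffix.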
out = []
if aligned_read.is_read1:
suffix = '/1'
elif aligned_read.is_read2:
suffix = '/2'
else:
suffix = ''
if rgid:
rgid_str = 'RGID:%s' % (rgid)
else:
rgid_str = ''
out.append('@%s%s%s' % (aligned_read.qname, rgid_str, suffix))
if aligned_read.is_read2:
# Do not reverse complement the fastq file
#out.append(DNA_tools.rev_complements(aligned_read.seq))
#out.append('+')
#out.append(aligned_read.qual[::-1])
out.append(aligned_read.seq)
out.append('+')
out.append(aligned_read.qual)
else:
out.append(aligned_read.seq)
out.append('+')
out.append(aligned_read.qual)
return '\n'.join(out)
def main():
# initialize the logging
utils_logging.init_logging(logging.INFO)
#Setup options
optparser = _prepare_optparser()
(options, args) = optparser.parse_args()
#verify options
arg_pass = _verifyOption(options)
if not arg_pass:
logging.warning(optparser.get_usage())
logging.critical("Non valid arguments: exit")
sys.exit(1)
if options.debug:
utils_logging.init_logging(logging.DEBUG)
bam_files = [options.bam_file]
if len(args) > 0:
bam_files.extend(args)
command_runner.set_command_to_run_localy()
if options.output_dir:
list_consensus = read_white_list(options.white_list_file)
extract_reads_from_all_bam_files_set_of_consensus(bam_files, list_consensus, options.output_dir,
genome_loader=None,
all_read1_consensus_file=options.read1_consensus_file)
else:
iter_dir_and_file = split_whitelist(white_list_file=options.white_list_file,
nb_consensus_per_dir=options.nb_consensus_per_dir)
for output_dir, sub_whitelist in iter_dir_and_file:
command = 'python %s -g %s -w %s -o %s -b %s' % (
sys.argv[0], options.read1_consensus_file, sub_whitelist, output_dir, ' '.join(bam_files))
print command
def _prepare_optparser():
"""Prepare optparser object. New options will be added in this
function first.
"""
usage = """usage: %prog <-b bam_file> <-g genome_file> <-k known_sites>"""
description = """This script extract reads from an aligned bam file and create the corresponding fastq files."""
optparser = OptionParser(description=description, usage=usage, add_help_option=False)
optparser.add_option("-h", "--help", action="help", help="show this help message and exit.")
optparser.add_option("-b", "--bam_file", dest="bam_file", type="string",
help="The bam file from which the reads should be extracted.")
optparser.add_option("-g", "--read1_consensus_file", dest="read1_consensus_file", type="string",
help="The fasta file containing the read1 consensus file corresponding to the bam file.")
optparser.add_option("-w", "--white_list_file", dest="white_list_file", type="string",
help="The file containing the name of the consensus to assemble")
optparser.add_option("-o", "--output_dir", dest="output_dir", type="string",
help="This act as a flag to bypass the nb_consensus_per_dir. This means that all consensus in the whitelist will be extracted in the output_dir.")
optparser.add_option("-p", "--number_of_process", dest="number_of_process", type="int", default=10,
help="The number of process used to extract the information from the bam files.")
optparser.add_option("-n", "--nb_consensus_per_dir", dest="nb_consensus_per_dir", type="int", default=50,
help="The number of that should be held in each directory.")
optparser.add_option("--debug", dest="debug", action='store_true', default=False,
help="Output debug statment. Default: %default")
return optparser
def _verifyOption(options):
"""Check if the mandatory option are present in the options objects.
@return False if any argument is wrong."""
arg_pass = True
if not options.bam_file:
logging.error("You must specify a bam file -b.")
arg_pass = False
if not options.read1_consensus_file:
logging.error("You must specify a fasta file with the read1 consensus with -g.")
arg_pass = False
elif not os.path.exists(options.read1_consensus_file):
logging.error("You must specify a an existing fasta file with -g.")
arg_pass = False
if not options.white_list_file:
logging.error("You must specify a list of consensus to use with -w.")
arg_pass = False
elif not os.path.exists(options.white_list_file):
logging.error("You must specify a an existing list of consensus to use with -w.")
arg_pass = False
return arg_pass
if __name__ == "__main__":
main()
if __name__ == "1__main__":
bam_files = sys.argv[1:]
for bam_file in bam_files:
open_bam = _get_open_bam_file(bam_file)
|
tcezard/RADmapper
|
bin/RAD_bam_to_fastq.py
|
Python
|
mit
| 16,032
|
[
"pysam"
] |
871c23136490c249bdddd1bc5e9213d9369d2c0297c7ed56f13404d3cb545db8
|
##
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing TINKER, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import fileinput
import glob
import os
import re
import shutil
import sys
import tempfile
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import mkdir
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import DARWIN, LINUX, get_os_type
class EB_TINKER(EasyBlock):
"""Support for building/installing TINKER."""
def __init__(self, *args, **kwargs):
"""Custom easyblock constructor for TINKER: initialise class variables."""
super(EB_TINKER, self).__init__(*args, **kwargs)
self.build_subdir = None
self.build_in_installdir = True
def configure_step(self):
"""Custom configuration procedure for TINKER."""
# make sure FFTW is available
if get_software_root('FFTW') is None:
raise EasyBuildError("FFTW dependency is not available.")
os_dirs = {
LINUX: 'linux',
DARWIN: 'macosx',
}
os_type = get_os_type()
os_dir = os_dirs.get(os_type)
if os_dir is None:
raise EasyBuildError("Failed to determine OS directory for %s (known: %s)", os_type, os_dirs)
comp_dirs = {
toolchain.INTELCOMP: 'intel',
toolchain.GCC: 'gfortran',
}
comp_fam = self.toolchain.comp_family()
comp_dir = comp_dirs.get(comp_fam)
if comp_dir is None:
raise EasyBuildError("Failed to determine compiler directory for %s (known: %s)", comp_fam, comp_dirs)
self.build_subdir = os.path.join(os_dir, comp_dir)
self.log.info("Using build scripts from %s subdirectory" % self.build_subdir)
# patch 'link.make' script to use FFTW provided via EasyBuild
link_make_fp = os.path.join(self.cfg['start_dir'], self.build_subdir, 'link.make')
for line in fileinput.input(link_make_fp, inplace=1, backup='.orig'):
line = re.sub(r"libfftw3_threads.a libfftw3.a", r"-L$EBROOTFFTW/lib -lfftw3_threads -lfftw3", line)
sys.stdout.write(line)
def build_step(self):
"""Custom build procedure for TINKER."""
source_dir = os.path.join(self.cfg['start_dir'], 'source')
try:
os.chdir(source_dir)
except OSError, err:
raise EasyBuildError("Failed to move to %s: %s", source_dir, err)
run_cmd(os.path.join(self.cfg['start_dir'], self.build_subdir, 'compile.make'))
run_cmd(os.path.join(self.cfg['start_dir'], self.build_subdir, 'library.make'))
run_cmd(os.path.join(self.cfg['start_dir'], self.build_subdir, 'link.make'))
def test_step(self):
"""Custom built-in test procedure for TINKER."""
if self.cfg['runtest']:
# copy tests, params and built binaries to temporary directory for testing
tmpdir = tempfile.mkdtemp()
testdir = os.path.join(tmpdir, 'test')
mkdir(os.path.join(tmpdir, 'bin'))
binaries = glob.glob(os.path.join(self.cfg['start_dir'], 'source', '*.x'))
try:
for binary in binaries:
shutil.copy2(binary, os.path.join(tmpdir, 'bin', os.path.basename(binary)[:-2]))
shutil.copytree(os.path.join(self.cfg['start_dir'], 'test'), testdir)
shutil.copytree(os.path.join(self.cfg['start_dir'], 'params'), os.path.join(tmpdir, 'params'))
except OSError, err:
raise EasyBuildError("Failed to copy binaries and tests to %s: %s", tmpdir, err)
try:
os.chdir(testdir)
except OSError, err:
raise EasyBuildError("Failed to move to %s to run tests: %s", testdir, err)
# run all tests via the provided 'run' scripts
tests = glob.glob(os.path.join(testdir, '*.run'))
            # gpcr takes too long (~1h), ifabp fails due to input issues (?)
tests = [t for t in tests if not (t.endswith('gpcr.run') or t.endswith('ifabp.run'))]
for test in tests:
run_cmd(test)
def install_step(self):
"""Custom install procedure for TINKER."""
source_dir = os.path.join(self.cfg['start_dir'], 'source')
try:
os.chdir(source_dir)
except OSError, err:
raise EasyBuildError("Failed to move to %s: %s", source_dir, err)
mkdir(os.path.join(self.cfg['start_dir'], 'bin'))
run_cmd(os.path.join(self.cfg['start_dir'], self.build_subdir, 'rename.make'))
def sanity_check_step(self):
"""Custom sanity check for TINKER."""
custom_paths = {
'files': ['tinker/source/libtinker.a'],
'dirs': ['tinker/bin'],
}
super(EB_TINKER, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Custom guesses for module file prepend-path statements."""
guesses = super(EB_TINKER, self).make_module_req_guess()
guesses['PATH'].append(os.path.join('tinker', 'bin'))
guesses['LIBRARY_PATH'].append(os.path.join('tinker', 'source'))
return guesses
|
wpoely86/easybuild-easyblocks
|
easybuild/easyblocks/t/tinker.py
|
Python
|
gpl-2.0
| 6,440
|
[
"TINKER"
] |
8887ef3d7a413c7559eda1ce166e2e86f484ffdcc8f6227ac7ccb7e2ba1e930f
|
from math import sin, cos, pi, sqrt
import numpy as np
from ase.atoms import Atoms, Atom
from ase.units import Bohr, Ry
def read_scf(filename):
try:
f = open(filename + '.scf', 'r')
pip = f.readlines()
ene = []
for line in pip:
if line[0:4] == ':ENE':
ene.append(float(line[43:59]) * Ry)
f.close()
return ene
except:
return None
def read_struct(filename, ase = True):
f = open(filename, 'r')
pip = f.readlines()
lattice = pip[1][0:3]
nat = int(pip[1][27:30])
cell = np.zeros(6)
for i in range(6):
cell[i] = float(pip[3][0 + i * 10:10 + i * 10])
cell[0:3] = cell[0:3] * Bohr
if lattice == 'P ':
lattice = 'P'
elif lattice == 'H ':
lattice = 'P'
cell[3:6] = [90.0, 90.0, 120.0]
elif lattice == 'R ':
lattice = 'R'
elif lattice == 'F ':
lattice = 'F'
elif lattice == 'B ':
lattice = 'I'
elif lattice == 'CXY':
lattice = 'C'
elif lattice == 'CXZ':
lattice = 'B'
elif lattice == 'CYZ':
lattice = 'A'
else:
print 'TEST needed'
pos = np.array([])
atomtype = []
rmt = []
neq = np.zeros(nat)
iline = 4
indif = 0
for iat in range(nat):
indifini = indif
if len(pos) == 0:
pos = np.array([[float(pip[iline][12:22]),
float(pip[iline][25:35]),
float(pip[iline][38:48])]])
else:
pos = np.append(pos, np.array([[float(pip[iline][12:22]),
float(pip[iline][25:35]),
float(pip[iline][38:48])]]),
axis = 0)
indif += 1
iline += 1
neq[iat] = int(pip[iline][15:17])
iline += 1
for ieq in range(1, int(neq[iat])):
pos = np.append(pos, np.array([[float(pip[iline][12:22]),
float(pip[iline][25:35]),
float(pip[iline][38:48])]]),
axis = 0)
indif += 1
iline += 1
for i in range(indif - indifini):
atomtype.append(pip[iline][0:2].replace(' ', ''))
rmt.append(float(pip[iline][43:48]))
iline += 4
if ase:
cell2 = coorsys(cell)
atoms = Atoms(atomtype, pos, pbc = True)
atoms.set_cell(cell2, scale_atoms = True)
cell2 = np.dot(c2p(lattice), cell2)
if lattice == 'R':
atoms.set_cell(cell2, scale_atoms = True)
else:
atoms.set_cell(cell2)
return atoms
else:
return cell, lattice, pos, atomtype, rmt
def write_struct(filename, atoms2 = None, rmt = None, lattice = 'P', zza=None):
atoms=atoms2.copy()
atoms.set_scaled_positions(atoms.get_scaled_positions())
f = file(filename, 'w')
f.write('ASE generated\n')
nat = len(atoms)
    if rmt is None:
rmt = [2.0] * nat
f.write(lattice+' LATTICE,NONEQUIV.ATOMS:%3i\nMODE OF CALC=RELA\n'%nat)
cell = atoms.get_cell()
metT = np.dot(cell, np.transpose(cell))
cell2 = cellconst(metT)
cell2[0:3] = cell2[0:3] / Bohr
f.write(('%10.6f' * 6) % tuple(cell2) + '\n')
#print atoms.get_positions()[0]
if zza is None:
zza = atoms.get_atomic_numbers()
for ii in range(nat):
f.write('ATOM %3i: ' % (ii + 1))
pos = atoms.get_scaled_positions()[ii]
f.write('X=%10.8f Y=%10.8f Z=%10.8f\n' % tuple(pos))
f.write(' MULT= 1 ISPLIT= 1\n')
zz = zza[ii]
if zz > 71:
ro = 0.000005
elif zz > 36:
ro = 0.00001
elif zz > 18:
ro = 0.00005
else:
ro = 0.0001
f.write('%-10s NPT=%5i R0=%9.8f RMT=%10.4f Z:%10.5f\n' %
(atoms.get_chemical_symbols()[ii], 781, ro, rmt[ii], zz))
f.write('LOCAL ROT MATRIX: %9.7f %9.7f %9.7f\n' % (1.0, 0.0, 0.0))
f.write(' %9.7f %9.7f %9.7f\n' % (0.0, 1.0, 0.0))
f.write(' %9.7f %9.7f %9.7f\n' % (0.0, 0.0, 1.0))
f.write(' 0\n')
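# cellconst() recovers the six cell constants [a, b, c, alpha, beta, gamma] (angles in degrees)
# from the metric tensor cell.cell^T; coorsys() builds the Cartesian cell matrix back from those constants.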
def cellconst(metT):
aa = np.sqrt(metT[0, 0])
bb = np.sqrt(metT[1, 1])
cc = np.sqrt(metT[2, 2])
gamma = np.arccos(metT[0, 1] / (aa * bb)) / np.pi * 180.0
beta = np.arccos(metT[0, 2] / (aa * cc)) / np.pi * 180.0
alpha = np.arccos(metT[1, 2] / (bb * cc)) / np.pi * 180.0
return np.array([aa, bb, cc, alpha, beta, gamma])
def coorsys(latconst):
a = latconst[0]
b = latconst[1]
c = latconst[2]
cal = np.cos(latconst[3] * np.pi / 180.0)
cbe = np.cos(latconst[4] * np.pi / 180.0)
cga = np.cos(latconst[5] * np.pi / 180.0)
sal = np.sin(latconst[3] * np.pi / 180.0)
sbe = np.sin(latconst[4] * np.pi / 180.0)
sga = np.sin(latconst[5] * np.pi / 180.0)
return np.array([[a, b * cga, c * cbe],
[0, b * sga, c * (cal - cbe * cga) / sga],
[0, 0, c * np.sqrt(1 - cal**2 - cbe**2 - cga**2 + 2 * cal * cbe * cga) / sga]]).transpose()
def c2p(lattice):
# apply as eg. cell2 = np.dot(ct.c2p('F'), cell)
if lattice == 'P':
cell = np.eye(3)
elif lattice == 'F':
cell = np.array([[0.0, 0.5, 0.5], [0.5, 0.0, 0.5], [0.5, 0.5, 0.0]])
elif lattice == 'I':
cell = np.array([[-0.5, 0.5, 0.5], [0.5, -0.5, 0.5], [0.5, 0.5, -0.5]])
elif lattice == 'C':
cell = np.array([[0.5, 0.5, 0.0], [0.5, -0.5, 0.0], [0.0, 0.0, -1.0]])
elif lattice == 'R':
cell = np.array([[2.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0], [-1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0], [-1.0 / 3.0, -2.0/3.0, 1.0 / 3.0]])
else:
print 'lattice is ' + lattice + '!'
return cell
|
grhawk/ASE
|
tools/ase/io/wien2k.py
|
Python
|
gpl-2.0
| 5,868
|
[
"ASE"
] |
aea6bc3666b391475e2354c7baea307a716f87d08260045559ec9e2fba070ded
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the courseware unit bookmarks.
"""
import json
from nose.plugins.attrib import attr
import requests
from common.test.acceptance.pages.studio.auto_auth import AutoAuthPage as StudioAutoAuthPage
from common.test.acceptance.pages.lms.auto_auth import AutoAuthPage as LmsAutoAuthPage
from common.test.acceptance.pages.lms.bookmarks import BookmarksPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.course_nav import CourseNavPage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage
from common.test.acceptance.pages.common.logout import LogoutPage
from common.test.acceptance.pages.common import BASE_URL
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.tests.helpers import EventsTestMixin, UniqueCourseTest, is_404_page
class BookmarksTestMixin(EventsTestMixin, UniqueCourseTest):
"""
Mixin with helper methods for testing Bookmarks.
"""
USERNAME = "STUDENT"
EMAIL = "student@example.com"
def create_course_fixture(self, num_chapters):
"""
Create course fixture
Arguments:
num_chapters: number of chapters to create
"""
self.course_fixture = CourseFixture( # pylint: disable=attribute-defined-outside-init
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
xblocks = []
for index in range(num_chapters):
xblocks += [
XBlockFixtureDesc('chapter', 'TestSection{}'.format(index)).add_children(
XBlockFixtureDesc('sequential', 'TestSubsection{}'.format(index)).add_children(
XBlockFixtureDesc('vertical', 'TestVertical{}'.format(index))
)
)
]
self.course_fixture.add_children(*xblocks).install()
def verify_event_data(self, event_type, event_data):
"""
Verify emitted event data.
Arguments:
event_type: expected event type
event_data: expected event data
"""
actual_events = self.wait_for_events(event_filter={'event_type': event_type}, number_of_matches=1)
self.assert_events_match(event_data, actual_events)
@attr(shard=8)
class BookmarksTest(BookmarksTestMixin):
"""
Tests to verify bookmarks functionality.
"""
def setUp(self):
"""
Initialize test setup.
"""
super(BookmarksTest, self).setUp()
self.course_outline_page = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.bookmarks_page = BookmarksPage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
# Get session to be used for bookmarking units
self.session = requests.Session()
params = {'username': self.USERNAME, 'email': self.EMAIL, 'course_id': self.course_id}
response = self.session.get(BASE_URL + "/auto_auth", params=params)
self.assertTrue(response.ok, "Failed to get session")
def _test_setup(self, num_chapters=2):
"""
Setup test settings.
Arguments:
num_chapters: number of chapters to create in course
"""
self.create_course_fixture(num_chapters)
# Auto-auth register for the course.
LmsAutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL, course_id=self.course_id).visit()
self.courseware_page.visit()
def _bookmark_unit(self, location):
"""
Bookmark a unit
Arguments:
location (str): unit location
"""
_headers = {
'Content-type': 'application/json',
'X-CSRFToken': self.session.cookies['csrftoken'],
}
params = {'course_id': self.course_id}
data = json.dumps({'usage_id': location})
response = self.session.post(
BASE_URL + '/api/bookmarks/v1/bookmarks/',
data=data,
params=params,
headers=_headers
)
self.assertTrue(response.ok, "Failed to bookmark unit")
def _bookmark_units(self, num_units):
"""
Bookmark first `num_units` units
Arguments:
num_units(int): Number of units to bookmarks
"""
xblocks = self.course_fixture.get_nested_xblocks(category="vertical")
for index in range(num_units):
self._bookmark_unit(xblocks[index].locator)
def _breadcrumb(self, num_units, modified_name=None):
"""
Creates breadcrumbs for the first `num_units`
Arguments:
num_units(int): Number of units for which we want to create breadcrumbs
Returns:
list of breadcrumbs
"""
breadcrumbs = []
for index in range(num_units):
breadcrumbs.append(
[
'TestSection{}'.format(index),
'TestSubsection{}'.format(index),
modified_name if modified_name else 'TestVertical{}'.format(index)
]
)
return breadcrumbs
def _delete_section(self, index):
""" Delete a section at index `index` """
# Logout and login as staff
LogoutPage(self.browser).visit()
StudioAutoAuthPage(
self.browser, username=self.USERNAME, email=self.EMAIL, course_id=self.course_id, staff=True
).visit()
# Visit course outline page in studio.
self.course_outline_page.visit()
self.course_outline_page.wait_for_page()
self.course_outline_page.section_at(index).delete()
# Logout and login as a student.
LogoutPage(self.browser).visit()
LmsAutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL, course_id=self.course_id).visit()
# Visit courseware as a student.
self.courseware_page.visit()
self.courseware_page.wait_for_page()
def _toggle_bookmark_and_verify(self, bookmark_icon_state, bookmark_button_state, bookmarked_count):
"""
Bookmark/Un-Bookmark a unit and then verify
"""
self.assertTrue(self.courseware_page.bookmark_button_visible)
self.courseware_page.click_bookmark_unit_button()
self.assertEqual(self.courseware_page.bookmark_icon_visible, bookmark_icon_state)
self.assertEqual(self.courseware_page.bookmark_button_state, bookmark_button_state)
self.bookmarks_page.click_bookmarks_button()
self.assertEqual(self.bookmarks_page.count(), bookmarked_count)
def _verify_pagination_info(
self,
bookmark_count_on_current_page,
header_text,
previous_button_enabled,
next_button_enabled,
current_page_number,
total_pages
):
"""
Verify pagination info
"""
self.assertEqual(self.bookmarks_page.count(), bookmark_count_on_current_page)
self.assertEqual(self.bookmarks_page.get_pagination_header_text(), header_text)
self.assertEqual(self.bookmarks_page.is_previous_page_button_enabled(), previous_button_enabled)
self.assertEqual(self.bookmarks_page.is_next_page_button_enabled(), next_button_enabled)
self.assertEqual(self.bookmarks_page.get_current_page_number(), current_page_number)
self.assertEqual(self.bookmarks_page.get_total_pages, total_pages)
def _navigate_to_bookmarks_list(self):
"""
Navigates and verifies the bookmarks list page.
"""
self.bookmarks_page.click_bookmarks_button()
self.assertTrue(self.bookmarks_page.results_present())
self.assertEqual(self.bookmarks_page.results_header_text(), 'My Bookmarks')
def _verify_breadcrumbs(self, num_units, modified_name=None):
"""
Verifies the breadcrumb trail.
"""
bookmarked_breadcrumbs = self.bookmarks_page.breadcrumbs()
# Verify bookmarked breadcrumbs.
breadcrumbs = self._breadcrumb(num_units=num_units, modified_name=modified_name)
breadcrumbs.reverse()
self.assertEqual(bookmarked_breadcrumbs, breadcrumbs)
def update_and_publish_block_display_name(self, modified_name):
"""
Update and publish the block/unit display name.
"""
self.course_outline_page.visit()
self.course_outline_page.wait_for_page()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
container_page = section.subsection_at(0).unit_at(0).go_to()
self.course_fixture._update_xblock(container_page.locator, { # pylint: disable=protected-access
"metadata": {
"display_name": modified_name
}
})
container_page.visit()
container_page.wait_for_page()
self.assertEqual(container_page.name, modified_name)
container_page.publish_action.click()
def test_bookmark_button(self):
"""
Scenario: Bookmark unit button toggles correctly
Given that I am a registered user
And I visit my courseware page
For first 2 units
I visit the unit
And I can see the Bookmark button
When I click on Bookmark button
Then unit should be bookmarked
Then I click again on the bookmark button
And I should see a unit un-bookmarked
"""
self._test_setup()
for index in range(2):
self.course_nav.go_to_section('TestSection{}'.format(index), 'TestSubsection{}'.format(index))
self._toggle_bookmark_and_verify(True, 'bookmarked', 1)
self.bookmarks_page.click_bookmarks_button(False)
self._toggle_bookmark_and_verify(False, '', 0)
def test_empty_bookmarks_list(self):
"""
Scenario: An empty bookmarks list is shown if there are no bookmarked units.
Given that I am a registered user
And I visit my courseware page
And I can see the Bookmarks button
When I click on Bookmarks button
Then I should see an empty bookmarks list
And empty bookmarks list content is correct
"""
self._test_setup()
self.assertTrue(self.bookmarks_page.bookmarks_button_visible())
self.bookmarks_page.click_bookmarks_button()
self.assertEqual(self.bookmarks_page.results_header_text(), 'My Bookmarks')
self.assertEqual(self.bookmarks_page.empty_header_text(), 'You have not bookmarked any courseware pages yet.')
empty_list_text = ("Use bookmarks to help you easily return to courseware pages. To bookmark a page, "
"select Bookmark in the upper right corner of that page. To see a list of all your "
"bookmarks, select Bookmarks in the upper left corner of any courseware page.")
self.assertEqual(self.bookmarks_page.empty_list_text(), empty_list_text)
def test_bookmarks_list(self):
"""
Scenario: A bookmarks list is shown if there are bookmarked units.
Given that I am a registered user
And I visit my courseware page
And I have bookmarked 2 units
When I click on Bookmarks button
Then I should see a bookmarked list with 2 bookmark links
And breadcrumb trail is correct for a bookmark
When I click on bookmarked link
Then I can navigate to correct bookmarked unit
"""
self._test_setup()
self._bookmark_units(2)
self._navigate_to_bookmarks_list()
self._verify_breadcrumbs(num_units=2)
self._verify_pagination_info(
bookmark_count_on_current_page=2,
header_text='Showing 1-2 out of 2 total',
previous_button_enabled=False,
next_button_enabled=False,
current_page_number=1,
total_pages=1
)
# get usage ids for units
xblocks = self.course_fixture.get_nested_xblocks(category="vertical")
xblock_usage_ids = [xblock.locator for xblock in xblocks]
# Verify link navigation
for index in range(2):
self.bookmarks_page.click_bookmarked_block(index)
self.courseware_page.wait_for_page()
self.assertIn(self.courseware_page.active_usage_id(), xblock_usage_ids)
self.courseware_page.visit().wait_for_page()
self.bookmarks_page.click_bookmarks_button()
def test_bookmark_shows_updated_breadcrumb_after_publish(self):
"""
Scenario: A bookmark breadcrumb trail is updated after publishing the changed display name.
Given that I am a registered user
And I visit my courseware page
And I can see bookmarked unit
Then I visit unit page in studio
Then I change unit display_name
And I publish the changes
Then I visit my courseware page
And I visit bookmarks list page
When I see the bookmark
Then I can see the breadcrumb trail
with updated display_name.
"""
self._test_setup(num_chapters=1)
self._bookmark_units(num_units=1)
self._navigate_to_bookmarks_list()
self._verify_breadcrumbs(num_units=1)
LogoutPage(self.browser).visit()
LmsAutoAuthPage(
self.browser,
username=self.USERNAME,
email=self.EMAIL,
course_id=self.course_id,
staff=True
).visit()
modified_name = "Updated name"
self.update_and_publish_block_display_name(modified_name)
LogoutPage(self.browser).visit()
LmsAutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL, course_id=self.course_id).visit()
self.courseware_page.visit()
self._navigate_to_bookmarks_list()
self._verify_breadcrumbs(num_units=1, modified_name=modified_name)
def test_unreachable_bookmark(self):
"""
Scenario: We should get a HTTP 404 for an unreachable bookmark.
Given that I am a registered user
And I visit my courseware page
And I have bookmarked 2 units
Then I delete a bookmarked unit
Then I click on Bookmarks button
And I should see a bookmarked list
When I click on deleted bookmark
        Then I should be navigated to the 404 page
"""
self._test_setup(num_chapters=1)
self._bookmark_units(1)
self._delete_section(0)
self._navigate_to_bookmarks_list()
self._verify_pagination_info(
bookmark_count_on_current_page=1,
header_text='Showing 1 out of 1 total',
previous_button_enabled=False,
next_button_enabled=False,
current_page_number=1,
total_pages=1
)
self.bookmarks_page.click_bookmarked_block(0)
self.assertTrue(is_404_page(self.browser))
def test_page_size_limit(self):
"""
Scenario: We can't get bookmarks more than default page size.
Given that I am a registered user
And I visit my courseware page
And I have bookmarked all the 11 units available
Then I click on Bookmarks button
And I should see a bookmarked list
And bookmark list contains 10 bookmarked items
"""
self._test_setup(11)
self._bookmark_units(11)
self._navigate_to_bookmarks_list()
self._verify_pagination_info(
bookmark_count_on_current_page=10,
header_text='Showing 1-10 out of 11 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
def test_pagination_with_single_page(self):
"""
Scenario: Bookmarks list pagination is working as expected for single page
Given that I am a registered user
And I visit my courseware page
And I have bookmarked all the 2 units available
Then I click on Bookmarks button
And I should see a bookmarked list with 2 bookmarked items
And I should see paging header and footer with correct data
And previous and next buttons are disabled
"""
self._test_setup(num_chapters=2)
self._bookmark_units(num_units=2)
self.bookmarks_page.click_bookmarks_button()
self.assertTrue(self.bookmarks_page.results_present())
self._verify_pagination_info(
bookmark_count_on_current_page=2,
header_text='Showing 1-2 out of 2 total',
previous_button_enabled=False,
next_button_enabled=False,
current_page_number=1,
total_pages=1
)
def test_next_page_button(self):
"""
Scenario: Next button is working as expected for bookmarks list pagination
Given that I am a registered user
And I visit my courseware page
And I have bookmarked all the 12 units available
Then I click on Bookmarks button
And I should see a bookmarked list of 10 items
And I should see paging header and footer with correct info
Then I click on next page button in footer
And I should be navigated to second page
And I should see a bookmarked list with 2 items
And I should see paging header and footer with correct info
"""
self._test_setup(num_chapters=12)
self._bookmark_units(num_units=12)
self.bookmarks_page.click_bookmarks_button()
self.assertTrue(self.bookmarks_page.results_present())
self._verify_pagination_info(
bookmark_count_on_current_page=10,
header_text='Showing 1-10 out of 12 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
self.bookmarks_page.press_next_page_button()
self._verify_pagination_info(
bookmark_count_on_current_page=2,
header_text='Showing 11-12 out of 12 total',
previous_button_enabled=True,
next_button_enabled=False,
current_page_number=2,
total_pages=2
)
def test_previous_page_button(self):
"""
Scenario: Previous button is working as expected for bookmarks list pagination
Given that I am a registered user
And I visit my courseware page
And I have bookmarked all the 12 units available
And I click on Bookmarks button
Then I click on next page button in footer
And I should be navigated to second page
And I should see a bookmarked list with 2 items
And I should see paging header and footer with correct info
Then I click on previous page button
And I should be navigated to first page
And I should see paging header and footer with correct info
"""
self._test_setup(num_chapters=12)
self._bookmark_units(num_units=12)
self.bookmarks_page.click_bookmarks_button()
self.assertTrue(self.bookmarks_page.results_present())
self.bookmarks_page.press_next_page_button()
self._verify_pagination_info(
bookmark_count_on_current_page=2,
header_text='Showing 11-12 out of 12 total',
previous_button_enabled=True,
next_button_enabled=False,
current_page_number=2,
total_pages=2
)
self.bookmarks_page.press_previous_page_button()
self._verify_pagination_info(
bookmark_count_on_current_page=10,
header_text='Showing 1-10 out of 12 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
def test_pagination_with_valid_page_number(self):
"""
Scenario: Bookmarks list pagination works as expected for valid page number
Given that I am a registered user
And I visit my courseware page
        And I have bookmarked all the 11 units available
Then I click on Bookmarks button
And I should see a bookmarked list
And I should see total page value is 2
Then I enter 2 in the page number input
And I should be navigated to page 2
"""
self._test_setup(num_chapters=11)
self._bookmark_units(num_units=11)
self.bookmarks_page.click_bookmarks_button()
self.assertTrue(self.bookmarks_page.results_present())
self.assertEqual(self.bookmarks_page.get_total_pages, 2)
self.bookmarks_page.go_to_page(2)
self._verify_pagination_info(
bookmark_count_on_current_page=1,
header_text='Showing 11-11 out of 11 total',
previous_button_enabled=True,
next_button_enabled=False,
current_page_number=2,
total_pages=2
)
def test_pagination_with_invalid_page_number(self):
"""
Scenario: Bookmarks list pagination works as expected for invalid page number
Given that I am a registered user
And I visit my courseware page
And I have bookmarked all the 11 units available
Then I click on Bookmarks button
And I should see a bookmarked list
And I should see total page value is 2
Then I enter 3 in the page number input
And I should stay at page 1
"""
self._test_setup(num_chapters=11)
self._bookmark_units(num_units=11)
self.bookmarks_page.click_bookmarks_button()
self.assertTrue(self.bookmarks_page.results_present())
self.assertEqual(self.bookmarks_page.get_total_pages, 2)
self.bookmarks_page.go_to_page(3)
self._verify_pagination_info(
bookmark_count_on_current_page=10,
header_text='Showing 1-10 out of 11 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
def test_bookmarked_unit_accessed_event(self):
"""
Scenario: Bookmark events are emitted with correct data when we access/visit a bookmarked unit.
Given that I am a registered user
And I visit my courseware page
And I have bookmarked a unit
When I click on bookmarked unit
Then `edx.course.bookmark.accessed` event is emitted
"""
self._test_setup(num_chapters=1)
self.reset_event_tracking()
# create expected event data
xblocks = self.course_fixture.get_nested_xblocks(category="vertical")
event_data = [
{
'event': {
'bookmark_id': '{},{}'.format(self.USERNAME, xblocks[0].locator),
'component_type': xblocks[0].category,
'component_usage_id': xblocks[0].locator,
}
}
]
self._bookmark_units(num_units=1)
self.bookmarks_page.click_bookmarks_button()
self._verify_pagination_info(
bookmark_count_on_current_page=1,
header_text='Showing 1 out of 1 total',
previous_button_enabled=False,
next_button_enabled=False,
current_page_number=1,
total_pages=1
)
self.bookmarks_page.click_bookmarked_block(0)
self.verify_event_data('edx.bookmark.accessed', event_data)
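# Illustrative sketch (not part of the upstream suite): the raw bookmarks REST
# call that BookmarksTest._bookmark_unit wraps, shown standalone for reference.
# `session` is assumed to be an authenticated requests.Session and `usage_id`
# a block usage key string; both are hypothetical inputs here.
def _example_bookmark_request(session, course_id, usage_id):
    """Create a bookmark for `usage_id` through the bookmarks API."""
    headers = {
        'Content-type': 'application/json',
        'X-CSRFToken': session.cookies['csrftoken'],
    }
    return session.post(
        BASE_URL + '/api/bookmarks/v1/bookmarks/',
        data=json.dumps({'usage_id': usage_id}),
        params={'course_id': course_id},
        headers=headers,
    )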
|
louyihua/edx-platform
|
common/test/acceptance/tests/lms/test_bookmarks.py
|
Python
|
agpl-3.0
| 23,938
|
[
"VisIt"
] |
cc4420ce59a5e57573ce5448fa6e90cd2842c25fdc840f498ec087e84e472655
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
from PyQt4 import QtCore, QtGui
from openlp.core.lib import translate, build_icon
from openlp.core.lib.ui import create_button_box
class Ui_MediaFilesDialog(object):
def setupUi(self, mediaFilesDialog):
mediaFilesDialog.setObjectName(u'mediaFilesDialog')
mediaFilesDialog.setWindowModality(QtCore.Qt.ApplicationModal)
mediaFilesDialog.resize(400, 300)
mediaFilesDialog.setModal(True)
mediaFilesDialog.setWindowIcon(build_icon(u':/icon/openlp-logo-16x16.png'))
self.filesVerticalLayout = QtGui.QVBoxLayout(mediaFilesDialog)
self.filesVerticalLayout.setSpacing(8)
self.filesVerticalLayout.setMargin(8)
self.filesVerticalLayout.setObjectName(u'filesVerticalLayout')
self.selectLabel = QtGui.QLabel(mediaFilesDialog)
self.selectLabel.setWordWrap(True)
self.selectLabel.setObjectName(u'selectLabel')
self.filesVerticalLayout.addWidget(self.selectLabel)
self.fileListWidget = QtGui.QListWidget(mediaFilesDialog)
self.fileListWidget.setAlternatingRowColors(True)
self.fileListWidget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.fileListWidget.setObjectName(u'fileListWidget')
self.filesVerticalLayout.addWidget(self.fileListWidget)
self.button_box = create_button_box(mediaFilesDialog, u'button_box', [u'cancel', u'ok'])
self.filesVerticalLayout.addWidget(self.button_box)
self.retranslateUi(mediaFilesDialog)
def retranslateUi(self, mediaFilesDialog):
mediaFilesDialog.setWindowTitle(translate('SongsPlugin.MediaFilesForm', 'Select Media File(s)'))
self.selectLabel.setText(translate('SongsPlugin.MediaFilesForm',
'Select one or more audio files from the list below, and click OK to import them into this song.'))
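# Illustrative usage sketch (not part of the original module): build the dialog
# UI on a plain QDialog. Assumes a QApplication can be created and that the
# OpenLP resources used by build_icon/translate are available in this context.
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    dialog = QtGui.QDialog()
    ui = Ui_MediaFilesDialog()
    ui.setupUi(dialog)
    dialog.show()
    sys.exit(app.exec_())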
|
marmyshev/transitions
|
openlp/plugins/songs/forms/mediafilesdialog.py
|
Python
|
gpl-2.0
| 3,943
|
[
"Brian"
] |
45d618050b9897a47396cfe70f9425ead28d7d11c4de4e6fec94da72a1c02a47
|
import matplotlib.pyplot as plt
#%matplotlib inline
import nengo
import numpy as np
import scipy.ndimage
import matplotlib.animation as animation
from matplotlib import pylab
from PIL import Image
import nengo.spa as spa
import cPickle
from nengo_extras.data import load_mnist
from nengo_extras.vision import Gabor, Mask
#Encode categorical integer features using a one-hot aka one-of-K scheme.
def one_hot(labels, c=None):
assert labels.ndim == 1
n = labels.shape[0]
c = len(np.unique(labels)) if c is None else c
y = np.zeros((n, c))
y[np.arange(n), labels] = 1
return y
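#Illustrative sanity check (not in the original script): one_hot turns integer
#labels into a one-of-K indicator matrix, e.g. labels [0, 2, 1] over 3 classes.
assert np.array_equal(one_hot(np.array([0, 2, 1]), c=3),
                      np.array([[1., 0., 0.],
                                [0., 0., 1.],
                                [0., 1., 0.]]))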
rng = np.random.RandomState(9)
# --- load the data
img_rows, img_cols = 28, 28
(X_train, y_train), (X_test, y_test) = load_mnist()
X_train = 2 * X_train - 1 # normalize to -1 to 1
X_test = 2 * X_test - 1 # normalize to -1 to 1
train_targets = one_hot(y_train, 10)
test_targets = one_hot(y_test, 10)
# --- set up network parameters
#Want to encode and decode the image
n_vis = X_train.shape[1]
n_out = X_train.shape[1]
#number of neurons/dimensions of semantic pointer
n_hid = 5000 #Try with more neurons for more accuracy
#n_hid = 1000
#Want the encoding/decoding done on the training images
ens_params = dict(
eval_points=X_train,
neuron_type=nengo.LIFRate(), #Why not use LIF?
intercepts=nengo.dists.Choice([-0.5]),
max_rates=nengo.dists.Choice([100]),
)
#Least-squares solver with L2 regularization.
solver = nengo.solvers.LstsqL2(reg=0.01)
#solver = nengo.solvers.LstsqL2(reg=0.0001)
solver2 = nengo.solvers.LstsqL2(reg=0.01)
#network that
with nengo.Network(seed=3) as model:
a = nengo.Ensemble(n_hid, n_vis, seed=3, **ens_params)
v = nengo.Node(size_in=n_out)
conn = nengo.Connection(
a, v, synapse=None,
eval_points=X_train, function=X_train,#want the same thing out
solver=solver)
'''
v2 = nengo.Node(size_in=train_targets.shape[1])
conn2 = nengo.Connection(
a, v2, synapse=None,
eval_points=X_train, function=train_targets, #Want to get the labels out
solver=solver2)
'''
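#Note: get_outs below relies on the conn2 connection that is commented out
#above; it is kept for reference only and is never called in this script.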
def get_outs(sim, images):
_, acts = nengo.utils.ensemble.tuning_curves(a, sim, inputs=images)
return np.dot(acts, sim.data[conn2].weights.T)
'''
def get_error(sim, images, labels):
return np.argmax(get_outs(sim, images), axis=1) != labels
def get_labels(sim,images):
return np.argmax(get_outs(sim, images), axis=1)
'''
#Get the neuron activity of an image or group of images (this is the semantic pointer in this case)
def get_activities(sim, images):
_, acts = nengo.utils.ensemble.tuning_curves(a, sim, inputs=images)
return acts
def get_encoder_outputs(sim,images):
outs = np.dot(images,sim.data[a].encoders.T) #before the neurons Why transpose?
return outs
def intense(img):
newImg = img.copy()
newImg[newImg < 0] = -1
newImg[newImg > 0] = 1
return newImg
def filtered(img):
return intense(scipy.ndimage.gaussian_filter(img, sigma=1))
#Images to train, starting at random orientation
orig_imgs = X_train[:100000].copy()
for img in orig_imgs:
img[:] = filtered(scipy.ndimage.interpolation.rotate(np.reshape(img,(28,28)),
(np.random.randint(360)),reshape=False,mode="nearest").ravel())
degrees = 6
#Images rotated a fixed amount from the original random orientation
rotated_imgs =orig_imgs.copy()
for img in rotated_imgs:
img[:] = filtered(scipy.ndimage.interpolation.rotate(np.reshape(img,(28,28)),degrees,reshape=False,mode="nearest").ravel())
#^encoder outputs
#Add noise
for img in orig_imgs:
noise = np.random.random([28,28]).ravel()
img[:] = img + noise
'''#Images not used for training, but for testing (all at random orientations)
test_imgs = X_test[:1000].copy()
for img in test_imgs:
img[:] = scipy.ndimage.interpolation.rotate(np.reshape(img,(28,28)),
(np.random.randint(360)),reshape=False,mode="nearest").ravel()
'''
# linear filter used for edge detection as encoders, more plausible for human visual system
encoders = Gabor().generate(n_hid, (11, 11), rng=rng)
encoders = Mask((28, 28)).populate(encoders, rng=rng, flatten=True)
#Set the ensembles encoders to this
a.encoders = encoders
#Check the encoders were correctly made
#plt.imshow(encoders[0].reshape(28, 28), vmin=encoders[0].min(), vmax=encoders[0].max(), cmap='gray')
with nengo.Simulator(model) as sim:
#Neuron activities of different mnist images
#The semantic pointers
orig_acts = get_activities(sim,orig_imgs)
#rotated_acts = get_activities(sim,rotated_imgs)
#test_acts = get_activities(sim,test_imgs)
#X_test_acts = get_activities(sim,X_test)
#labels_out = get_outs(sim,X_test)
rotated_after_encoders = get_encoder_outputs(sim,rotated_imgs)
#original_after_encoders = get_encoder_outputs(sim,orig_imgs)
#solvers for a learning rule
#solver_tranform = nengo.solvers.LstsqL2(reg=1e-8)
#solver_word = nengo.solvers.LstsqL2(reg=1e-8)
solver_rotate_encoder = nengo.solvers.LstsqL2(reg=1e-8)
#solver_identity_encoder = nengo.solvers.LstsqL2(reg=1e-8)
#find weight matrix between neuron activity of the original image and the rotated image
#weights returns a tuple including information about learning process, just want the weight matrix
#weights,_ = solver_tranform(orig_acts, rotated_acts)
#find weight matrix between labels and neuron activity
#label_weights,_ = solver_word(labels_out,X_test_acts)
rotated_after_encoder_weights,_ = solver_rotate_encoder(orig_acts,rotated_after_encoders)
#identity_after_encoder_weights,_ = solver_identity_encoder(orig_acts,original_after_encoders)
#filename = "label_weights_clockwise" + str(n_hid) +".p"
#cPickle.dump(label_weights, open( filename, "wb" ) )
#filename = "activity_to_img_weights_clockwise" + str(n_hid) +".p"
#cPickle.dump(sim.data[conn].weights.T, open( filename, "wb" ) )
#filename = "rotation_weights_clockwise" + str(n_hid) +".p"
#cPickle.dump(weights, open( filename, "wb" ) )
filename = "rotated_after_encoder_weights_counter_filter_noise" + str(n_hid) +".p"
cPickle.dump(rotated_after_encoder_weights, open( filename, "wb" ) )
#filename = "identity_after_encoder_weights" + str(n_hid) +".p"
#cPickle.dump(identity_after_encoder_weights, open( filename, "wb" ) )
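#Illustrative helper (not part of the original script): reload a weight matrix
#dumped above, using the same '<prefix><n_hid>.p' naming convention.
def load_weights(prefix, n_neurons=n_hid):
    with open(prefix + str(n_neurons) + ".p", "rb") as pickle_file:
        return cPickle.load(pickle_file)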
|
science-of-imagination/nengo-buffer
|
Project/mental_rotation_training_counterclockwise_noise.py
|
Python
|
gpl-3.0
| 6,424
|
[
"NEURON"
] |
e8bd08299a9a00dc9fa014f9dcb8575c5757dc40980294874cd2208e67153054
|
import json;
import os;
import numpy as np;
def CreateDataBundle(config):
# dataPath = config["dataPath"]; #'/Users/zhaot/Work/neutube/neurolabi/data';
swcPath = config['swcDir']; #sessionPath + '/' + config['swcDir'];
swcFileList = list();
if config.has_key("minBodySize") | config.has_key("maxBodySize"):
data = np.loadtxt(config['bundlePath'] + '/bodysize.txt', delimiter=',');
bodyList = data[:, 0];
bodySize = data[:, 1];
lowerThreshold = -1;
upperThreshold = -1;
if config.has_key("minBodySize"):
lowerThreshold = config["minBodySize"];
if config.has_key("maxBodySize"):
upperThreshold = config["maxBodySize"];
print lowerThreshold;
print upperThreshold;
for i in range(0, len(bodyList)):
# print bodySize[i];
# print (bodySize[i] >= lowerThreshold) | (lowerThreshold < 0);
# print (bodySize[i] <= upperThreshold) | (upperThreshold < 0);
# print ((bodySize[i] >= lowerThreshold) | (lowerThreshold < 0)) & ((bodySize[i] <= upperThreshold) | (upperThreshold < 0));
if ((bodySize[i] >= lowerThreshold) | (lowerThreshold < 0)) & ((bodySize[i] <= upperThreshold) | (upperThreshold < 0)):
print str(int(bodyList[i]));
swcFileList.append(str(int(bodyList[i])) + '.swc');
else:
swcFileList = os.listdir(swcPath);
print len(swcFileList), " neurons";
dataBundle = {"neuron": list()};
neuronClass = {};
#Add predicted
if (config["addingClass"]):
# predictPath = sessionPath + "/predict.txt";
predictPath = config["predict"];
if predictPath.endswith(".json"):
f = open(predictPath);
classBundle = json.load(f);
f.close();
for neuron in classBundle["neuron"]:
for bodyId in neuron["id"]:
neuronClass[bodyId] = neuron["class"];
else:
f = open(predictPath);
lines = f.readlines();
f.close();
for line in lines:
line.strip();
line = line.replace("_", " ");
line = line.replace("(", " ");
line = line.replace(")", " ");
line = line.replace(":", " ");
tokens = line.split();
#print(tokens)
if len(tokens) > 1:
print(int(tokens[1]), tokens[3]);
neuronClass[int(tokens[1])] = tokens[3];
if tokens[3] == "unknown":
neuronClass[int(tokens[1])] = tokens[3] + " " + tokens[4];
neuronName = {};
if "bodyAnnotation" in config:
bodyAnnotationPath = config["bodyAnnotation"];
f = open(bodyAnnotationPath);
bodyAnnotation = json.load(f);
f.close();
for body in bodyAnnotation["data"]:
if "name" in body:
bodyId = body["body ID"];
neuronName[bodyId] = body["name"];
for f in swcFileList:
if f.endswith('.swc'):
# print f;
bodyId = f.split('.')[0];
neuron = {"id": int(bodyId), "name": "FIB_" + bodyId,
"model": swcPath + "/" + f};
if (config["addingClass"]):
neuron["class"] = neuronClass[int(bodyId)];
if (config["addingName"]):
if int(bodyId) in neuronName:
neuron["name"] = neuronName[int(bodyId)];
if config.has_key("objDir"):
neuron["volume"] = (config["objDir"] + "/" + os.path.basename(f)).replace('.swc', '.sobj');
dataBundle["neuron"].append(neuron);
otherFields = ["image_resolution", "synapse_scale", "synapse", "source_offset",
"source_dimension", "swc_resolution", "image_resolution"];
for field in otherFields:
if config.has_key(field):
dataBundle[field] = config[field];
# dataBundlePath = sessionPath + "/data_bundle_with_class.json";
dataBundlePath = config["output"]; #sessionPath + "/data_bundle.json";
outFile = open(dataBundlePath, "w");
json.dump(dataBundle, outFile, indent = 2);
outFile.close();
print dataBundlePath + " saved";
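# Illustrative example (not part of the original script): a config dict covering
# the main keys CreateDataBundle reads. All paths and thresholds here are
# hypothetical placeholders.
EXAMPLE_CONFIG = {
    "swcDir": "/path/to/swc",
    "bundlePath": "/path/to/bundle",
    "minBodySize": 1000,
    "maxBodySize": 1000000,
    "addingClass": True,
    "predict": "/path/to/predict.json",
    "addingName": False,
    "objDir": "/path/to/obj",
    "output": "/path/to/data_bundle.json",
};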
if __name__ == '__main__':
dataPath = '/Users/zhaot/Work/neutube/neurolabi/data';
config = { "sessionPath": dataPath + '/flyem/FIB/skeletonization/session11', "addingClass": False};
config["output"] = config["sessionPath"] + "/data_bundle.json";
CreateDataBundle(config);
|
stephenplaza/NeuTu
|
neurolabi/python/flyem/CreateDataBundle.py
|
Python
|
bsd-3-clause
| 4,703
|
[
"NEURON"
] |
241d682eb52a8b746fc035a84a859c4b3b046dd7728d04bc14ba2de0aa44c7b0
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from shutil import rmtree
import tempfile
import unittest
from pyspark.ml import Transformer
from pyspark.ml.classification import (
DecisionTreeClassifier,
FMClassifier,
FMClassificationModel,
LogisticRegression,
MultilayerPerceptronClassifier,
MultilayerPerceptronClassificationModel,
OneVsRest,
OneVsRestModel,
)
from pyspark.ml.clustering import KMeans
from pyspark.ml.feature import Binarizer, HashingTF, PCA
from pyspark.ml.linalg import Vectors
from pyspark.ml.param import Params
from pyspark.ml.pipeline import Pipeline, PipelineModel
from pyspark.ml.regression import (
DecisionTreeRegressor,
GeneralizedLinearRegression,
GeneralizedLinearRegressionModel,
LinearRegression,
)
from pyspark.ml.util import DefaultParamsReadable, DefaultParamsWriter
from pyspark.ml.wrapper import JavaParams
from pyspark.testing.mlutils import MockUnaryTransformer, SparkSessionTestCase
class TestDefaultSolver(SparkSessionTestCase):
def test_multilayer_load(self):
df = self.spark.createDataFrame(
[
(0.0, Vectors.dense([0.0, 0.0])),
(1.0, Vectors.dense([0.0, 1.0])),
(1.0, Vectors.dense([1.0, 0.0])),
(0.0, Vectors.dense([1.0, 1.0])),
],
["label", "features"],
)
mlp = MultilayerPerceptronClassifier(layers=[2, 2, 2], seed=123)
model = mlp.fit(df)
self.assertEqual(model.getSolver(), "l-bfgs")
transformed1 = model.transform(df)
path = tempfile.mkdtemp()
model_path = path + "/mlp"
model.save(model_path)
model2 = MultilayerPerceptronClassificationModel.load(model_path)
self.assertEqual(model2.getSolver(), "l-bfgs")
transformed2 = model2.transform(df)
self.assertEqual(transformed1.take(4), transformed2.take(4))
def test_fm_load(self):
df = self.spark.createDataFrame(
[(1.0, Vectors.dense(1.0)), (0.0, Vectors.sparse(1, [], []))], ["label", "features"]
)
fm = FMClassifier(factorSize=2, maxIter=50, stepSize=2.0)
model = fm.fit(df)
self.assertEqual(model.getSolver(), "adamW")
transformed1 = model.transform(df)
path = tempfile.mkdtemp()
model_path = path + "/fm"
model.save(model_path)
model2 = FMClassificationModel.load(model_path)
self.assertEqual(model2.getSolver(), "adamW")
transformed2 = model2.transform(df)
self.assertEqual(transformed1.take(2), transformed2.take(2))
def test_glr_load(self):
df = self.spark.createDataFrame(
[
(1.0, Vectors.dense(0.0, 0.0)),
(1.0, Vectors.dense(1.0, 2.0)),
(2.0, Vectors.dense(0.0, 0.0)),
(2.0, Vectors.dense(1.0, 1.0)),
],
["label", "features"],
)
glr = GeneralizedLinearRegression(family="gaussian", link="identity", linkPredictionCol="p")
model = glr.fit(df)
self.assertEqual(model.getSolver(), "irls")
transformed1 = model.transform(df)
path = tempfile.mkdtemp()
model_path = path + "/glr"
model.save(model_path)
model2 = GeneralizedLinearRegressionModel.load(model_path)
self.assertEqual(model2.getSolver(), "irls")
transformed2 = model2.transform(df)
self.assertEqual(transformed1.take(4), transformed2.take(4))
class PersistenceTest(SparkSessionTestCase):
def test_linear_regression(self):
lr = LinearRegression(maxIter=1)
path = tempfile.mkdtemp()
lr_path = path + "/lr"
lr.save(lr_path)
lr2 = LinearRegression.load(lr_path)
self.assertEqual(lr.uid, lr2.uid)
self.assertEqual(type(lr.uid), type(lr2.uid))
self.assertEqual(
lr2.uid,
lr2.maxIter.parent,
"Loaded LinearRegression instance uid (%s) did not match Param's uid (%s)"
% (lr2.uid, lr2.maxIter.parent),
)
self.assertEqual(
lr._defaultParamMap[lr.maxIter],
lr2._defaultParamMap[lr2.maxIter],
"Loaded LinearRegression instance default params did not match " + "original defaults",
)
try:
rmtree(path)
except OSError:
pass
def test_linear_regression_pmml_basic(self):
# Most of the validation is done in the Scala side, here we just check
# that we output text rather than parquet (e.g. that the format flag
# was respected).
df = self.spark.createDataFrame(
[(1.0, 2.0, Vectors.dense(1.0)), (0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"],
)
lr = LinearRegression(maxIter=1)
model = lr.fit(df)
path = tempfile.mkdtemp()
lr_path = path + "/lr-pmml"
model.write().format("pmml").save(lr_path)
pmml_text_list = self.sc.textFile(lr_path).collect()
pmml_text = "\n".join(pmml_text_list)
self.assertIn("Apache Spark", pmml_text)
self.assertIn("PMML", pmml_text)
def test_logistic_regression(self):
lr = LogisticRegression(maxIter=1)
path = tempfile.mkdtemp()
lr_path = path + "/logreg"
lr.save(lr_path)
lr2 = LogisticRegression.load(lr_path)
self.assertEqual(
lr2.uid,
lr2.maxIter.parent,
"Loaded LogisticRegression instance uid (%s) "
"did not match Param's uid (%s)" % (lr2.uid, lr2.maxIter.parent),
)
self.assertEqual(
lr._defaultParamMap[lr.maxIter],
lr2._defaultParamMap[lr2.maxIter],
"Loaded LogisticRegression instance default params did not match "
+ "original defaults",
)
try:
rmtree(path)
except OSError:
pass
def test_kmeans(self):
kmeans = KMeans(k=2, seed=1)
path = tempfile.mkdtemp()
km_path = path + "/km"
kmeans.save(km_path)
kmeans2 = KMeans.load(km_path)
self.assertEqual(kmeans.uid, kmeans2.uid)
self.assertEqual(type(kmeans.uid), type(kmeans2.uid))
self.assertEqual(
kmeans2.uid,
kmeans2.k.parent,
"Loaded KMeans instance uid (%s) did not match Param's uid (%s)"
% (kmeans2.uid, kmeans2.k.parent),
)
self.assertEqual(
kmeans._defaultParamMap[kmeans.k],
kmeans2._defaultParamMap[kmeans2.k],
"Loaded KMeans instance default params did not match " + "original defaults",
)
try:
rmtree(path)
except OSError:
pass
def test_kmean_pmml_basic(self):
# Most of the validation is done in the Scala side, here we just check
# that we output text rather than parquet (e.g. that the format flag
# was respected).
data = [
(Vectors.dense([0.0, 0.0]),),
(Vectors.dense([1.0, 1.0]),),
(Vectors.dense([9.0, 8.0]),),
(Vectors.dense([8.0, 9.0]),),
]
df = self.spark.createDataFrame(data, ["features"])
kmeans = KMeans(k=2, seed=1)
model = kmeans.fit(df)
path = tempfile.mkdtemp()
km_path = path + "/km-pmml"
model.write().format("pmml").save(km_path)
pmml_text_list = self.sc.textFile(km_path).collect()
pmml_text = "\n".join(pmml_text_list)
self.assertIn("Apache Spark", pmml_text)
self.assertIn("PMML", pmml_text)
def _compare_params(self, m1, m2, param):
"""
Compare 2 ML Params instances for the given param, and assert both have the same param value
and parent. The param must be a parameter of m1.
"""
# Prevent key not found error in case of some param in neither paramMap nor defaultParamMap.
if m1.isDefined(param):
paramValue1 = m1.getOrDefault(param)
paramValue2 = m2.getOrDefault(m2.getParam(param.name))
if isinstance(paramValue1, Params):
self._compare_pipelines(paramValue1, paramValue2)
else:
self.assertEqual(paramValue1, paramValue2) # for general types param
# Assert parents are equal
self.assertEqual(param.parent, m2.getParam(param.name).parent)
else:
# If m1 is not defined param, then m2 should not, too. See SPARK-14931.
self.assertFalse(m2.isDefined(m2.getParam(param.name)))
def _compare_pipelines(self, m1, m2):
"""
Compare 2 ML types, asserting that they are equivalent.
This currently supports:
- basic types
- Pipeline, PipelineModel
- OneVsRest, OneVsRestModel
This checks:
- uid
- type
- Param values and parents
"""
self.assertEqual(m1.uid, m2.uid)
self.assertEqual(type(m1), type(m2))
if isinstance(m1, JavaParams) or isinstance(m1, Transformer):
self.assertEqual(len(m1.params), len(m2.params))
for p in m1.params:
self._compare_params(m1, m2, p)
elif isinstance(m1, Pipeline):
self.assertEqual(len(m1.getStages()), len(m2.getStages()))
for s1, s2 in zip(m1.getStages(), m2.getStages()):
self._compare_pipelines(s1, s2)
elif isinstance(m1, PipelineModel):
self.assertEqual(len(m1.stages), len(m2.stages))
for s1, s2 in zip(m1.stages, m2.stages):
self._compare_pipelines(s1, s2)
elif isinstance(m1, OneVsRest) or isinstance(m1, OneVsRestModel):
for p in m1.params:
self._compare_params(m1, m2, p)
if isinstance(m1, OneVsRestModel):
self.assertEqual(len(m1.models), len(m2.models))
for x, y in zip(m1.models, m2.models):
self._compare_pipelines(x, y)
elif isinstance(m1, Params):
# Test on python backend Estimator/Transformer/Model/Evaluator
self.assertEqual(len(m1.params), len(m2.params))
for p in m1.params:
self._compare_params(m1, m2, p)
else:
raise RuntimeError("_compare_pipelines does not yet support type: %s" % type(m1))
def test_pipeline_persistence(self):
"""
Pipeline[HashingTF, PCA]
"""
temp_path = tempfile.mkdtemp()
try:
df = self.spark.createDataFrame([(["a", "b", "c"],), (["c", "d", "e"],)], ["words"])
tf = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
pca = PCA(k=2, inputCol="features", outputCol="pca_features")
pl = Pipeline(stages=[tf, pca])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def test_nested_pipeline_persistence(self):
"""
Pipeline[HashingTF, Pipeline[PCA]]
"""
temp_path = tempfile.mkdtemp()
try:
df = self.spark.createDataFrame([(["a", "b", "c"],), (["c", "d", "e"],)], ["words"])
tf = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
pca = PCA(k=2, inputCol="features", outputCol="pca_features")
p0 = Pipeline(stages=[pca])
pl = Pipeline(stages=[tf, p0])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def test_python_transformer_pipeline_persistence(self):
"""
Pipeline[MockUnaryTransformer, Binarizer]
"""
temp_path = tempfile.mkdtemp()
try:
df = self.spark.range(0, 10).toDF("input")
tf = MockUnaryTransformer(shiftVal=2).setInputCol("input").setOutputCol("shiftedInput")
tf2 = Binarizer(threshold=6, inputCol="shiftedInput", outputCol="binarized")
pl = Pipeline(stages=[tf, tf2])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def _run_test_onevsrest(self, LogisticRegressionCls):
temp_path = tempfile.mkdtemp()
df = self.spark.createDataFrame(
[
(0.0, 0.5, Vectors.dense(1.0, 0.8)),
(1.0, 0.5, Vectors.sparse(2, [], [])),
(2.0, 1.0, Vectors.dense(0.5, 0.5)),
]
* 10,
["label", "wt", "features"],
)
lr = LogisticRegressionCls(maxIter=5, regParam=0.01)
OneVsRest(classifier=lr)
def reload_and_compare(ovr, suffix):
model = ovr.fit(df)
ovrPath = temp_path + "/{}".format(suffix)
ovr.save(ovrPath)
loadedOvr = OneVsRest.load(ovrPath)
self._compare_pipelines(ovr, loadedOvr)
modelPath = temp_path + "/{}Model".format(suffix)
model.save(modelPath)
loadedModel = OneVsRestModel.load(modelPath)
self._compare_pipelines(model, loadedModel)
reload_and_compare(OneVsRest(classifier=lr), "ovr")
reload_and_compare(OneVsRest(classifier=lr).setWeightCol("wt"), "ovrw")
def test_onevsrest(self):
from pyspark.testing.mlutils import DummyLogisticRegression
self._run_test_onevsrest(LogisticRegression)
self._run_test_onevsrest(DummyLogisticRegression)
def test_decisiontree_classifier(self):
dt = DecisionTreeClassifier(maxDepth=1)
path = tempfile.mkdtemp()
dtc_path = path + "/dtc"
dt.save(dtc_path)
dt2 = DecisionTreeClassifier.load(dtc_path)
self.assertEqual(
dt2.uid,
dt2.maxDepth.parent,
"Loaded DecisionTreeClassifier instance uid (%s) "
"did not match Param's uid (%s)" % (dt2.uid, dt2.maxDepth.parent),
)
self.assertEqual(
dt._defaultParamMap[dt.maxDepth],
dt2._defaultParamMap[dt2.maxDepth],
"Loaded DecisionTreeClassifier instance default params did not match "
+ "original defaults",
)
try:
rmtree(path)
except OSError:
pass
def test_decisiontree_regressor(self):
dt = DecisionTreeRegressor(maxDepth=1)
path = tempfile.mkdtemp()
dtr_path = path + "/dtr"
dt.save(dtr_path)
        dt2 = DecisionTreeRegressor.load(dtr_path)
self.assertEqual(
dt2.uid,
dt2.maxDepth.parent,
"Loaded DecisionTreeRegressor instance uid (%s) "
"did not match Param's uid (%s)" % (dt2.uid, dt2.maxDepth.parent),
)
self.assertEqual(
dt._defaultParamMap[dt.maxDepth],
dt2._defaultParamMap[dt2.maxDepth],
"Loaded DecisionTreeRegressor instance default params did not match "
+ "original defaults",
)
try:
rmtree(path)
except OSError:
pass
def test_default_read_write(self):
temp_path = tempfile.mkdtemp()
lr = LogisticRegression()
lr.setMaxIter(50)
lr.setThreshold(0.75)
writer = DefaultParamsWriter(lr)
savePath = temp_path + "/lr"
writer.save(savePath)
reader = DefaultParamsReadable.read()
lr2 = reader.load(savePath)
self.assertEqual(lr.uid, lr2.uid)
self.assertEqual(lr.extractParamMap(), lr2.extractParamMap())
# test overwrite
lr.setThreshold(0.8)
writer.overwrite().save(savePath)
reader = DefaultParamsReadable.read()
lr3 = reader.load(savePath)
self.assertEqual(lr.uid, lr3.uid)
self.assertEqual(lr.extractParamMap(), lr3.extractParamMap())
def test_default_read_write_default_params(self):
lr = LogisticRegression()
self.assertFalse(lr.isSet(lr.getParam("threshold")))
lr.setMaxIter(50)
lr.setThreshold(0.75)
# `threshold` is set by user, default param `predictionCol` is not set by user.
self.assertTrue(lr.isSet(lr.getParam("threshold")))
self.assertFalse(lr.isSet(lr.getParam("predictionCol")))
self.assertTrue(lr.hasDefault(lr.getParam("predictionCol")))
writer = DefaultParamsWriter(lr)
metadata = json.loads(writer._get_metadata_to_save(lr, self.sc))
self.assertTrue("defaultParamMap" in metadata)
reader = DefaultParamsReadable.read()
metadataStr = json.dumps(metadata, separators=[",", ":"])
loadedMetadata = reader._parseMetaData(
metadataStr,
)
reader.getAndSetParams(lr, loadedMetadata)
self.assertTrue(lr.isSet(lr.getParam("threshold")))
self.assertFalse(lr.isSet(lr.getParam("predictionCol")))
self.assertTrue(lr.hasDefault(lr.getParam("predictionCol")))
# manually create metadata without `defaultParamMap` section.
del metadata["defaultParamMap"]
metadataStr = json.dumps(metadata, separators=[",", ":"])
loadedMetadata = reader._parseMetaData(
metadataStr,
)
with self.assertRaisesRegex(AssertionError, "`defaultParamMap` section not found"):
reader.getAndSetParams(lr, loadedMetadata)
# Prior to 2.4.0, metadata doesn't have `defaultParamMap`.
metadata["sparkVersion"] = "2.3.0"
metadataStr = json.dumps(metadata, separators=[",", ":"])
loadedMetadata = reader._parseMetaData(
metadataStr,
)
reader.getAndSetParams(lr, loadedMetadata)
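# Illustrative sketch (not part of the upstream suite): the minimal estimator
# save/load round trip exercised by the tests above, factored out for reference.
# It needs an active SparkSession and is defined here for illustration only.
def _example_estimator_roundtrip(tmp_dir):
    """Save a LogisticRegression estimator to `tmp_dir` and load it back."""
    est = LogisticRegression(maxIter=5)
    est_path = tmp_dir + "/lr-example"
    est.write().overwrite().save(est_path)
    return LogisticRegression.load(est_path)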
if __name__ == "__main__":
from pyspark.ml.tests.test_persistence import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
ueshin/apache-spark
|
python/pyspark/ml/tests/test_persistence.py
|
Python
|
apache-2.0
| 20,116
|
[
"Gaussian"
] |
0afe2567466c9c9aecc45db839ddebd9f1d263ea4b7a3a68f29c83d2f33a8aec
|
__author__ = 'Zhouhao Zeng'
import HTSeq
import sys
from optparse import OptionParser
import numpy
def getSummitProfile(ga, summit_pos_set, window_size, resolution, UpstreamExtension, DownstreamExtension):
upstream_num_points = UpstreamExtension / resolution
downstream_num_points = DownstreamExtension / resolution
total_num_points = upstream_num_points + downstream_num_points + 1
profile = numpy.zeros(total_num_points)
num_summits = 0
for summit_pos in summit_pos_set:
if summit_pos.pos - upstream_num_points * resolution - window_size / 2 < 0:
continue
num_summits += 1
index = 0
while index < total_num_points:
count_in_window = 0
index_pos = summit_pos.pos + (index - upstream_num_points) * resolution
summit_pos_window_iv = HTSeq.GenomicInterval(summit_pos.chrom, index_pos - window_size / 2,
index_pos + window_size / 2)
for step_iv, step_count in ga[summit_pos_window_iv].steps():
count_in_window += step_count * step_iv.length
profile[index] += count_in_window
index += 1
return profile, num_summits
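# Illustrative invocation sketch (not part of the original script): a typical
# command line with hypothetical file names, profiling 4 kb on either side of
# each summit with a 100 bp window scanned every 10 bp.
EXAMPLE_COMMAND = ("python generate_pileup_profile_around_summits.py "
                   "-s summits.bed -b coverage.bed -o profile.txt "
                   "-u 4000 -d 4000 -w 100 -r 10")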
def main(argv):
desc = """This is a template for the analysis of aggretated tag distribution with respect to a set of points, such as the TSSs of known genes, with one profile from each strand."""
parser = OptionParser(description=desc)
parser.add_option("-s", "--summits_file", action="store", type="string",
dest="summitsfile", metavar="<file>", help="summits bed file")
parser.add_option("-b", "--bedfile", action="store", type="string",
dest="bed_file", help="bed file with interval and score", metavar="<file>")
parser.add_option("-o", "--outfile", action="store", type="string",
dest="outfile", help="outfile name", metavar="<file>")
parser.add_option("-u", "--UpstreamExtension", action="store", type="int",
dest="upstreamExtension", help="UpstreamExtension", metavar="<int>")
parser.add_option("-d", "--DownstreamExtension", action="store", type="int",
dest="downstreamExtension", help="DownstreamExtension", metavar="<int>")
parser.add_option("-w", "--WindowSize", action="store", type="int",
dest="window_size",
help="window size for averaging. When window size > resolution, there is smoothing",
metavar="<int>")
parser.add_option("-r", "--resolution", action="store", type="int",
dest="resolution", help="resolution of the upstream and downstream profile, eg, 5",
metavar="<int>")
(opt, args) = parser.parse_args(argv)
if len(argv) < 14:
parser.print_help()
sys.exit(1)
window_size = opt.window_size
UpstreamExtension = opt.upstreamExtension
DownstreamExtension = opt.downstreamExtension
resolution = opt.resolution
print "Upstream extension: %i" % UpstreamExtension
print "Downstream extension: %i" % DownstreamExtension
print "Scanning window size: %i" % window_size
print "Scanning resolution: %i" % resolution
num_tags = 0
ga = HTSeq.GenomicArray("auto", stranded=False, typecode="d")
bedfile = HTSeq.BED_Reader(opt.bed_file)
for alt in bedfile:
ga[alt.iv] += alt.score * 1.0 / alt.iv.length
summit_pos_set = set()
summitsfile = HTSeq.BED_Reader(opt.summitsfile)
for alt in summitsfile:
summit_pos_set.add(alt.iv.start_d_as_pos)
profile, num_summits = getSummitProfile(ga, summit_pos_set, window_size, resolution, UpstreamExtension,
DownstreamExtension)
normalization = num_summits
normalization *= window_size / 1000.0
print "Number of locations: %i" % num_summits
print "Normalization = %f" % normalization
f = open(opt.outfile, "w")
xValues = numpy.arange(-UpstreamExtension, DownstreamExtension + 1, resolution)
normalized_profile = profile / normalization
for index in range(len(xValues)):
outline = str(xValues[index]) + "\t" + str(normalized_profile[index]) + "\n"
f.write(outline)
f.close()
if __name__ == "__main__":
main(sys.argv)
|
zhouhaozeng/bioinformatics-codebase
|
profile/generate_pileup_profile_around_summits.py
|
Python
|
gpl-3.0
| 4,364
|
[
"HTSeq"
] |
563ce2ccb3c86385cf8f05b0f00edc1e3d682fbe8b1e0f3e0b476db4893b9d65
|
from math import sin, cos, pi, sqrt
import numpy as np
from ase.atoms import Atoms, Atom
from ase.parallel import paropen
"""Module to read and write atoms in cif file format"""
def read_cif(fileobj, index=-1):
if isinstance(fileobj, str):
fileobj = open(fileobj)
def search_key(fobj, key):
for line in fobj:
if key in line:
return line
return None
def get_key(fobj, key, pos=1):
line = search_key(fobj, key)
if line:
return float(line.split()[pos].split('(')[0])
return None
a = get_key(fileobj, '_cell_length_a')
b = get_key(fileobj, '_cell_length_b')
c = get_key(fileobj, '_cell_length_c')
alpha = pi * get_key(fileobj, '_cell_angle_alpha') / 180
beta = pi * get_key(fileobj, '_cell_angle_beta') / 180
gamma = pi * get_key(fileobj, '_cell_angle_gamma') / 180
va = a * np.array([1, 0, 0])
vb = b * np.array([cos(gamma), sin(gamma), 0])
cx = cos(beta)
cy = (cos(alpha) - cos(beta) * cos(gamma)) / sin(gamma)
cz = sqrt(1. - cx*cx - cy*cy)
vc = c * np.array([cx, cy, cz])
cell = np.array([va, vb, vc])
atoms = Atoms(cell=cell)
read = False
for line in fileobj:
if not read:
if '_atom_site_disorder_group' in line:
read = True
else:
word = line.split()
if len(word) < 5:
break
symbol = word[1]
pos = (float(word[2].split('(')[0]) * va +
float(word[3].split('(')[0]) * vb +
float(word[4].split('(')[0]) * vc )
atoms.append(Atom(symbol, pos))
return atoms
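# Illustrative usage sketch (not part of the original module): read a structure
# from a CIF file; 'structure.cif' is a hypothetical path.
if __name__ == '__main__':
    atoms = read_cif('structure.cif')
    print('read %d atoms' % len(atoms))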
|
freephys/python_ase
|
ase/io/cif.py
|
Python
|
gpl-3.0
| 1,693
|
[
"ASE"
] |
4fb12c81b095078577b216b77da3f17201998859f4bf19b9f6ca9a508e68a335
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
****************************
espressopp.integrator.Settle
****************************
.. function:: espressopp.integrator.Settle(system, fixedtuplelist, mO, mH, distHH, distOH)
:param system:
:param fixedtuplelist:
:param mO: (default: 16.0)
:param mH: (default: 1.0)
:param distHH: (default: 1.58)
:param distOH: (default: 1.0)
:type system:
:type fixedtuplelist:
:type mO: real
:type mH: real
:type distHH: real
:type distOH: real
.. function:: espressopp.integrator.Settle.addMolecules(moleculelist)
:param moleculelist:
:type moleculelist:
:rtype:
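Example (illustrative sketch, not from the original documentation; assumes an
existing `system`, a velocity-Verlet `integrator` attached via the usual
addExtension call, and a fixed tuple list grouping each water's oxygen with its
two hydrogens; `oxygen_pids` is a hypothetical list of oxygen particle ids):
>>> settle = espressopp.integrator.Settle(system, fixedtuplelist, mO=16.0, mH=1.0, distHH=1.58, distOH=1.0)
>>> settle.addMolecules(oxygen_pids)
>>> integrator.addExtension(settle)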
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_Settle
class SettleLocal(ExtensionLocal, integrator_Settle):
def __init__(self, system, fixedtuplelist, mO=16.0, mH=1.0, distHH=1.58, distOH=1.0):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, integrator_Settle, system, fixedtuplelist, mO, mH, distHH, distOH)
def addMolecules(self, moleculelist):
"""
Each processor takes the broadcasted list.
"""
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
for pid in moleculelist:
self.cxxclass.add(self, pid)
if pmi.isController:
class Settle(Extension):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.integrator.SettleLocal',
pmicall = [ "addMolecules" ]
)
|
kkreis/espressopp
|
src/integrator/Settle.py
|
Python
|
gpl-3.0
| 2,518
|
[
"ESPResSo"
] |
90cd4bf985e1eaf062d6b4caf66c086846614e81532752f0cb199a701f2c7cdf
|
from __future__ import with_statement
import numpy as np
import getpass
import time
def _fread3(fobj):
"""Read a 3-byte int from an open binary file object
Parameters
----------
fobj : file
File descriptor
Returns
-------
n : int
A 3 byte int
"""
b1, b2, b3 = np.fromfile(fobj, ">u1", 3)
return (b1 << 16) + (b2 << 8) + b3
def _fread3_many(fobj, n):
"""Read 3-byte ints from an open binary file object.
Parameters
----------
fobj : file
File descriptor
Returns
-------
out : 1D array
An array of 3 byte int
"""
b1, b2, b3 = np.fromfile(fobj, ">u1", 3 * n).reshape(-1,
3).astype(np.int).T
return (b1 << 16) + (b2 << 8) + b3
def read_geometry(filepath):
"""Read a triangular format Freesurfer surface mesh.
Parameters
----------
filepath : str
Path to surface file
Returns
-------
coords : numpy array
nvtx x 3 array of vertex (x, y, z) coordinates
faces : numpy array
nfaces x 3 array of defining mesh triangles
"""
with open(filepath, "rb") as fobj:
magic = _fread3(fobj)
if magic == 16777215: # Quad file
nvert = _fread3(fobj)
nquad = _fread3(fobj)
coords = np.fromfile(fobj, ">i2", nvert * 3).astype(np.float)
coords = coords.reshape(-1, 3) / 100.0
quads = _fread3_many(fobj, nquad * 4)
quads = quads.reshape(nquad, 4)
#
# Face splitting follows
#
faces = np.zeros((2 * nquad, 3), dtype=np.int)
nface = 0
for quad in quads:
if (quad[0] % 2) == 0:
faces[nface] = quad[0], quad[1], quad[3]
nface += 1
faces[nface] = quad[2], quad[3], quad[1]
nface += 1
else:
faces[nface] = quad[0], quad[1], quad[2]
nface += 1
faces[nface] = quad[0], quad[2], quad[3]
nface += 1
elif magic == 16777214: # Triangle file
create_stamp = fobj.readline()
_ = fobj.readline()
vnum = np.fromfile(fobj, ">i4", 1)[0]
fnum = np.fromfile(fobj, ">i4", 1)[0]
coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3)
faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3)
else:
raise ValueError("File does not appear to be a Freesurfer surface")
coords = coords.astype(np.float) # XXX: due to mayavi bug on mac 32bits
return coords, faces
def write_geometry(filepath, coords, faces, create_stamp=None):
"""Write a triangular format Freesurfer surface mesh.
Parameters
----------
filepath : str
Path to surface file
coords : numpy array
nvtx x 3 array of vertex (x, y, z) coordinates
faces : numpy array
nfaces x 3 array of vertex indices defining the mesh triangles
create_stamp : str
User/time stamp (default: "created by <user> on <ctime>")
"""
magic_bytes = np.array([255, 255, 254], dtype=np.uint8)
if create_stamp is None:
create_stamp = "created by %s on %s" % (getpass.getuser(),
time.ctime())
with open(filepath, 'wb') as fobj:
magic_bytes.tofile(fobj)
fobj.write("%s\n\n" % create_stamp)
np.array([coords.shape[0], faces.shape[0]], dtype='>i4').tofile(fobj)
# Coerce types, just to be safe
coords.astype('>f4').reshape(-1).tofile(fobj)
faces.astype('>i4').reshape(-1).tofile(fobj)
def read_morph_data(filepath):
"""Read a Freesurfer morphometry data file.
This function reads in what Freesurfer internally calls "curv" file types
(e.g. ?h.curv, ?h.thickness), but as that name is easily confused with the
surface curvature values themselves, we refer to these files as
"morphometry" files with PySurfer.
Parameters
----------
filepath : str
Path to morphometry file
Returns
-------
curv : numpy array
Vector representation of surface morphometry values
"""
with open(filepath, "rb") as fobj:
magic = _fread3(fobj)
if magic == 16777215:
vnum = np.fromfile(fobj, ">i4", 3)[0]
curv = np.fromfile(fobj, ">f4", vnum)
else:
vnum = magic
_ = _fread3(fobj)
curv = np.fromfile(fobj, ">i2", vnum) / 100
return curv
def read_annot(filepath, orig_ids=False):
"""Read in a Freesurfer annotation from a .annot file.
Parameters
----------
filepath : str
Path to annotation file
orig_ids : bool
Whether to return the vertex ids as stored in the annotation
file or the positional colortable ids
Returns
-------
labels : n_vtx numpy array
Annotation id at each vertex
ctab : numpy array
RGBA + label id colortable array
"""
with open(filepath, "rb") as fobj:
dt = ">i4"
vnum = np.fromfile(fobj, dt, 1)[0]
data = np.fromfile(fobj, dt, vnum * 2).reshape(vnum, 2)
labels = data[:, 1]
ctab_exists = np.fromfile(fobj, dt, 1)[0]
if not ctab_exists:
raise Exception('Color table not found in annotation file')
n_entries = np.fromfile(fobj, dt, 1)[0]
if n_entries > 0:
length = np.fromfile(fobj, dt, 1)[0]
orig_tab = np.fromfile(fobj, '>c', length)
orig_tab = orig_tab[:-1]
names = list()
ctab = np.zeros((n_entries, 5), np.int)
for i in xrange(n_entries):
name_length = np.fromfile(fobj, dt, 1)[0]
name = np.fromfile(fobj, "|S%d" % name_length, 1)[0]
names.append(name)
ctab[i, :4] = np.fromfile(fobj, dt, 4)
ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
ctab[i, 2] * (2 ** 16) +
ctab[i, 3] * (2 ** 24))
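# Packed-value sketch (made-up RGBA values): for (R, G, B, A) = (220, 20, 20, 0) the
# stored id is 220 + 20*2**8 + 20*2**16 + 0*2**24 = 1316060, which is the per-vertex
# value matched against ctab[:, -1] below when orig_ids is False.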
else:
ctab_version = -n_entries
if ctab_version != 2:
raise Exception('Color table version not supported')
n_entries = np.fromfile(fobj, dt, 1)[0]
ctab = np.zeros((n_entries, 5), np.int)
length = np.fromfile(fobj, dt, 1)[0]
_ = np.fromfile(fobj, "|S%d" % length, 1)[0] # Orig table path
entries_to_read = np.fromfile(fobj, dt, 1)[0]
names = list()
for i in xrange(entries_to_read):
_ = np.fromfile(fobj, dt, 1)[0] # Structure
name_length = np.fromfile(fobj, dt, 1)[0]
name = np.fromfile(fobj, "|S%d" % name_length, 1)[0]
names.append(name)
ctab[i, :4] = np.fromfile(fobj, dt, 4)
ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
ctab[i, 2] * (2 ** 16))
ctab[:, 3] = 255
if not orig_ids:
ord = np.argsort(ctab[:, -1])
labels = ord[np.searchsorted(ctab[ord, -1], labels)]
return labels, ctab, names
def read_label(filepath):
"""Load in a Freesurfer .label file.
Parameters
----------
filepath : str
Path to label file
Returns
-------
label_array : numpy array
Array with indices of vertices included in label
"""
label_array = np.loadtxt(filepath, dtype=np.int, skiprows=2, usecols=[0])
return label_array
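# Round-trip sketch (not part of the original module; the file name is arbitrary),
# using only the write_geometry()/read_geometry() helpers defined above:
#
#   coords = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
#   faces = np.array([[0, 1, 2]])
#   write_geometry("toy.surf", coords, faces)
#   coords2, faces2 = read_geometry("toy.surf")
#   assert np.allclose(coords, coords2) and np.array_equal(faces, faces2)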
|
ME-ICA/me-ica
|
meica.libs/nibabel/freesurfer/io.py
|
Python
|
lgpl-2.1
| 7,638
|
[
"Mayavi"
] |
0fa09810f96d5de36ce8dfd042562c8c075e318e36cc12081bff2a17caac6bfa
|
# from .name.other.album import AlbumGenerator
# from .name.other.band import BandGenerator
# from ._unknown.motivation import MotivationGenerator
# from ._unknown.world import WorldGenerator
# from ._unknown.space.galaxy import GalaxyGenerator
# from ._unknown.space.star import StarGenerator
# from ._unknown.space.planet import PlanetGenerator
from .other import *
# from .description.race import RandomRaceGenerator
generators = {
# "album": AlbumGenerator,
# "band": BandGenerator,
"battlecry": BattleCry,
# "concept-art-place": ArtConceptPlace,
# "concept-art-being": ArtConceptBeing,
"concept-art": ArtConcept,
# "concept-story-character": StoryConceptCharacter,
# "concept-story-event": StoryConceptEvent,
"concept-story": StoryConcept,
# "demonym": Demonym,
"motivation": CharacterGoal,
"haiku": Haiku,
"idiom": Idiom,
"motto": Motto,
# "prayer-forgive": ForgivePrayer,
# "prayer-aid": AidPrayer,
"prayer": Prayer,
"riddle": Riddle,
"subject": SchoolSubject,
"slogan": Slogan,
# "swear": Swear,
# "wisdom": WisdomQuote,
# "world": World,
# "galaxy": Galaxy,
# "star": Star,
# "planet": Planet,
# "alien": RandomRace,
}
|
d2emon/generator-pack
|
src/genesys/generator/fng/__init__.py
|
Python
|
gpl-3.0
| 1,240
|
[
"Galaxy"
] |
8975bdc662ce9bf926b4feaa68f261f7331c0b67465a78379bc5c1ee1c8433e6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Piero Dalle Pezze
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import shutil
from sbpipe.utils.io import replace_str_in_file
def generic_preproc(infile, outfile):
"""
Copy the model file
:param infile: the input file
:param outfile: the output file
"""
shutil.copyfile(infile, outfile)
def copasi_preproc(infile, outfile):
"""
Replicate a copasi model and adds an id.
:param infile: the input file
:param outfile: the output file
"""
generic_preproc(infile, outfile)
replace_str_in_file(outfile,
os.path.splitext(os.path.basename(infile))[0] + ".csv",
os.path.splitext(os.path.basename(outfile))[0] + ".csv")
replace_str_in_file(outfile,
os.path.splitext(os.path.basename(infile))[0] + ".txt",
os.path.splitext(os.path.basename(outfile))[0] + ".csv")
replace_str_in_file(outfile,
os.path.splitext(os.path.basename(infile))[0] + ".tsv",
os.path.splitext(os.path.basename(outfile))[0] + ".csv")
replace_str_in_file(outfile,
os.path.splitext(os.path.basename(infile))[0] + ".dat",
os.path.splitext(os.path.basename(outfile))[0] + ".csv")
def preproc(infile, outfile, copasi=False):
"""
Replicate a copasi model and adds an id.
:param infile: the input file
:param outfile: the output file
:param copasi: True if the model is a Copasi model
"""
if copasi:
copasi_preproc(infile, outfile)
else:
generic_preproc(infile, outfile)
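# Usage sketch (file names are hypothetical): replicate a COPASI model as a numbered
# copy whose report targets point at the copy's own .csv files, or fall back to a
# plain file copy for any other model format.
#
#   preproc('insulin_receptor.cps', 'insulin_receptor_1.cps', copasi=True)
#   preproc('model.py', 'model_1.py', copasi=False)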
|
pdp10/sbpipe
|
sbpipe/snakemake/preproc.py
|
Python
|
mit
| 2,731
|
[
"COPASI"
] |
6da3f2b02fc9d0e9cc1630715578c893b500015c5c16ca512eb4be0c82e85657
|
'''
Set up Visual Studio 2010 to build a specified MPIR configuration
Copyright (C) 2011, Brian Gladman
32-bit:
x86w: x86, i386, i486, i586, pentium, pentiummmx, k6, k62, k63, k7, athlon, viac3, viac32, x86_64
p6: pentiumpro, i686
mmx: pentium2
sse2: pentium4, prescott, core, netburst, netburstlahf
p3mmx: pentium3, k8, k10, k102, k103, bulldozer, bobcat, core2, penryn, nehalem, westmere, sandybridge, atom, nano
64-bit:
x86_64: x86_64w
netburst: netburst, netburstlahf
k8only: k8, nano
k10: k10
k102: k102, k103, bulldozer
bobcat: bobcat
core2: core2
penryn: penryn
nehalem: nehalem
westmere: westmere
sandybridge: sandybridge
atom: atom
'''
from __future__ import print_function
from operator import itemgetter
from os import listdir, walk, unlink, makedirs, getcwd
from os.path import split, splitext, isdir, relpath, join, exists
from copy import deepcopy
from sys import argv, exit
from filecmp import cmp
from shutil import copy
from re import compile, search
from collections import defaultdict
from uuid import uuid1
from time import sleep
# for script debugging
debug = False
# either add a prebuild step to the project files or do it here
add_prebuild = True
# output a build project for the C++ static library
add_cpp_lib = False
# The path to the mpir root directory
build_vc = 'build.vc12/'
mpir_dir = '../'
build_dir = mpir_dir + build_vc
cfg_dir = './cdata'
# paths that might include source files(*.c, *.h, *.asm)
c_directories = ( '', 'build.vc12', 'fft', 'mpf', 'mpq', 'mpz', 'printf', 'scanf' )
# files that are to be excluded from the build
exclude_file_list = ('config.guess', 'cfg', 'getopt', 'getrusage', 'gettimeofday', 'cpuid',
'obsolete', 'win_timing', 'gmp-mparam', 'tal-debug', 'tal-notreent',
'new_fft', 'new_fft_with_flint', 'compat', 'udiv_w_sdiv' )
# copy from file ipath to file opath but avoid copying if
# opath exists and is the same as ipath (this is to avoid
# triggering an unnecessary rebuild).
def write_f(ipath, opath):
if exists(ipath) and not isdir(ipath):
if exists(opath):
if isdir(opath) or cmp(ipath, opath):
return
copy(ipath, opath)
# append a file (ipath) to an existing file (opath)
def append_f(ipath, opath):
try:
with open(opath, 'ab') as out_file:
try:
with open(ipath, 'rb') as in_file:
buf = in_file.read(8192)
while buf:
out_file.write(buf)
buf = in_file.read(8192)
except IOError:
print('error reading {0:s} for input'.format(ipath))
return
except IOError:
print('error opening {0:s} for output'.format(opath))
# copy files in a list from in_dir to out_dir
def copy_files(file_list, in_dir, out_dir):
try:
makedirs(out_dir)
except OSError:
pass  # the output directory may already exist
for f in file_list:
copy(join(in_dir, f), out_dir)
# Recursively search a given directory tree to find header,
# C and assembler code files that either replace or augment
# the generic C source files in the input list 'src_list'.
# As the directory tree is searched, files in each directory
# become the source code files for the current directory and
# the default source code files for its child directories.
#
# Lists of default header, C and assembler source code files
# are maintained as the tree is traversed and if a file in
# the current directory matches the name of a file in the
# default file list (name matches ignore file extensions),
# the name in the list is removed and is replaced by the new
# file found. On return each directory in the tree had an
# entry in the returned dictionary that contains:
#
# 1. The list of header files
#
# 2. The list of C source code files for the directory
#
# 3. The list of assembler code files that replace C files
#
# 4. The list of assembler files that are not C replacements
#
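# For illustration only (the directory and file names are made up), one entry of the
# returned dictionary might look like:
#
#   d['haswell'] == [ [('gmp-mparam', '.h', 'mpn\\x86_64w\\haswell')],    # 1. headers
#                     [('toom22_mul', '.c', 'mpn\\generic'), ...],        # 2. remaining C files
#                     [('add_n', '.asm', 'mpn\\x86_64w\\haswell'), ...],  # 3. asm replacing C
#                     [('addadd_n', '.asm', 'mpn\\x86_64w\\haswell')],    # 4. extra asm
#                     'mpn\\x86_64w\\haswell' ]                           # path from MPIR root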
def find_asm(path, cf_list):
d = dict()
for root, dirs, files in walk(path):
if '.svn' in dirs: # ignore SVN directories
dirs.remove('.svn')
if 'fat' in dirs: # ignore fat directory
dirs.remove('fat')
relp = relpath(root, path) # path from asm root
relr = relpath(root, mpir_dir) # path from MPIR root
if relp == '.': # set C files as default
relp = h = t = ''
d[''] = [ [], deepcopy(cf_list), [], [], relr ]
else:
h, t = split(relp) # h = parent, t = this directory
# copy defaults from this directories parent
d[relp] = [ deepcopy(d[h][0]), deepcopy(d[h][1]),
deepcopy(d[h][2]), deepcopy(d[h][3]), relr ]
for f in files: # for the files in this directory
n, x = splitext(f)
if x == '.h': # if it is a header file, remove
for cf in reversed(d[relp][0]): # any matching default
if cf[0] == n:
d[relp][0].remove(cf)
d[relp][0] += [(n, x, relr)] # and add the local header file
if x == '.c': # if it is a C file, remove
for cf in reversed(d[relp][1]): # any matching default
if cf[0] == n:
d[relp][1].remove(cf)
d[relp][1] += [(n, x, relr)] # and add the local C file
if x == '.asm': # if it is an assembler file
match = False
for cf in reversed(d[relp][1]): # remove any matching C file
if cf[0] == n:
d[relp][1].remove(cf)
match = True
break
for cf in reversed(d[relp][2]): # and remove any matching
if cf[0] == n: # assembler file
d[relp][2].remove(cf)
match = True
break
if match: # if a match was found, put the
d[relp][2] += [(n, x, relr)] # file in the replacement list
else: # otherwise look for it in the
for cf in reversed(d[relp][3]): # additional files list
if cf[0] == n:
d[relp][3].remove(cf)
break
d[relp][3] += [(n, x, relr)]
for k in d: # additional assembler list
for i in range(4):
d[k][i].sort(key=itemgetter(0)) # sort the four file lists
return d
# create 4 lists of c, h, cc (or cpp) and asm (or as) files in a directory
def find_src(dir_list):
# list number from file extension
di = { '.h': 0, '.c': 1, '.cc': 2, '.cpp': 2, '.asm': 3, '.as': 3 }
list = [ [], [], [], [] ]
for d in dir_list:
for f in listdir(join(mpir_dir, d)):
if f == '.svn':
continue # ignore SVN directories
if not isdir(f):
n, x = splitext(f) # split into name + extension
if x in di and not n in exclude_file_list:
list[di[x]] += [(n, x, d)] # if of the right type and is
for x in list: # not in the exclude list
x.sort(key=itemgetter(2, 0, 1)) # add it to appropriate list
return list
# scan the files in the input set and find the symbols
# defined in the files
fr_sym = compile(r'LEAF_PROC\s+(\w+)')
lf_sym = compile(r'FRAME_PROC\s+(\w+)')
wf_sym = compile(r'WIN64_GCC_PROC\s+(\w+)')
g3_sym = compile(r'global\s+___g(\w+)')
g2_sym = compile(r'global\s+__g(\w+)')
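# Hypothetical assembler fragments these patterns are meant to catch (shown only to
# document the regexes above):
#
#   'LEAF_PROC mpn_add_n'       -> fr_sym captures 'mpn_add_n'
#   'FRAME_PROC mpn_divrem_1'   -> lf_sym captures 'mpn_divrem_1'
#   'WIN64_GCC_PROC mpn_mul_1'  -> wf_sym captures 'mpn_mul_1'
#   'global __gmpn_sub_n'       -> g2_sym captures 'mpn_sub_n'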
def get_symbols(setf, sym_dir):
for f in setf:
fn = join(mpir_dir, f[2], f[0] + f[1])
with open(fn, 'r') as inf:
lines = inf.readlines()
for l in lines:
m = fr_sym.search(l)
if m:
sym_dir[f] |= set((m.groups(1)[0],))
m = lf_sym.search(l)
if m:
sym_dir[f] |= set((m.groups(1)[0],))
m = wf_sym.search(l)
if m:
sym_dir[f] |= set((m.groups(1)[0],))
m = g3_sym.search(l)
if m:
sym_dir[f] |= set((m.groups(1)[0],))
else:
m = g2_sym.search(l)
if m:
sym_dir[f] |= set((m.groups(1)[0],))
def file_symbols(cf):
sym_dir = defaultdict(set)
for c in cf:
if c == 'fat':
continue
setf = set()
for f in cf[c][2] + cf[c][3]:
setf |= set((f,))
get_symbols(setf, sym_dir)
return sym_dir
def gen_have_list(c, sym_dir, out_dir):
set_sym2 = set()
for f in c[2]:
set_sym2 |= sym_dir[f]
set_sym3 = set()
for f in c[3]:
set_sym3 |= sym_dir[f]
c += [sorted(list(set_sym2)), sorted(list(set_sym3))]
fd = join(out_dir, c[4])
try:
makedirs(fd)
except OSError:
pass  # the output directory may already exist
with open(join(fd, 'cfg.h'), 'w') as outf:
for sym in sorted(set_sym2 | set_sym3):
print(sym, file=outf)
# print('/* assembler symbols also available in C files */', file=outf)
# for sym in sorted(set_sym2):
# print(sym, file=outf)
# print('/* assembler symbols not available in C files */', file=outf)
# for sym in sorted(set_sym3):
# print(sym, file=outf)
# generate Visual Studio IDE Filter
def filter_folders(cf_list, af_list, outf):
f1 = r''' <ItemGroup>
<Filter Include="Header Files" />
<Filter Include="Source Files" />
'''
f2 = r''' <Filter Include="Source Files\{0:s}" />
'''
f3 = r''' </ItemGroup>
'''
c_dirs = set(i[2] for i in cf_list)
a_dirs = set(i[2] for i in af_list)
if a_dirs:
c_dirs |= set(('mpn\yasm',))
outf.write(f1)
for d in sorted(c_dirs):
if d:
t = d if d != 'mpn\generic' else 'mpn'
outf.write(f2.format(t))
outf.write(f3)
filter_hdr_item = ' <ClInclude Include="..\..\{}">\n <Filter>Header Files</Filter>\n </ClInclude>\n'
def filter_headers(hdr_list, outf):
f1 = r''' <ItemGroup>
'''
f2 = r''' <ClInclude Include="..\..\{}">
<Filter>Header Files</Filter>
</ClInclude>
'''
f3 = r''' </ItemGroup>
'''
outf.write(f1)
for h in hdr_list:
outf.write(f2.format(h))
outf.write(f3)
def filter_csrc(cf_list, outf):
f1 = r''' <ItemGroup>
'''
f2 = r''' <ClCompile Include="..\..\{0:s}">
<Filter>Source Files</Filter>
</ClCompile>
'''
f3 = r''' <ClCompile Include="..\..\{1:s}\{0:s}">
<Filter>Source Files\{2:s}</Filter>
</ClCompile>
'''
f4 = r''' </ItemGroup>
'''
outf.write(f1)
for i in cf_list:
if not i[2]:
outf.write(f2.format(i[0] + i[1]))
else:
t = 'mpn' if i[2].endswith('generic') else i[2]
outf.write(f3.format(i[0] + i[1], i[2], t))
outf.write(f4)
def filter_asrc(af_list, outf):
f1 = r''' <ItemGroup>
'''
f2 = r''' <YASM Include="..\..\{1:s}\{0:s}">
<Filter>Source Files\mpn\yasm</Filter>
</YASM>
'''
f3 = r''' </ItemGroup>
'''
outf.write(f1)
for i in af_list:
outf.write(f2.format(i[0] + i[1], i[2], i[2]))
outf.write(f3)
def gen_filter(name, hf_list, cf_list, af_list):
f1 = r'''<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
'''
f2 = r''' <ItemGroup>
<None Include="..\..\gmp-h.in" />
</ItemGroup>
</Project>
'''
fn = join(build_dir, name)
try:
makedirs(split(fn)[0])
except OSError:
pass  # the output directory may already exist
with open(fn, 'w') as outf:
outf.write(f1)
filter_folders(cf_list, af_list, outf)
filter_headers(hf_list, outf)
filter_csrc(cf_list, outf)
if af_list:
filter_asrc(af_list, outf)
outf.write(f2)
# generate vcxproj file
def vcx_proj_cfg(plat, outf):
f1 = r''' <ItemGroup Label="ProjectConfigurations">
'''
f2 = r''' <ProjectConfiguration Include="{1:s}|{0:s}">
<Configuration>{1:s}</Configuration>
<Platform>{0:s}</Platform>
</ProjectConfiguration>
'''
f3 = r''' </ItemGroup>
'''
outf.write(f1)
for pl in plat:
for conf in ('Release', 'Debug'):
outf.write(f2.format(pl, conf))
outf.write(f3)
def vcx_globals(name, guid, outf):
f1 = r''' <PropertyGroup Label="Globals">
<RootNamespace>{0:s}</RootNamespace>
<Keyword>Win32Proj</Keyword>
<ProjectGuid>{1:s}</ProjectGuid>
</PropertyGroup>
'''
outf.write(f1.format(name, guid))
def vcx_default_cpp_props(outf):
f1 = r''' <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
'''
outf.write(f1)
def vcx_library_type(plat, is_dll, outf):
f1 = r''' <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='{1:s}|{0:s}'" Label="Configuration">
<ConfigurationType>{2:s}Library</ConfigurationType>
<PlatformToolset>v120</PlatformToolset>
</PropertyGroup>
'''
for pl in plat:
for conf in ('Release', 'Debug'):
outf.write(f1.format(pl, conf, 'Dynamic' if is_dll else 'Static' ))
def vcx_cpp_props(outf):
f1 = r''' <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
'''
outf.write(f1)
def vcx_extensions(outf):
f1 = r''' <ImportGroup Label="ExtensionSettings">
<Import Project="..\vsyasm.props" />
</ImportGroup>
'''
outf.write(f1)
def vcx_user_props(plat, outf):
f1 = r''' <ImportGroup Condition="'$(Configuration)|$(Platform)'=='{1:s}|{0:s}'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" />
</ImportGroup>
'''
for pl in plat:
for conf in ('Release', 'Debug'):
outf.write(f1.format(pl, conf))
def vcx_target_name_and_dirs(name, plat, is_dll, outf):
f1 = r''' <PropertyGroup>
<_ProjectFileVersion>10.0.21006.1</_ProjectFileVersion>
'''
f2 = r''' <TargetName Condition="'$(Configuration)|$(Platform)'=='{1:s}|{0:s}'">{2:s}</TargetName>
<IntDir Condition="'$(Configuration)|$(Platform)'=='{1:s}|{0:s}'">$(Platform)\$(Configuration)\</IntDir>
<OutDir Condition="'$(Configuration)|$(Platform)'=='{1:s}|{0:s}'">$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
'''
f3 = r''' </PropertyGroup>
'''
outf.write(f1)
for pl in plat:
for conf in ('Release', 'Debug'):
outf.write(f2.format(pl, conf, name))
outf.write(f3)
def yasm_options(plat, is_dll, outf):
f1 = r''' <YASM>
<Defines>{0:s}</Defines>
<IncludePaths>..\..\mpn\x86{1:s}w\</IncludePaths>
<Debug>true</Debug>
<ObjectFileName>$(IntDir)mpn\</ObjectFileName>
<ObjectFile>$(IntDir)mpn\</ObjectFile>
</YASM>
'''
outf.write(f1.format('DLL' if is_dll else '', '' if plat == 'Win32' else '_64'))
def compiler_options(plat, is_dll, is_debug, outf):
f1 = r''' <ClCompile>
<Optimization>{0:s}</Optimization>
<IntrinsicFunctions>true</IntrinsicFunctions>
<AdditionalIncludeDirectories>..\..\</AdditionalIncludeDirectories>
<PreprocessorDefinitions>{1:s}%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreaded{2:s}</RuntimeLibrary>
<ProgramDataBaseFileName>$(TargetDir)$(TargetName).pdb</ProgramDataBaseFileName>
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
</ClCompile>
'''
if is_dll:
s1 = 'DEBUG;HAVE_CONFIG_H;MSC_BUILD_DLL;'
s2 = 'DLL'
else:
s1 = 'DEBUG;_LIB;HAVE_CONFIG_H;'
s2 = ''
if plat == 'x64':
s1 = s1 + '_WIN64;'
if is_debug:
opt, defines, crt = 'Disabled', '_' + s1, 'Debug' + s2
else:
opt, defines, crt = 'Full', 'N' + s1, s2
outf.write(f1.format(opt, defines, crt))
def linker_options(outf):
f1 = r''' <Link>
<GenerateDebugInformation>true</GenerateDebugInformation>
<LargeAddressAware>true</LargeAddressAware>
</Link>
'''
outf.write(f1)
def vcx_pre_build(name, plat, outf):
f1 = r''' <PreBuildEvent>
<Command>cd ..\
prebuild {0:s} {1:s}
</Command>
</PreBuildEvent>
'''
outf.write(f1.format(name, plat))
def vcx_post_build(is_cpp, outf):
f1 = r'''
<PostBuildEvent>
<Command>cd ..\
postbuild "$(TargetPath)"
</Command>
</PostBuildEvent>
'''
outf.write(f1)
def vcx_tool_options(config, plat, is_dll, is_cpp, af_list, outf):
f1 = r''' <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='{1:s}|{0:s}'">
'''
f2 = r''' </ItemDefinitionGroup>
'''
for pl in plat:
for is_debug in (False, True):
outf.write(f1.format(pl, 'Debug' if is_debug else 'Release'))
if add_prebuild and not is_cpp:
vcx_pre_build(config, pl, outf)
if af_list:
yasm_options(plat, is_dll, outf)
compiler_options(pl, is_dll, is_debug, outf)
if is_dll:
linker_options(outf)
vcx_post_build(is_cpp, outf)
outf.write(f2)
def vcx_hdr_items(hdr_list, outf):
f1 = r''' <ItemGroup>
'''
f2 = r''' <ClInclude Include="..\..\{0:s}" />
'''
f3 = r''' </ItemGroup>
'''
outf.write(f1)
for i in hdr_list:
outf.write(f2.format(i))
outf.write(f3)
def vcx_c_items(cf_list, plat, outf):
f1 = r''' <ItemGroup>
'''
f2 = r''' <ClCompile Include="..\..\{0[0]:s}{0[1]:s}" />
'''
f3 = r''' <ClCompile Include="..\..\{0[2]:s}\{0[0]:s}{0[1]:s}">
'''
f4 = r''' <ObjectFileName Condition="'$(Configuration)|$(Platform)'=='{0:s}|{1:s}'">$(IntDir){2:s}\</ObjectFileName>
'''
f5 = r''' </ClCompile>
'''
f6 = r''' </ItemGroup>
'''
outf.write(f1)
for nxd in cf_list:
if nxd[2] == '':
outf.write(f2.format(nxd))
else:
outf.write(f3.format(nxd))
for cf in ('Release', 'Debug'):
for pl in plat:
outf.write(f4.format(cf, pl, 'mpn' if nxd[2].endswith('generic') else nxd[2]))
outf.write(f5)
outf.write(f6)
def vcx_a_items(af_list, outf):
f1 = r''' <ItemGroup>
'''
f2 = r''' <YASM Include="..\..\{0[2]:s}\{0[0]:s}{0[1]:s}" />
'''
f3 = r''' </ItemGroup>
'''
outf.write(f1)
for nxd in af_list:
outf.write(f2.format(nxd))
outf.write(f3)
def gen_vcxproj(proj_name, file_name, guid, config, plat, is_dll, is_cpp, hf_list, cf_list, af_list):
f1 = r'''<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
'''
f2 = r''' <PropertyGroup Label="UserMacros" />
'''
f3 = r''' <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
'''
f4 = r''' <ImportGroup Label="ExtensionTargets">
<Import Project="..\vsyasm.targets" />
</ImportGroup>
'''
f5 = r'''<ItemGroup>
<None Include="..\..\gmp-h.in" />
</ItemGroup>
</Project>
'''
with open(join(build_dir, file_name), 'w') as outf:
outf.write(f1)
vcx_proj_cfg(plat, outf)
vcx_globals(proj_name, guid, outf)
vcx_default_cpp_props(outf)
vcx_library_type(plat, is_dll, outf)
vcx_cpp_props(outf)
if af_list:
vcx_extensions(outf)
vcx_user_props(plat, outf)
outf.write(f2)
vcx_target_name_and_dirs(proj_name, plat, is_dll, outf)
vcx_tool_options(config, plat, is_dll, is_cpp, af_list, outf)
vcx_hdr_items(hf_list, outf)
vcx_c_items(cf_list, plat, outf)
vcx_a_items(af_list, outf)
outf.write(f3)
if af_list:
outf.write(f4)
outf.write(f5)
# add a project file to the solution
def add_proj_to_sln(proj_name, file_name, guid):
f6 = r'''Project("{0:s}") = "{1:s}", "{2:s}", "{3:s}"
'''
f7 = r'''EndProject
'''
re_guid = compile(r'Project\s*\(\s*\"\s*\{([^\}]+)\s*\}\s*\"\s*\)')
lines = open(join(build_dir, 'mpir.sln')).readlines()
s_guid = ''
i_pos = 0
for i, ln in enumerate(lines):
m = re_guid.search(ln)
if m:
if not s_guid:
s_guid = '{' + m.group(1) + '}'
if ln.find(proj_name) != -1:
i_pos = (i, i + 2)
break
if ln.find(r'Global') != -1:
i_pos = (i, i)
break
else:
print('error in updating the solution')
exit()
if i_pos and s_guid:
lines[i_pos[0]:i_pos[1]] = [f6.format(s_guid, proj_name, file_name, guid), f7]
open(join(build_dir, 'mpir.sln'), 'w').writelines(lines)
# compile list of C files
t = find_src(c_directories)
c_hdr_list = t[0]
c_src_list = t[1]
if t[2] or t[3]:
print('found C++ and/or assembler file(s) in a C directory')
if t[2]:
for f in t[2]:
print(f)
print()
if t[3]:
for f in t[3]:
print(f)
print()
# compile list of C++ files
t = find_src(['cxx'])
cc_hdr_list = t[0]
cc_src_list = t[2]
if t[1] or t[3]:
print('found C and/or assembler file(s) in a C++ directory')
if t[1]:
for f in t[1]:
print(f)
print()
if t[3]:
for f in cc_src_list:
print(f)
print()
# compile list of C files in mpn\generic
t = find_src([r'mpn\generic'])
gc_hdr_list = t[0]
gc_src_list = t[1]
if t[2] or t[3]:
print('found C++ and/or assembler file(s) in a C directory')
if t[2]:
for f in gc_hdr_list:
print(f)
print()
if t[3]:
for f in gc_src_list:
print(f)
print()
# prepare the generic C build
mpn_gc = dict((('gc',[ gc_hdr_list, gc_src_list, [], [] ]),))
# prepare the list of Win32 builds
mpn_32 = find_asm(mpir_dir + 'mpn/x86w', gc_src_list)
syms32 = file_symbols(mpn_32)
del mpn_32['']
# prepare the list of x64 builds
mpn_64 = find_asm(mpir_dir + 'mpn/x86_64w', gc_src_list)
syms64 = file_symbols(mpn_64)
del mpn_64['']
if len(argv) != 1 and not (int(argv[1]) & 2):
exit()
nd_gc = len(mpn_gc)
nd_32 = nd_gc + len(mpn_32)
nd_nd = nd_32 + len(mpn_64)
# now ask user which builds they wish to generate
while True:
cnt = 0
for v in sorted(mpn_gc):
cnt += 1
print('{0:2d}. {1:24s} '.format(cnt, v))
for v in sorted(mpn_32):
cnt += 1
print('{0:2d}. {1:24s} (win32)'.format(cnt, v))
for v in sorted(mpn_64):
cnt += 1
print('{0:2d}. {1:24s} (x64)'.format(cnt, v))
s = input('Space separated list of builds (1..{0:d}, 0 to exit)? '.format(cnt))
n_list = [int(c) for c in s.split()]
if 0 in n_list:
exit()
if any(n < 1 or n > nd_nd for n in n_list):
print('list contains invalid build numbers')
sleep(2)
else:
break
# multiple builds must each have their own prebuilds
if len(n_list) > 1:
add_prebuild = True
# now generate the requested builds
for n in n_list:
if 0 < n <= nd_gc:
config = sorted(mpn_gc)[n - 1]
mode = ('Win32', 'x64')
mpn_f = mpn_gc[config]
elif nd_gc < n <= nd_32:
config = sorted(mpn_32)[n - 1 - nd_gc]
if len(argv) == 1 or int(argv[1]) & 1:
gen_have_list(mpn_32[config], syms32, cfg_dir)
mode = ('Win32', )
mpn_f = mpn_32[config]
elif nd_32 < n <= nd_nd:
config = sorted(mpn_64)[n - 1 - nd_32]
if len(argv) == 1 or int(argv[1]) & 1:
gen_have_list(mpn_64[config], syms64, cfg_dir)
mode = ('x64', )
mpn_f = mpn_64[config]
else:
print('internal error')
exit()
if mode[0] == 'x64':
for l in mpn_f[1:]:
for t in l:
if t[0].startswith('preinv_'):
if ('x64' in mode and t[0] == 'preinv_divrem_1'):
l.remove(t)
print(config, mode)
if not add_prebuild:
# generate mpir.h and gmp.h from gmp_h.in
gmp_h = '''
#ifdef _WIN32
# ifdef _WIN64
# define _LONG_LONG_LIMB 1
# define GMP_LIMB_BITS 64
# else
# define GMP_LIMB_BITS 32
# endif
# define __GMP_BITS_PER_MP_LIMB GMP_LIMB_BITS
# define SIZEOF_MP_LIMB_T (GMP_LIMB_BITS >> 3)
# define GMP_NAIL_BITS 0
#endif
'''
try:
lines = open(join(mpir_dir, 'gmp-h.in'), 'r').readlines()
except IOError:
print('error attempting to read from gmp_h.in')
exit()
try:
tfile = join(mpir_dir, 'tmp.h')
with open(tfile, 'w') as outf:
first = True
for line in lines:
if search('@\w+@', line):
if first:
first = False
outf.writelines(gmp_h)
else:
outf.writelines([line])
# write result to mpir.h but only overwrite the existing
# version if this version is different (don't trigger an
# unnecessary rebuild)
write_f(tfile, join(mpir_dir, 'mpir.h'))
write_f(tfile, join(mpir_dir, 'gmp.h'))
unlink(tfile)
except IOError:
print('error attempting to create mpir.h from gmp-h.in')
exit()
# generate config.h
try:
tfile = join(mpir_dir, 'tmp.h')
with open(tfile, 'w') as outf:
for i in sorted(mpn_f[5] + mpn_f[6]):
outf.writelines(['#define HAVE_NATIVE_{0:s} 1\n'.format(i)])
append_f(join(build_dir, 'cfg.h'), tfile)
write_f(tfile, join(mpir_dir, 'config.h'))
unlink(tfile)
except IOError:
print('error attempting to write to {0:s}'.format(tfile))
exit()
# generate longlong.h and copy gmp-mparam.h
try:
li_file = None
for i in mpn_f[0]:
if i[0] == 'longlong_inc':
li_file = join(mpir_dir, join(i[2], r'longlong_inc.h'))
if i[0] == 'gmp-mparam':
write_f(join(mpir_dir, join(i[2], 'gmp-mparam.h')), join(mpir_dir, 'gmp-mparam.h'))
if not li_file or not exists(li_file):
print('error attempting to read {0:s}'.format(li_file))
exit()
tfile = join(mpir_dir, 'tmp.h')
write_f(join(mpir_dir, 'longlong_pre.h'), tfile)
append_f(li_file, tfile)
append_f(join(mpir_dir, 'longlong_post.h'), tfile)
write_f(tfile, join(mpir_dir, 'longlong.h'))
unlink(tfile)
except IOError:
print('error attempting to generate longlong.h')
exit()
# generate the vcxproj and the IDE filter files
# and add/replace project in the solution file
hf_list = ('config.h', 'gmp-impl.h', 'longlong.h', 'mpir.h', 'gmp-mparam.h')
af_list = sorted(mpn_f[2] + mpn_f[3])
proj_name = 'mpir'
cf = config.replace('\\', '_')
# set up DLL build
guid = '{' + str(uuid1()) + '}'
vcx_name = 'dll_mpir_' + cf
vcx_path = 'dll_mpir_' + cf + '\\' + vcx_name + '.vcxproj'
gen_filter(vcx_path + '.filters', hf_list, c_src_list + cc_src_list + mpn_f[1], af_list)
gen_vcxproj(proj_name, vcx_path, guid, config, mode, True, False, hf_list, c_src_list + cc_src_list + mpn_f[1], af_list)
add_proj_to_sln(vcx_name, vcx_path, guid)
# set up LIB build
guid = '{' + str(uuid1()) + '}'
vcx_name = 'lib_mpir_' + cf
vcx_path = 'lib_mpir_' + cf + '\\' + vcx_name + '.vcxproj'
gen_filter(vcx_path + '.filters', hf_list, c_src_list + mpn_f[1], af_list)
gen_vcxproj(proj_name, vcx_path, guid, config, mode, False, False, hf_list, c_src_list + mpn_f[1], af_list)
add_proj_to_sln(vcx_name, vcx_path, guid)
# C++ library build
if add_cpp_lib:
guid = '{' + str(uuid1()) + '}'
proj_name = 'mpirxx'
mode = ('Win32', 'x64')
vcx_name = 'lib_mpir_cxx'
vcx_path = 'lib_mpir_cxx\\' + vcx_name + '.vcxproj'
th = hf_list + ('mpirxx.h',)
gen_filter(vcx_path + '.filters', th, cc_src_list, '')
gen_vcxproj(proj_name, vcx_path, guid, config, mode, False, True, th, cc_src_list, '')
add_proj_to_sln(vcx_name, vcx_path, guid)
# the following code is for diagnostic purposes only
if debug:
for x in sorted(mpn_f[0] + mpn_f[1]):
print(x)
print()
for x in sorted(mpn_f[2] + mpn_f[3]):
print(x)
print()
# mpn_files = dict()
# mpn_files.update(mpn_32)
# mpn_files.update(mpn_64)
for x in mpn_f[config]:
print(x)
if False:
print('1:')
for y in mpn_files[x][0]:
print(y)
print('2:')
for y in mpn_files[x][1]:
print(y)
print('3:')
for y in mpn_files[x][2]:
print(y)
print('4:')
for y in mpn_files[x][3]:
print(y)
print()
for y in sorted(x[2] + x[3]):
print(y)
print()
print()
if debug:
mpn_dirs = ('mpn/generic', 'mpn/x86_64w', 'mpn/x86w' )
# compile a list of files in directories in 'dl' under root 'r' with extension 'p'
def findf(r, dl, p):
l = []
for d in dl:
for root, dirs, files in walk(r + d):
relp = relpath(root, r) # path relative to mpir root directory
if '.svn' in dirs:
dirs.remove('.svn') # ignore SVN directories
if d == '' or root.endswith(build_vc):
for d in reversed(dirs): # don't scan build.vc12 subdirectories
dirs.remove(d)
for f in files:
if f.endswith(p):
l += [(tuple(relp.split('\\')), f)]
return sorted(l)
hdr_list = findf(mpir_dir, c_directories, '.h')
for x in hdr_list:
print(x)
print()
src_list = findf(mpir_dir, c_directories, '.c')
for x in src_list:
print(x)
print()
cpp_list = findf(mpir_dir, ['cpp'], '.cc')
for x in cpp_list:
print(x)
print()
gnc_list = findf(mpir_dir + 'mpn/', ['generic'], '.c')
for x in gnc_list:
print(x)
print()
w32_list = findf(mpir_dir + 'mpn/', ['x86w'], '.asm')
for x in w32_list:
print(x)
print()
x64_list = findf(mpir_dir + 'mpn/', ['x86_64w'], '.asm')
for x in x64_list:
print(x)
print()
nd = dict([])
for d, f in gnc_list:
n, x = splitext(f)
nd[n] = nd.get(n, []) + [(d, 'c')]
for d, f in x64_list:
n, x = splitext(f)
nd[n] = nd.get(n, []) + [(d, 'asm')]
for d, f in w32_list:
n, x = splitext(f)
nd[n] = nd.get(n, []) + [(d, 'asm')]
for x in nd:
print(x, nd[x])
|
libtetcoin/mpir
|
build.vc12/mpir_config.py
|
Python
|
gpl-3.0
| 28,839
|
[
"Brian"
] |
f0912108b20b9b91d6f3d0ec7cceff5878645188546fff6480000e3f65d9e2b5
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class ROrganismdbi(RPackage):
"""The package enables a simple unified interface to several annotation
packages each of which has its own schema by taking advantage of the
fact that each of these packages implements a select methods."""
homepage = "https://bioconductor.org/packages/OrganismDbi/"
git = "https://git.bioconductor.org/packages/OrganismDbi.git"
version('1.18.1', commit='ba2d1237256805e935d9534a0c6f1ded07b42e95')
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-genomicfeatures', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-biocinstaller', type=('build', 'run'))
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('r-graph', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-rbgl', type=('build', 'run'))
depends_on('r-dbi', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r@3.4.0:3.4.9', when='@1.18.1')
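# Note (not in the original recipe): the last depends_on is conditional, so the
# R 3.4.0:3.4.9 requirement only applies when this package is concretized at
# version 1.18.1 (e.g. `spack install r-organismdbi@1.18.1`).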
|
krafczyk/spack
|
var/spack/repos/builtin/packages/r-organismdbi/package.py
|
Python
|
lgpl-2.1
| 2,355
|
[
"Bioconductor"
] |
a8e4032ac27605f700ceb77dfcd8842749aa115263789018d2cc1cdf7d503427
|
#!/usr/bin/python -tt
# Pulls out sequences with organismal name (based on CAMERA Microbial Eukaryotic Transcriptome)
# Any inputs with - or -- are optional and will default to certain values.
# Written by: Christopher R. Main, University of Delaware
# Last Updated: 09/19/2013
# Versions:
# 0.1 - Open fasta file correctly
# 0.2 - Pull record names and parse them
# 0.3 - Print out cluster range that is wanted
# 0.4 - Write wanted sequences to file
# 0.5 - Cosmetic interactions
# 0.6 - Changes to argument handling
# Allow opening of FASTA file
from Bio import SeqIO
# Ready arguments from the commandline
import argparse
# Read and parse the arguments from the command line
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--version", action="version", version='Version 0.6')
parser.add_argument("filename", help="location of FASTA file")
parser.add_argument("out_file", help="filename for output of BLAST search results")
parser.add_argument("strain", help="Strain of Heterosigma")
args = parser.parse_args()
print "Loading %s to memory..." % (args.filename)
handle = open(args.filename, "rU")
# Parse the data file
fasta_parse = list(SeqIO.parse(handle, "fasta"))
# Use taxon ID instead
# Heterosigma = 536047
taxon = "536047"
# Search FASTA file with tab delimited file
print "Searching %s for Heterosigma..." % (args.filename)
# 2 will give you taxon ID, 6 will give you the Strain Name (Based on CAMERA fasta)
records = (r for r in fasta_parse if taxon in r.description.split()[2])
print "Searching for %s..." % (args.strain)
strain = (r for r in records if args.strain in r.description.split()[6])
count = SeqIO.write(strain, args.out_file, "fasta")
print "Saved %i records to %s" % (count, args.out_file)
# Begin for loop to write several files
# for i in range(len_file):
#Write
# output_handle = open(args.out_file + i, "w")
#Write sequences to file
# for i in range(int(args.first), int(args.last) + 1):
# SeqIO.write(records[i], output_handle, "fasta")
# print "Writing %s to file" % (records[i].id)
# output_handle.close()
# print "Writing of %s complete, closing file..." % (args.out_file)
handle.close()
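# Note on the header assumption (no real CAMERA header is reproduced here):
# description.split()[2] and [6] rely on a fixed, whitespace-separated field order in
# the CAMERA FASTA headers, with the taxon id as the third field and the strain name
# as the seventh; a different header layout makes both filters silently match nothing.
# `records` and `strain` are generator expressions, so they are consumed exactly once
# by the SeqIO.write() call above.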
|
calandryll/transcriptome
|
scripts/old/fasta_pull.py
|
Python
|
gpl-2.0
| 2,159
|
[
"BLAST"
] |
c835a77fdf2d717d476b599e7bf7e72cdbc170715f41a2eb659c34fbcfb6690d
|
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2012 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
## last $Author$
## last $Date$
## $Revision$
"""
Settings
========
This module provides global settings as fields. Throughout Biskit a
(environment-dependent) parameter such as, e.g., ssh_bin can be addressed as:
>>> import Biskit.settings as S
>>> bin = S.ssh_bin
However, since a user should not be required to hack python modules,
ssh_bin is not actually defined in settings.py. Instead, the value is
taken from C{~/.biskit/settings.cfg} -- which should have an entry
like C{ssh_bin=/bin/ssh # comment}. If this entry (or the config file)
is not found, settings.py uses the default value from
C{biskit/Biskit/data/defaults/settings.cfg}.
If missing, the user configuration file C{~/.biskit/settings.cfg} is
created automatically during the startup of Biskit (i.e. for any
import). The auto-generated file only contains parameters for which
the default values don't seem to work (invalid paths or binaries).
See L{Biskit.SettingsManager}
Summary for Biskit users
------------------------
If you want to change a biskit parameter, do so in C{~/.biskit/settings.cfg}
Summary for Biskit developers
------------------------------
If you want to create a new user-adjustable parameter, do so in
C{biskit/Biskit/data/defaults/settings.cfg}.
Summary for all
---------------
!Don't touch C{settings.py}!
"""
import Biskit as B
import Biskit.tools as T
import Biskit.SettingsManager as M
import user, sys
__CFG_DEFAULT = T.dataRoot() + '/defaults/settings.cfg'
__CFG_USER = user.home + '/.biskit/settings.cfg'
## BISKIT_PATH = T.projectRoot() ## Hack to make test suite path independent
try:
m = M.SettingsManager(__CFG_DEFAULT, __CFG_USER, createmissing=True )
m.updateNamespace( locals() )
except Exception, why:
B.EHandler.fatal( 'Error importing Biskit settings')
##
## Create some settings on the fly
##
python_bin = sys.executable
projectRoot= T.projectRoot()
pymol_scripts = T.dataRoot() + '/pymol/'
###################################
## required environment variables.
## format: ENV_NAME : path_example
## Todo: These need to go to their Exe_*.dat files
env = {}
blast_env = {'BLASTDB':'/home/Bis/raik/data/prog/blast/db',
'BLASTMA':'/home/Bis/johan/APPLICATIONS/blast'}
amber_env = {'AMBERHOME_8':'/Bis/shared/rh73/amber8_intel-7.1'}
env.update(blast_env)
env.update(amber_env)
######################
## clean up name space
del B, T, M, user, sys
del __CFG_DEFAULT, __CFG_USER, m
################
## empty test ##
import Biskit.test as BT
class Test(BT.BiskitTest):
"""Mock test, settings is always executed anyway."""
pass
|
ostrokach/biskit
|
Biskit/settings.py
|
Python
|
gpl-3.0
| 3,438
|
[
"BLAST",
"PyMOL"
] |
68c7684ccfffd11dde257812670eb0c8e52316c78cd551a7766cde1d20948933
|
#!/usr/bin/env python
#
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Standard setup script.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
from distutils.command.install_data import install_data
from distutils.command.sdist import sdist
from distutils.core import setup
from buildbot_worker import version
class our_install_data(install_data):
def finalize_options(self):
self.set_undefined_options('install',
('install_lib', 'install_dir'),
)
install_data.finalize_options(self)
def run(self):
install_data.run(self)
# ensure there's a buildbot_worker/VERSION file
fn = os.path.join(self.install_dir, 'buildbot_worker', 'VERSION')
with open(fn, 'w') as f:
f.write(version)
self.outfiles.append(fn)
class our_sdist(sdist):
def make_release_tree(self, base_dir, files):
sdist.make_release_tree(self, base_dir, files)
# ensure there's a buildbot_worker/VERSION file
fn = os.path.join(base_dir, 'buildbot_worker', 'VERSION')
open(fn, 'w').write(version)
# ensure that NEWS has a copy of the latest release notes, copied from
# the master tree, with the proper version substituted
src_fn = os.path.join('..', 'master', 'docs', 'relnotes/index.rst')
with open(src_fn) as f:
src = f.read()
src = src.replace('|version|', version)
dst_fn = os.path.join(base_dir, 'NEWS')
with open(dst_fn, 'w') as f:
f.write(src)
setup_args = {
'name': "buildbot-worker",
'version': version,
'description': "Buildbot Worker Daemon",
'long_description': "See the 'buildbot' package for details",
'author': "Brian Warner",
'author_email': "warner-buildbot@lothar.com",
'maintainer': "Dustin J. Mitchell",
'maintainer_email': "dustin@v.igoro.us",
'url': "http://buildbot.net/",
'license': "GNU GPL",
'classifiers': [
'Development Status :: 5 - Production/Stable',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Testing',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
'packages': [
"buildbot_worker",
"buildbot_worker.util",
"buildbot_worker.backports",
"buildbot_worker.commands",
"buildbot_worker.scripts",
"buildbot_worker.monkeypatches",
"buildbot_worker.test",
"buildbot_worker.test.fake",
"buildbot_worker.test.unit",
"buildbot_worker.test.util",
],
# mention data_files, even if empty, so install_data is called and
# VERSION gets copied
'data_files': [("buildbot_worker", [])],
'cmdclass': {
'install_data': our_install_data,
'sdist': our_sdist
},
'entry_points': {
'console_scripts': [
'buildbot-worker=buildbot_worker.scripts.runner:run',
# this will also be shipped on non windows :-(
'buildbot_worker_windows_service=buildbot_worker.scripts.windows_service:HandleCommandLine',
]}
}
# set zip_safe to false to force Windows installs to always unpack eggs
# into directories, which seems to work better --
# see http://buildbot.net/trac/ticket/907
if sys.platform == "win32":
setup_args['zip_safe'] = False
if sys.version_info[0] >= 3:
twisted_ver = ">= 17.5.0"
else:
twisted_ver = ">= 10.2.0"
try:
# If setuptools is installed, then we'll add setuptools-specific arguments
# to the setup args.
import setuptools # @UnusedImport
except ImportError:
pass
else:
setup_args['install_requires'] = [
'twisted ' + twisted_ver,
'future',
]
# Unit test hard dependencies.
test_deps = [
'mock',
]
setup_args['tests_require'] = test_deps
setup_args['extras_require'] = {
'test': [
'pep8',
# spellcheck introduced in version 1.4.0
'pylint>=1.4.0',
'pyenchant',
'flake8~=2.6.0',
] + test_deps,
}
if '--help-commands' in sys.argv or 'trial' in sys.argv or 'test' in sys.argv:
setup_args['setup_requires'] = [
'setuptools_trial',
]
if os.getenv('NO_INSTALL_REQS'):
setup_args['install_requires'] = None
setup_args['extras_require'] = None
setup(**setup_args)
|
protatremy/buildbot
|
worker/setup.py
|
Python
|
gpl-2.0
| 5,545
|
[
"Brian"
] |
1ddac18b3c0f203fef8b83fe7ce1d5d7af64ab061ab61bff399febc72dc951ce
|
import lb_loader
import pandas as pd
import simtk.openmm.app as app
import numpy as np
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems, integrators
precision = "mixed"
testsystem = testsystems.DHFRExplicit(nonbondedCutoff=1.1*u.nanometers, nonbondedMethod=app.PME, switch_width=2.0*u.angstroms, ewaldErrorTolerance=5E-5)
system, topology, positions = testsystem.system, testsystem.topology, testsystem.positions
platform = mm.Platform.getPlatformByName('CUDA')
properties = {'CudaPrecision': precision}
# `temperature` and `integrator` were used below before being defined; assumed values added here.
temperature = 300.0 * u.kelvin
integrator = mm.LangevinIntegrator(temperature, 1.0 / u.picoseconds, 1.0 * u.femtoseconds)
simulation = app.Simulation(topology, system, integrator, platform=platform, platformProperties=properties)
simulation.context.setPositions(positions)
simulation.context.setVelocitiesToTemperature(temperature)
hmc_integrators.guess_force_groups(system, nonbonded=1, others=0, fft=2)
del simulation, integrator
timestep = 2.0 * u.femtoseconds
#integrator = mm.LangevinIntegrator(temperature, 2.0 / u.picoseconds, timestep)
#integrator = mm.VerletIntegrator(timestep)
total_steps = 3000
extra_chances = 3
steps_per_hmc = 100
steps = total_steps
steps = total_steps / steps_per_hmc
#integrator = hmc_integrators.GHMCIntegrator(temperature, steps_per_hmc=steps_per_hmc, timestep=timestep)
integrator = hmc_integrators.XCGHMCIntegrator(temperature, steps_per_hmc=steps_per_hmc, timestep=timestep, extra_chances=extra_chances)
#integrator = integrators.VelocityVerletIntegrator(2.0 * u.femtoseconds)
simulation = lb_loader.build(testsystem, integrator, temperature, precision=precision)
integrator.reset_time()
import time
t0 = time.time()
integrator.step(steps)
dt = time.time() - t0
ns_per_day = (timestep / u.nanoseconds) * total_steps / dt * 60 * 60 * 24
dt, ns_per_day
integrator.ns_per_day
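# Worked example of the throughput arithmetic above (wall time is hypothetical):
# timestep = 2 fs and total_steps = 3000 give 0.006 ns of dynamics, so a run that
# took dt = 10 s of wall time reports ns_per_day = 0.006 / 10 * 86400 ~= 51.8.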
|
kyleabeauchamp/HMCNotes
|
code/misc/testing_force_group_speed.py
|
Python
|
gpl-2.0
| 1,760
|
[
"OpenMM"
] |
d72a46a4713b2da8522bb2ebfcdd32527e51083e0b95913caedbe4ab71c222dd
|
########################################################################
#
# (C) 2013, James Cammarata <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import sys
import yaml
import time
from collections import defaultdict
from jinja2 import Environment
import ansible.constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.token import GalaxyToken
from ansible.playbook.role.requirement import RoleRequirement
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyCLI(CLI):
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )
VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup")
def __init__(self, args):
self.api = None
self.galaxy = None
super(GalaxyCLI, self).__init__(args)
def parse(self):
''' create an options parser for bin/ansible '''
self.parser = CLI.base_parser(
usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS),
epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
)
self.set_action()
# options specific to actions
if self.action == "delete":
self.parser.set_usage("usage: %prog delete [options] github_user github_repo")
elif self.action == "import":
self.parser.set_usage("usage: %prog import [options] github_user github_repo")
self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True,
help='Don\'t wait for import results.')
self.parser.add_option('--branch', dest='reference',
help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
self.parser.add_option('--status', dest='check_status', action='store_true', default=False,
help='Check the status of the most recent import request for given github_user/github_repo.')
elif self.action == "info":
self.parser.set_usage("usage: %prog info [options] role_name[,version]")
elif self.action == "init":
self.parser.set_usage("usage: %prog init [options] role_name")
self.parser.add_option('-p', '--init-path', dest='init_path', default="./",
help='The path in which the skeleton role will be created. The default is the current working directory.')
self.parser.add_option(
'--offline', dest='offline', default=False, action='store_true',
help="Don't query the galaxy API when creating roles")
elif self.action == "install":
self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors and continue with the next specified role.')
self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help='Don\'t download roles listed as dependencies')
self.parser.add_option('-r', '--role-file', dest='role_file',
help='A file containing a list of roles to be imported')
elif self.action == "remove":
self.parser.set_usage("usage: %prog remove role1 role2 ...")
elif self.action == "list":
self.parser.set_usage("usage: %prog list [role_name]")
elif self.action == "login":
self.parser.set_usage("usage: %prog login [options]")
self.parser.add_option('--github-token', dest='token', default=None,
help='Identify with github token rather than username and password.')
elif self.action == "search":
self.parser.add_option('--platforms', dest='platforms',
help='list of OS platforms to filter by')
self.parser.add_option('--galaxy-tags', dest='tags',
help='list of galaxy tags to filter by')
self.parser.add_option('--author', dest='author',
help='GitHub username')
self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] [--author username]")
elif self.action == "setup":
self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
self.parser.add_option('--remove', dest='remove_id', default=None,
help='Remove the integration matching the provided ID value. Use --list to see ID values.')
self.parser.add_option('--list', dest="setup_list", action='store_true', default=False,
help='List all of your integrations.')
# options that apply to more than one action
if not self.action in ("delete","import","init","login","setup"):
self.parser.add_option('-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH,
help='The path to the directory containing your roles. '
'The default is the roles_path configured in your '
'ansible.cfg file (/etc/ansible/roles if not configured)')
if self.action in ("import","info","init","install","login","search","setup","delete"):
self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER,
help='The API server destination')
self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=False,
help='Ignore SSL certificate validation errors.')
if self.action in ("init","install"):
self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False,
help='Force overwriting an existing role')
self.options, self.args =self.parser.parse_args()
display.verbosity = self.options.verbosity
self.galaxy = Galaxy(self.options)
return True
def run(self):
super(GalaxyCLI, self).run()
# if not offline, get connect to galaxy api
if self.action in ("import","info","install","search","login","setup","delete") or \
(self.action == 'init' and not self.options.offline):
self.api = GalaxyAPI(self.galaxy)
self.execute()
def exit_without_ignore(self, rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not self.get_opt("ignore_errors", False):
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
def _display_role_info(self, role_info):
text = "\nRole: %s \n" % role_info['name']
text += "\tdescription: %s \n" % role_info.get('description', '')
for k in sorted(role_info.keys()):
if k in self.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text += "\t%s: \n" % (k)
for key in sorted(role_info[k].keys()):
if key in self.SKIP_INFO_KEYS:
continue
text += "\t\t%s: %s\n" % (key, role_info[k][key])
else:
text += "\t%s: %s\n" % (k, role_info[k])
return text
############################
# execute actions
############################
def execute_init(self):
"""
Executes the init action, which creates the skeleton framework
of a role that complies with the galaxy metadata format.
"""
init_path = self.get_opt('init_path', './')
force = self.get_opt('force', False)
offline = self.get_opt('offline', False)
role_name = self.args.pop(0).strip() if self.args else None
if not role_name:
raise AnsibleOptionsError("- no role name specified for init")
role_path = os.path.join(init_path, role_name)
if os.path.exists(role_path):
if os.path.isfile(role_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
elif not force:
raise AnsibleError("- the directory %s already exists."
"you can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % role_path)
# create default README.md
if not os.path.exists(role_path):
os.makedirs(role_path)
readme_path = os.path.join(role_path, "README.md")
f = open(readme_path, "wb")
f.write(self.galaxy.default_readme)
f.close()
# create default .travis.yml
travis = Environment().from_string(self.galaxy.default_travis).render()
f = open(os.path.join(role_path, '.travis.yml'), 'w')
f.write(travis)
f.close()
for dir in GalaxyRole.ROLE_DIRS:
dir_path = os.path.join(init_path, role_name, dir)
main_yml_path = os.path.join(dir_path, 'main.yml')
# create the directory if it doesn't exist already
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# now create the main.yml file for that directory
if dir == "meta":
# create a skeleton meta/main.yml with a valid galaxy_info
# datastructure in place, plus with all of the available
# platforms included (but commented out), the galaxy_tags
# list, and the dependencies section
platforms = []
if not offline and self.api:
platforms = self.api.get_list("platforms") or []
# group the list of platforms from the api based
# on their names, with the release field being
# appended to a list of versions
platform_groups = defaultdict(list)
for platform in platforms:
platform_groups[platform['name']].append(platform['release'])
platform_groups[platform['name']].sort()
inject = dict(
author = 'your name',
company = 'your company (optional)',
license = 'license (GPLv2, CC-BY, etc)',
issue_tracker_url = 'http://example.com/issue/tracker',
min_ansible_version = '1.2',
platforms = platform_groups,
)
rendered_meta = Environment().from_string(self.galaxy.default_meta).render(inject)
f = open(main_yml_path, 'w')
f.write(rendered_meta)
f.close()
pass
elif dir == "tests":
# create tests/test.yml
inject = dict(
role_name = role_name
)
playbook = Environment().from_string(self.galaxy.default_test).render(inject)
f = open(os.path.join(dir_path, 'test.yml'), 'w')
f.write(playbook)
f.close()
# create tests/inventory
f = open(os.path.join(dir_path, 'inventory'), 'w')
f.write('localhost')
f.close()
elif dir not in ('files','templates'):
# just write a (mostly) empty YAML file for main.yml
f = open(main_yml_path, 'w')
f.write('---\n# %s file for %s\n' % (dir,role_name))
f.close()
display.display("- %s was created successfully" % role_name)
def execute_info(self):
"""
Executes the info action. This action prints out detailed
information about an installed role as well as info available
from the galaxy API.
"""
if len(self.args) == 0:
# the user needs to specify a role
raise AnsibleOptionsError("- you must specify a user/role name")
roles_path = self.get_opt("roles_path")
data = ''
for role in self.args:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = False
if self.api:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
role_info.update(remote_data)
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec= req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data += self._display_role_info(role_info)
if not data:
data += "\n- the role %s was not found" % role
self.pager(data)
def execute_install(self):
"""
Executes the installation action. The args list contains the
roles to be installed, unless a role file was specified with -r. The list of roles
can be a name (which will be downloaded via the galaxy API and github),
or it can be a local .tar.gz file.
"""
role_file = self.get_opt("role_file", None)
if len(self.args) == 0 and role_file is None:
# the user needs to specify one of either --role-file
# or specify a single user/role name
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
elif len(self.args) == 1 and role_file is not None:
# using a role file is mutually exclusive with specifying
# the role name on the command line
raise AnsibleOptionsError("- please specify a user/role name, or a roles file, but not both")
no_deps = self.get_opt("no_deps", False)
force = self.get_opt('force', False)
roles_left = []
if role_file:
try:
f = open(role_file, 'r')
if role_file.endswith('.yaml') or role_file.endswith('.yml'):
try:
required_roles = yaml.safe_load(f.read())
except Exception as e:
raise AnsibleError("Unable to load data from the requirements file: %s" % role_file)
if required_roles is None:
raise AnsibleError("No roles found in file: %s" % role_file)
for role in required_roles:
role = RoleRequirement.role_yaml_parse(role)
display.vvv('found role %s in yaml file' % str(role))
if 'name' not in role and 'scm' not in role:
raise AnsibleError("Must specify name or src for role")
roles_left.append(GalaxyRole(self.galaxy, **role))
else:
display.deprecated("going forward only the yaml format will be supported")
# roles listed in a file, one per line
for rline in f.readlines():
if rline.startswith("#") or rline.strip() == '':
continue
display.debug('found role %s in text file' % str(rline))
role = RoleRequirement.role_yaml_parse(rline.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
f.close()
except (IOError, OSError) as e:
display.error('Unable to open %s: %s' % (role_file, str(e)))
else:
# roles were specified directly, so we'll just go out and grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in self.args:
roles_left.append(GalaxyRole(self.galaxy, rname.strip()))
for role in roles_left:
display.vvv('Installing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None and not force:
display.display('- %s is already installed, skipping.' % role.name)
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning("- %s was NOT installed successfully: %s " % (role.name, str(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
role_dependencies = role.metadata.get('dependencies') or []
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None or force:
if dep_role not in roles_left:
display.display('- adding dependency: %s' % dep_role.name)
roles_left.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
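# Hedged, illustrative sketch (not part of GalaxyCLI): the shape of the YAML role file that
# execute_install consumes above. The keys shown (src, scm, version, name) mirror the ones
# handled by RoleRequirement.role_yaml_parse; the URL and role name are hypothetical.
def _example_requirements_yaml():
    import yaml
    sample = (
        "- src: https://github.com/example/some-role.git\n"
        "  scm: git\n"
        "  version: master\n"
        "  name: some_role\n"
    )
    # mirrors the yaml branch of execute_install: each entry becomes a role spec dict
    return [RoleRequirement.role_yaml_parse(entry) for entry in yaml.safe_load(sample)]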
def execute_remove(self):
"""
Executes the remove action. The args list contains the list
of roles to be removed. This list can contain more than one role.
"""
if len(self.args) == 0:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in self.args:
role = GalaxyRole(self.galaxy, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, str(e)))
return 0
def execute_list(self):
"""
Executes the list action. The args list can contain zero
or one role. If one is specified, only that role will be
shown, otherwise all roles in the specified directory will
be shown.
"""
if len(self.args) > 1:
raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list")
if len(self.args) == 1:
# show only the requested role, if it exists
name = self.args.pop()
gr = GalaxyRole(self.galaxy, name)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
# show some more info about single roles here
display.display("- %s, %s" % (name, version))
else:
display.display("- the role %s was not found" % name)
else:
# show all valid roles in the roles_path directory
roles_path = self.get_opt('roles_path')
roles_path = os.path.expanduser(roles_path)
if not os.path.exists(roles_path):
raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path)
elif not os.path.isdir(roles_path):
raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path)
path_files = os.listdir(roles_path)
for path_file in path_files:
gr = GalaxyRole(self.galaxy, path_file)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (path_file, version))
return 0
def execute_search(self):
page_size = 1000
search = None
if len(self.args):
terms = []
for i in range(len(self.args)):
terms.append(self.args.pop())
search = '+'.join(terms[::-1])
if not search and not self.options.platforms and not self.options.tags and not self.options.author:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=self.options.platforms,
tags=self.options.tags, author=self.options.author, page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color="yellow")
return True
data = ''
if response['count'] > page_size:
data += ("\nFound %d roles matching your search. Showing first %s.\n" % (response['count'], page_size))
else:
data += ("\nFound %d roles matching your search:\n" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = " %%-%ds %%s\n" % name_len
data +='\n'
data += (format_str % ("Name", "Description"))
data += (format_str % ("----", "-----------"))
for role in response['results']:
data += (format_str % (role['username'] + '.' + role['name'],role['description']))
self.pager(data)
return True
def execute_login(self):
"""
Verify the user's identity via GitHub and retrieve an auth token from Galaxy.
"""
# Authenticate with github and retrieve a token
if self.options.token is None:
login = GalaxyLogin(self.galaxy)
github_token = login.create_github_token()
else:
github_token = self.options.token
galaxy_response = self.api.authenticate(github_token)
if self.options.token is None:
# Remove the token we created
login.remove_github_token()
# Store the Galaxy token
token = GalaxyToken()
token.set(galaxy_response['token'])
display.display("Succesfully logged into Galaxy as %s" % galaxy_response['username'])
return 0
def execute_import(self):
"""
Import a role into Galaxy
"""
colors = {
'INFO': 'normal',
'WARNING': 'yellow',
'ERROR': 'red',
'SUCCESS': 'green',
'FAILED': 'red'
}
if len(self.args) < 2:
raise AnsibleError("Expected a github_username and github_repository. Use --help.")
github_repo = self.args.pop()
github_user = self.args.pop()
if self.options.check_status:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference)
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user,github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color='yellow')
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'],t['summary_fields']['role']['name']), color='yellow')
display.display(u'\n' + "To properly namespace this role, remove each of the above and re-import %s/%s from scratch" % (github_user,github_repo),
color='yellow')
return 0
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not self.options.wait:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'],task[0]['github_repo']))
if self.options.check_status or self.options.wait:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if task[0]['state'] in ['SUCCESS', 'FAILED']:
finished = True
else:
time.sleep(10)
return 0
def execute_setup(self):
"""
Setup an integration from Github or Travis
"""
if self.options.setup_list:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color="green")
display.display("---------- ---------- ----------", color="green")
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']),color="green")
return 0
if self.options.remove_id:
# Remove a secret
self.api.remove_secret(self.options.remove_id)
display.display("Secret removed. Integrations using this secret will not longer work.", color="green")
return 0
if len(self.args) < 4:
raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret")
secret = self.args.pop()
github_repo = self.args.pop()
github_user = self.args.pop()
source = self.args.pop()
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
"""
Delete a role from galaxy.ansible.com
"""
if len(self.args) < 2:
raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo")
github_repo = self.args.pop()
github_user = self.args.pop()
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id,role.namespace,role.name))
display.display(resp['status'])
return True
|
dermute/ansible
|
lib/ansible/cli/galaxy.py
|
Python
|
gpl-3.0
| 29,958
|
[
"Galaxy"
] |
20f606cee43a988afd5cecb76dc22eae7dcdda6feb5252c7926a314dd9fc3c8a
|
import warnings
warnings.simplefilter('always', DeprecationWarning)
import os
import functools
import os.path
import re
import urllib
import urllib2
import imp
import tabulate
from connection import H2OConnection
from job import H2OJob
from expr import ExprNode
from frame import H2OFrame, _py_tmp_key
from model import H2OBinomialModel,H2OAutoEncoderModel,H2OClusteringModel,H2OMultinomialModel,H2ORegressionModel
import h2o_model_builder
def lazy_import(path):
"""
Import a single file or collection of files.
Parameters
----------
path : str
A path to a data file (remote or local).
:return: A new H2OFrame
"""
return [_import(p)[0] for p in path] if isinstance(path,(list,tuple)) else _import(path)
def _import(path):
j = H2OConnection.get_json(url_suffix="ImportFiles", path=path)
if j['fails']: raise ValueError("ImportFiles of " + path + " failed on " + str(j['fails']))
return j['destination_frames']
def upload_file(path, destination_frame=""):
"""
Upload a dataset from the local machine, at the given path, to the H2O cluster.
Parameters
----------
path : str
A path specifying the location of the data to upload.
destination_frame : str
The name of the frame in the H2O cluster.
:return: A new H2OFrame
"""
fui = {"file": os.path.abspath(path)}
destination_frame = _py_tmp_key() if destination_frame == "" else destination_frame
H2OConnection.post_json(url_suffix="PostFile", file_upload_info=fui,destination_frame=destination_frame)
return H2OFrame(raw_id=destination_frame)
def import_file(path=None):
"""
Import a frame from a file (remote or local machine). If you run H2O on Hadoop, you can also access HDFS.
Parameters
----------
path : str
A path specifying the location of the data to import.
:return: A new H2OFrame
"""
return H2OFrame(file_path=path)
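# A hedged usage sketch of the three import helpers above. The CSV path is hypothetical and a
# running H2O cluster (see init() further down) is assumed; this wrapper is illustrative only.
def _example_import(csv_path="data/iris.csv"):
    frame = import_file(path=csv_path)     # parse a remote/local file straight into an H2OFrame
    uploaded = upload_file(csv_path)       # or push a file from the local machine to the cluster
    staged_ids = lazy_import(csv_path)     # or stage the file without parsing it yet
    return frame, uploaded, staged_ids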
def parse_setup(raw_frames):
"""
Parameters
----------
raw_frames : H2OFrame
A collection of imported file frames
:return: A ParseSetup "object"
"""
# The H2O backend only accepts things that are quoted
if isinstance(raw_frames, unicode): raw_frames = [raw_frames]
j = H2OConnection.post_json(url_suffix="ParseSetup", source_frames=[_quoted(id) for id in raw_frames])
return j
def parse(setup, h2o_name, first_line_is_header=(-1, 0, 1)):
"""
Trigger a parse; blocking; removeFrame just keeps the Vecs.
Parameters
----------
setup : dict
The result of calling parse_setup.
h2o_name : str
The name of the H2OFrame on the back end.
first_line_is_header : int
-1 means data, 0 means guess, 1 means header.
:return: A new parsed object
"""
# Parse parameters (None values provided by setup)
p = { 'destination_frame' : h2o_name,
'parse_type' : None,
'separator' : None,
'single_quotes' : None,
'check_header' : None,
'number_columns' : None,
'chunk_size' : None,
'delete_on_done' : True,
'blocking' : False,
}
if isinstance(first_line_is_header, tuple):
first_line_is_header = setup["check_header"]
if setup["column_names"]:
setup["column_names"] = [_quoted(name) for name in setup["column_names"]]
p["column_names"] = None
if setup["column_types"]:
setup["column_types"] = [_quoted(name) for name in setup["column_types"]]
p["column_types"] = None
if setup["na_strings"]:
setup["na_strings"] = [[_quoted(na) for na in col] if col is not None else [] for col in setup["na_strings"]]
p["na_strings"] = None
# update the parse parameters with the parse_setup values
p.update({k: v for k, v in setup.iteritems() if k in p})
p["check_header"] = first_line_is_header
# Extract only 'name' from each src in the array of srcs
p['source_frames'] = [_quoted(src['name']) for src in setup['source_frames']]
# Request blocking parse
j = H2OJob(H2OConnection.post_json(url_suffix="Parse", **p), "Parse").poll()
return j.jobs
def parse_raw(setup, id=None, first_line_is_header=(-1, 0, 1)):
"""
Used in conjunction with lazy_import and parse_setup in order to make alterations before
parsing.
Parameters
----------
setup : dict
Result of h2o.parse_setup
id : str
An optional id for the frame.
first_line_is_header : int
-1,0,1 if the first line is to be used as the header
:return: An H2OFrame object
"""
id = setup["destination_frame"]
fr = H2OFrame()
parsed = parse(setup, id, first_line_is_header)
fr._computed = True
fr._id = id
fr._keep = True
fr._nrows = int(H2OFrame(expr=ExprNode("nrow", fr))._scalar()) #parsed['rows']
fr._ncols = parsed["number_columns"]
fr._col_names = parsed['column_names'] if parsed["column_names"] else ["C" + str(x) for x in range(1,fr._ncols+1)]
return fr
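# A hedged sketch of the two-step import/parse workflow documented above: stage the file,
# let H2O guess a parse setup, optionally tweak it, then parse. The path is hypothetical.
def _example_raw_parse(path="data/iris.csv"):
    raw_ids = lazy_import(path)    # destination frame id(s); data not parsed yet
    setup = parse_setup(raw_ids)   # guessed column names/types/separator, as a dict
    # ...alterations to `setup` (e.g. setup["column_types"]) could be made here...
    return parse_raw(setup)        # build the H2OFrame from the (possibly edited) setup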
def _quoted(key):
if key is None: return "\"\""
is_quoted = len(re.findall(r'\"(.+?)\"', key)) != 0
key = key if is_quoted else "\"" + key + "\""
return key
def assign(data,id):
if data._computed:
rapids(data._id,id)
data._id = id
data._keep=True # named things are always safe
return data
def which(condition):
"""
Parameters
----------
condition : H2OFrame
A conditional statement.
:return: A H2OFrame of 1 column filled with 0-based indices for which the condition is True
"""
return H2OFrame(expr=ExprNode("h2o.which",condition))._frame()
def ifelse(test,yes,no):
"""
Semantically equivalent to R's ifelse.
Based on the booleans in the test vector, the output has the values of the yes and no
vectors interleaved (or merged together).
Parameters
----------
test : H2OFrame
A "test" H2OFrame
yes : H2OFrame
A "yes" H2OFrame
no : H2OFrame
A "no" H2OFrame
:return: An H2OFrame
"""
return H2OFrame(expr=ExprNode("ifelse",test,yes,no))._frame()
def get_future_model(future_model):
"""
Waits for the future model to finish building, and then returns the model.
Parameters
----------
future_model : H2OModelFuture
an H2OModelFuture object
:return: a resolved model (i.e. an H2OBinomialModel, H2ORegressionModel, H2OMultinomialModel, ...)
"""
return h2o_model_builder._resolve_model(future_model)
def get_model(model_id):
"""
Return the specified model
Parameters
----------
model_id : str
The model identification in h2o
:return: H2OModel
"""
model_json = H2OConnection.get_json("Models/"+model_id)["models"][0]
model_type = model_json["output"]["model_category"]
if model_type=="Binomial": return H2OBinomialModel(model_id, model_json)
elif model_type=="Clustering": return H2OClusteringModel(model_id, model_json)
elif model_type=="Regression": return H2ORegressionModel(model_id, model_json)
elif model_type=="Multinomial": return H2OMultinomialModel(model_id, model_json)
elif model_type=="AutoEncoder": return H2OAutoEncoderModel(model_id, model_json)
else: raise NotImplementedError(model_type)
def get_frame(frame_id):
"""
Obtain a handle to the frame in H2O with the frame_id key.
:return: An H2OFrame
"""
return H2OFrame.get_frame(frame_id)
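# A hedged sketch: re-attach handles to objects that already live in the cluster. The ids are
# whatever keys were previously created; nothing here is specific to a model type.
def _example_lookup(model_id, frame_id):
    model = get_model(model_id)   # concrete subclass depends on the reported model category
    frame = get_frame(frame_id)   # H2OFrame handle for an existing key
    return model, frame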
def ou():
"""
Where is my baguette!?
:return: the name of the baguette. oh uhr uhr huhr
"""
from inspect import stack
return stack()[2][1]
def no_progress():
"""
Disable the progress bar from flushing to stdout. The completed progress bar is printed
when a job is complete so as to demarcate a log file.
:return: None
"""
H2OJob.__PROGRESS_BAR__ = False
def show_progress():
"""
Enable the progress bar. (Progress bar is enabled by default).
:return: None
"""
H2OJob.__PROGRESS_BAR__ = True
def log_and_echo(message):
"""
Log a message on the server-side logs
This is helpful when running several pieces of work one after the other on a single H2O
cluster and you want to make a notation in the H2O server side log where one piece of
work ends and the next piece of work begins.
Sends a message to H2O for logging. Generally used for debugging purposes.
Parameters
----------
message : str
A character string with the message to write to the log.
:return: None
"""
if message is None: message = ""
H2OConnection.post_json("LogAndEcho", message=message)
def remove(object):
"""
Remove object from H2O. This is a "hard" delete of the object. It removes all subparts.
Parameters
----------
object : H2OFrame or str
The object pointing to the object to be removed.
:return: None
"""
if object is None:
raise ValueError("remove with no object is not supported, for your protection")
if isinstance(object, H2OFrame): H2OConnection.delete("DKV/"+object._id)
if isinstance(object, str): H2OConnection.delete("DKV/"+object)
def remove_all():
"""
Remove all objects from H2O.
:return: None
"""
H2OConnection.delete("DKV")
def removeFrameShallow(key):
"""
Do a shallow DKV remove of the frame (does not remove any internal Vecs).
This is a "soft" delete. Just removes the top level pointer, but all big data remains!
Parameters
----------
key : str
A Frame Key to be removed
:return: None
"""
rapids("(removeframe '"+key+"')")
return None
def rapids(expr, id=None):
"""
Fire off a Rapids expression.
Parameters
----------
expr : str
The rapids expression (ascii string).
:return: The JSON response of the Rapids execution
"""
return H2OConnection.post_json("Rapids", ast=urllib.quote(expr), _rest_version=99) if id is None else H2OConnection.post_json("Rapids", ast=urllib.quote(expr), id=id, _rest_version=99)
def ls():
"""
List Keys on an H2O Cluster
:return: Returns a list of keys in the current H2O instance
"""
return H2OFrame(expr=ExprNode("ls")).as_data_frame()
def frame(frame_id, exclude=""):
"""
Retrieve metadata for a id that points to a Frame.
Parameters
----------
frame_id : str
A pointer to a Frame in H2O.
:return: Meta information on the frame
"""
return H2OConnection.get_json("Frames/" + urllib.quote(frame_id+exclude))
def frames():
"""
Retrieve all the Frames.
:return: Meta information on the frames
"""
return H2OConnection.get_json("Frames")
def download_pojo(model,path="", get_jar=True):
"""
Download the POJO for this model to the directory specified by path (no trailing slash!).
If path is "", then dump to screen.
Parameters
----------
model : H2OModel
Retrieve this model's scoring POJO.
path : str
An absolute path to the directory where POJO should be saved.
get_jar : bool
Retrieve the h2o genmodel jar as well.
:return: None
"""
java = H2OConnection.get( "Models.java/"+model._id )
file_path = path + "/" + model._id + ".java"
if path == "": print java.text
else:
with open(file_path, 'w') as f:
f.write(java.text)
if get_jar and path!="":
url = H2OConnection.make_url("h2o-genmodel.jar")
filename = path + "/" + "h2o-genmodel.jar"
response = urllib2.urlopen(url)
with open(filename, "wb") as f:
f.write(response.read())
def download_csv(data, filename):
"""
Download an H2O data set to a CSV file on the local disk.
Warning: Files located on the H2O server may be very large! Make sure you have enough hard drive space to accommodate the entire file.
Parameters
----------
data : H2OFrame
An H2OFrame object to be downloaded.
filename : str
A string indicating the name that the CSV file should be saved as.
:return: None
"""
if not isinstance(data, H2OFrame): raise ValueError("`data` argument must be an H2OFrame, but got " + str(type(data)))
data._eager()
url = "http://{}:{}/3/DownloadDataset?frame_id={}".format(H2OConnection.ip(),H2OConnection.port(),data._id)
with open(filename, 'w') as f: f.write(urllib2.urlopen(url).read())
def download_all_logs(dirname=".",filename=None):
"""
Download H2O Log Files to Disk
Parameters
----------
dirname : str
(Optional) A character string indicating the directory that the log file should be saved in.
filename : str
(Optional) A string indicating the name the log file should be saved as.
:return: path of logs written (as a string)
"""
url = 'http://{}:{}/Logs/download'.format(H2OConnection.ip(),H2OConnection.port())
response = urllib2.urlopen(url)
if not os.path.exists(dirname): os.mkdir(dirname)
if filename is None:
for h in response.headers.headers:
if 'filename=' in h:
filename = h.split("filename=")[1].strip()
break
path = os.path.join(dirname,filename)
print "Writing H2O logs to " + path
with open(path, 'w') as f: f.write(urllib2.urlopen(url).read())
return path
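# A hedged sketch of the download helpers above; the output paths are hypothetical and a
# connected cluster is assumed.
def _example_downloads(frame, model):
    download_csv(frame, "frame_export.csv")          # write the frame to a local CSV
    download_pojo(model, path="")                    # path="" dumps the POJO to the screen
    return download_all_logs(dirname="./h2o_logs")   # returns the path of the written logs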
def save_model(model, path="", force=False):
"""
Save an H2O Model Object to Disk.
Parameters
----------
model : H2OModel
The model object to save.
path : str
A path to save the model at (hdfs, s3, local)
force : bool
Overwrite destination directory in case it exists or throw exception if set to false.
:return: the path of the saved model (string)
"""
path=os.path.join(os.getcwd() if path=="" else path,model._id)
return H2OConnection.get_json("Models.bin/"+model._id,dir=path,force=force,_rest_version=99)["dir"]
def load_model(path):
"""
Load a saved H2O model from disk.
Example:
>>> path = h2o.save_model(my_model,dir=my_path)
>>> h2o.load_model(path) # use the result of save_model
Parameters
----------
path : str
The full path of the H2O Model to be imported.
:return: the model
"""
res = H2OConnection.post_json("Models.bin/",dir=path,_rest_version=99)
return get_model(res['models'][0]['model_id']['name'])
def cluster_status():
"""
TODO: This isn't really a cluster status... it's a node status check for the node we're connected to.
This is possibly confusing because this can come back without warning,
but if a user tries to do any remoteSend, they will get a "cloud sick warning"
Retrieve information on the status of the cluster running H2O.
:return: None
"""
cluster_json = H2OConnection.get_json("Cloud?skip_ticks=true")
print "Version: {0}".format(cluster_json['version'])
print "Cloud name: {0}".format(cluster_json['cloud_name'])
print "Cloud size: {0}".format(cluster_json['cloud_size'])
if cluster_json['locked']: print "Cloud is locked\n"
else: print "Accepting new members\n"
if cluster_json['nodes'] is None or len(cluster_json['nodes']) == 0:
print "No nodes found"
return
status = []
for node in cluster_json['nodes']:
for k, v in zip(node.keys(),node.values()):
if k in ["h2o", "healthy", "last_ping", "num_cpus", "sys_load", "mem_value_size", "total_value_size",
"free_mem", "tot_mem", "max_mem", "free_disk", "max_disk", "pid", "num_keys", "tcps_active",
"open_fds", "rpcs_active"]: status.append(k+": {0}".format(v))
print ', '.join(status)
print
def init(ip="localhost", port=54321, size=1, start_h2o=False, enable_assertions=False,
license=None, max_mem_size_GB=None, min_mem_size_GB=None, ice_root=None, strict_version_check=False):
"""
Initiate an H2O connection to the specified ip and port.
Parameters
----------
ip : str
A string representing the hostname or IP address of the server where H2O is running.
port : int
A port, default is 54321
size : int
The expected number of h2o instances (ignored if start_h2o is True)
start_h2o : bool
A boolean dictating whether this module should start the H2O jvm. An attempt is made anyway if _connect fails.
enable_assertions : bool
If start_h2o, pass `-ea` as a VM option.
license : str
If not None, is a path to a license file.
max_mem_size_GB : int
Maximum heap size (jvm option Xmx) in gigabytes.
min_mem_size_GB : int
Minimum heap size (jvm option Xms) in gigabytes.
ice_root : str
A temporary directory (default location is determined by tempfile.mkdtemp()) to hold H2O log files.
:return: None
"""
H2OConnection(ip=ip, port=port,start_h2o=start_h2o,enable_assertions=enable_assertions,license=license,max_mem_size_GB=max_mem_size_GB,min_mem_size_GB=min_mem_size_GB,ice_root=ice_root,strict_version_check=strict_version_check)
return None
def export_file(frame,path,force=False):
"""
Export a given H2OFrame to a path on the machine this python session is currently connected to. To view the current session, call h2o.cluster_info().
Parameters
----------
frame : H2OFrame
The Frame to save to disk.
path : str
The path to the save point on disk.
force : bool
Overwrite any preexisting file with the same path
:return: None
"""
frame._eager()
H2OJob(H2OConnection.get_json("Frames/"+frame._id+"/export/"+path+"/overwrite/"+("true" if force else "false")), "Export File").poll()
def cluster_info():
"""
Display the current H2O cluster information.
:return: None
"""
H2OConnection._cluster_info()
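# A hedged connection sketch: assumes an H2O instance is already running on localhost:54321
# (start_h2o defaults to False above, so nothing is launched by this call).
def _example_connect():
    init(ip="localhost", port=54321)  # attach this Python session to the running instance
    cluster_info()                    # print a cluster summary
    cluster_status()                  # print per-node status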
def shutdown(conn=None, prompt=True):
"""
Shut down the specified instance. All data will be lost.
This method checks if H2O is running at the specified IP address and port, and if it is, shuts down that H2O instance.
Parameters
----------
conn : H2OConnection
An H2OConnection object containing the IP address and port of the server running H2O.
prompt : bool
A logical value indicating whether to prompt the user before shutting down the H2O server.
:return: None
"""
if conn is None: conn = H2OConnection.current_connection()
H2OConnection._shutdown(conn=conn, prompt=prompt)
def deeplearning(x,y=None,validation_x=None,validation_y=None,training_frame=None,model_id=None,
overwrite_with_best_model=None,validation_frame=None,checkpoint=None,autoencoder=None,
use_all_factor_levels=None,activation=None,hidden=None,epochs=None,train_samples_per_iteration=None,
seed=None,adaptive_rate=None,rho=None,epsilon=None,rate=None,rate_annealing=None,rate_decay=None,
momentum_start=None,momentum_ramp=None,momentum_stable=None,nesterov_accelerated_gradient=None,
input_dropout_ratio=None,hidden_dropout_ratios=None,l1=None,l2=None,max_w2=None,initial_weight_distribution=None,
initial_weight_scale=None,loss=None,distribution=None,tweedie_power=None,score_interval=None,score_training_samples=None,
score_validation_samples=None,score_duty_cycle=None,classification_stop=None,regression_stop=None,quiet_mode=None,
max_confusion_matrix_size=None,max_hit_ratio_k=None,balance_classes=None,class_sampling_factors=None,
max_after_balance_size=None,score_validation_sampling=None,diagnostics=None,variable_importances=None,
fast_mode=None,ignore_const_cols=None,force_load_balance=None,replicate_training_data=None,single_node_mode=None,
shuffle_training_data=None,sparse=None,col_major=None,average_activation=None,sparsity_beta=None,
max_categorical_features=None,reproducible=None,export_weights_and_biases=None,offset_column=None,weights_column=None,
nfolds=None,fold_column=None,fold_assignment=None,keep_cross_validation_predictions=None):
"""
Build a supervised Deep Learning model
Performs Deep Learning neural networks on an H2OFrame
Parameters
----------
x : H2OFrame
An H2OFrame containing the predictors in the model.
y : H2OFrame
An H2OFrame of the response variable in the model.
training_frame : H2OFrame
(Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x.
model_id : str
(Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated.
overwrite_with_best_model : bool
Logical. If True, overwrite the final model with the best model found during training. Defaults to True.
validation_frame : H2OFrame
(Optional) An H2OFrame object indicating the validation dataset used to construct the confusion matrix. If left blank, this defaults to the
training data when nfolds = 0
checkpoint : H2ODeepLearningModel
"Model checkpoint (either key or H2ODeepLearningModel) to resume training with."
autoencoder : bool
Enable auto-encoder for model building.
use_all_factor_levels : bool
Logical. Use all factor levels of categorical variables. Otherwise the first factor level is omitted (without loss of accuracy). Useful for variable
importances and auto-enabled for autoencoder.
activation : str
A string indicating the activation function to use. Must be either "Tanh", "TanhWithDropout", "Rectifier", "RectifierWithDropout", "Maxout", or "MaxoutWithDropout"
hidden : list
Hidden layer sizes (e.g. c(100,100))
epochs : float
How many times the dataset should be iterated (streamed), can be fractional
train_samples_per_iteration : int
Number of training samples (globally) per MapReduce iteration. Special values are: 0 one epoch; -1 all available data (e.g., replicated training data);
or -2 auto-tuning (default)
seed : int
Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded
adaptive_rate : bool
Logical. Adaptive learning rate (ADAELTA)
rho : float
Adaptive learning rate time decay factor (similarity to prior updates)
epsilon : float
Adaptive learning rate parameter, similar to learn rate annealing during initial training phase. Typical values are between 1.0e-10 and 1.0e-4
rate : float
Learning rate (higher => less stable, lower => slower convergence)
rate_annealing : float
Learning rate annealing: rate / (1 + rate_annealing * samples)
rate_decay : float
Learning rate decay factor between layers (N-th layer: rate * alpha^(N-1))
momentum_start : float
Initial momentum at the beginning of training (try 0.5)
momentum_ramp : float
Number of training samples for which momentum increases
momentum_stable : float
Final momentum after the ramp is over (try 0.99)
nesterov_accelerated_gradient : bool
Logical. Use Nesterov accelerated gradient (recommended)
input_dropout_ratio : float
A fraction of the features for each training row to be omitted from training in order to improve generalization (dimension sampling).
hidden_dropout_ratios : float
Hidden layer dropout ratios (can improve generalization); specify one value per hidden layer, defaults to 0.5
l1 : float
L1 regularization (can add stability and improve generalization, causes many weights to become 0)
l2 : float
L2 regularization (can add stability and improve generalization, causes many weights to be small)
max_w2 : float
Constraint for squared sum of incoming weights per unit (e.g. Rectifier)
initial_weight_distribution : str
Can be "Uniform", "UniformAdaptive", or "Normal"
initial_weight_scale : str
Uniform: -value ... value, Normal: stddev
loss : str
Loss function: "Automatic", "CrossEntropy" (for classification only), "MeanSquare", "Absolute" (experimental) or "Huber" (experimental)
distribution : str
A character string. The distribution function of the response. Must be "AUTO", "bernoulli", "multinomial", "poisson", "gamma", "tweedie", "laplace",
"huber" or "gaussian"
tweedie_power : float
Tweedie power (only for Tweedie distribution, must be between 1 and 2)
score_interval : int
Shortest time interval (in secs) between model scoring
score_training_samples : int
Number of training set samples for scoring (0 for all)
score_validation_samples : int
Number of validation set samples for scoring (0 for all)
score_duty_cycle : float
Maximum duty cycle fraction for scoring (lower: more training, higher: more scoring)
classification_stop : float
Stopping criterion for classification error fraction on training data (-1 to disable)
regression_stop : float
Stopping criterion for regression error (MSE) on training data (-1 to disable)
quiet_mode : bool
Enable quiet mode for less output to standard output
max_confusion_matrix_size : int
Max. size (number of classes) for confusion matrices to be shown
max_hit_ratio_k : float
Max number (top K) of predictions to use for hit ratio computation (for multi-class only, 0 to disable)
balance_classes : bool
Balance training data class counts via over/under-sampling (for imbalanced data)
class_sampling_factors : list
Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling factors will be automatically computed to
obtain class balance during training. Requires balance_classes.
max_after_balance_size : float
Maximum relative size of the training data after balancing class counts (can be less than 1.0)
score_validation_sampling :
Method used to sample validation dataset for scoring
diagnostics : bool
Enable diagnostics for hidden layers
variable_importances : bool
Compute variable importances for input features (Gedeon method) - can be slow for large networks
fast_mode : bool
Enable fast mode (minor approximations in back-propagation)
ignore_const_cols : bool
Ignore constant columns (no information can be gained anyway)
force_load_balance : bool
Force extra load balancing to increase training speed for small datasets (to keep all cores busy)
replicate_training_data : bool
Replicate the entire training dataset onto every node for faster training
single_node_mode : bool
Run on a single node for fine-tuning of model parameters
shuffle_training_data : bool
Enable shuffling of training data (recommended if training data is replicated and train_samples_per_iteration is close to numRows * numNodes)
sparse : bool
Sparse data handling (Experimental)
col_major : bool
Use a column major weight matrix for input layer. Can speed up forward propagation, but might slow down backpropagation (Experimental)
average_activation : float
Average activation for sparse auto-encoder (Experimental)
sparsity_beta : bool
Sparsity regularization (Experimental)
max_categorical_features : int
Max. number of categorical features, enforced via hashing (Experimental)
reproducible : bool
Force reproducibility on small data (will be slow - only uses 1 thread)
export_weights_and_biases : bool
Whether to export Neural Network weights and biases to H2O Frames
offset_column : H2OFrame
Specify the offset column.
weights_column : H2OFrame
Specify the weights column.
nfolds : int
(Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty.
fold_column : H2OFrame
(Optional) Column with cross-validation fold index assignment per observation
fold_assignment : str
Cross-validation fold assignment scheme, if fold_column is not specified Must be "AUTO", "Random" or "Modulo"
keep_cross_validation_predictions : bool
Whether to keep the predictions of the cross-validation models
:return: Return a new classifier or regression model.
"""
parms = {k:v for k,v in locals().items() if k in ["y","training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
parms["algo"]="deeplearning"
return h2o_model_builder.supervised(parms)
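# A hedged usage sketch for deeplearning(): `predictors` and `response` are assumed to be
# H2OFrames already split out by the caller; the layer sizes and epochs are illustrative only.
def _example_deeplearning(predictors, response):
    return deeplearning(x=predictors, y=response,
                        activation="Rectifier", hidden=[50, 50], epochs=10)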
def autoencoder(x,training_frame=None,model_id=None,overwrite_with_best_model=None,checkpoint=None,
use_all_factor_levels=None,activation=None,hidden=None,epochs=None,train_samples_per_iteration=None,
seed=None,adaptive_rate=None,rho=None,epsilon=None,rate=None,rate_annealing=None,rate_decay=None,
momentum_start=None,momentum_ramp=None,momentum_stable=None,nesterov_accelerated_gradient=None,
input_dropout_ratio=None,hidden_dropout_ratios=None,l1=None,l2=None,max_w2=None,initial_weight_distribution=None,
initial_weight_scale=None,loss=None,distribution=None,tweedie_power=None,score_interval=None,score_training_samples=None,
score_duty_cycle=None,classification_stop=None,regression_stop=None,quiet_mode=None,
max_confusion_matrix_size=None,max_hit_ratio_k=None,balance_classes=None,class_sampling_factors=None,
max_after_balance_size=None,diagnostics=None,variable_importances=None,
fast_mode=None,ignore_const_cols=None,force_load_balance=None,replicate_training_data=None,single_node_mode=None,
shuffle_training_data=None,sparse=None,col_major=None,average_activation=None,sparsity_beta=None,
max_categorical_features=None,reproducible=None,export_weights_and_biases=None):
"""
Build unsupervised auto encoder using H2O Deeplearning
Parameters
----------
x : H2OFrame
An H2OFrame containing the predictors in the model.
training_frame : H2OFrame
(Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x.
model_id : str
(Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated.
overwrite_with_best_model : bool
Logical. If True, overwrite the final model with the best model found during training. Defaults to True.
checkpoint : H2ODeepLearningModel
"Model checkpoint (either key or H2ODeepLearningModel) to resume training with."
use_all_factor_levels : bool
Logical. Use all factor levels of categorical variables. Otherwise the first factor level is omitted (without loss of accuracy).
Useful for variable importances and auto-enabled for autoencoder.
activation : str
A string indicating the activation function to use. Must be either "Tanh", "TanhWithDropout", "Rectifier", "RectifierWithDropout", "Maxout", or "MaxoutWithDropout"
hidden : list
Hidden layer sizes (e.g. c(100,100))
epochs : float
How many times the dataset should be iterated (streamed), can be fractional
train_samples_per_iteration : int
Number of training samples (globally) per MapReduce iteration. Special values are: 0 one epoch; -1 all available data
(e.g., replicated training data); or -2 auto-tuning (default)
seed : int
Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded
adaptive_rate : bool
Logical. Adaptive learning rate (ADAELTA)
rho : float
Adaptive learning rate time decay factor (similarity to prior updates)
epsilon : float
Adaptive learning rate parameter, similar to learn rate annealing during initial training phase. Typical values are between 1.0e-10 and 1.0e-4
rate : float
Learning rate (higher => less stable, lower => slower convergence)
rate_annealing : float
Learning rate annealing: rate / (1 + rate_annealing * samples)
rate_decay : float
Learning rate decay factor between layers (N-th layer: rate * alpha^(N-1))
momentum_start : float
Initial momentum at the beginning of training (try 0.5)
momentum_ramp : int
Number of training samples for which momentum increases
momentum_stable : float
Final momentum after the ramp is over (try 0.99)
nesterov_accelerated_gradient : bool
Logical. Use Nesterov accelerated gradient (recommended)
input_dropout_ratio : float
A fraction of the features for each training row to be omitted from training in order to improve generalization (dimension sampling).
hidden_dropout_ratios : float
Hidden layer dropout ratios (can improve generalization); specify one value per hidden layer, defaults to 0.5
l1 : float
L1 regularization (can add stability and improve generalization, causes many weights to become 0)
l2 : float
L2 regularization (can add stability and improve generalization, causes many weights to be small)
max_w2 : float
Constraint for squared sum of incoming weights per unit (e.g. Rectifier)
initial_weight_distribution : str
Can be "Uniform", "UniformAdaptive", or "Normal"
initial_weight_scale : str
Uniform: -value ... value, Normal: stddev
loss : str
Loss function: "Automatic", "CrossEntropy" (for classification only), "MeanSquare", "Absolute" (experimental) or "Huber" (experimental)
distribution : str
A character string. The distribution function of the response. Must be "AUTO", "bernoulli", "multinomial", "poisson", "gamma",
"tweedie", "laplace", "huber" or "gaussian"
tweedie_power : float
Tweedie power (only for Tweedie distribution, must be between 1 and 2)
score_interval : int
Shortest time interval (in secs) between model scoring
score_training_samples : int
Number of training set samples for scoring (0 for all)
score_duty_cycle : float
Maximum duty cycle fraction for scoring (lower: more training, higher: more scoring)
classification_stop : float
Stopping criterion for classification error fraction on training data (-1 to disable)
regression_stop : float
Stopping criterion for regression error (MSE) on training data (-1 to disable)
quiet_mode : bool
Enable quiet mode for less output to standard output
max_confusion_matrix_size : int
Max. size (number of classes) for confusion matrices to be shown
max_hit_ratio_k : float
Max number (top K) of predictions to use for hit ratio computation (for multi-class only, 0 to disable)
balance_classes : bool
Balance training data class counts via over/under-sampling (for imbalanced data)
class_sampling_factors : list
Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling factors will be automatically computed to obtain
class balance during training. Requires balance_classes.
max_after_balance_size : float
Maximum relative size of the training data after balancing class counts (can be less than 1.0)
diagnostics : bool
Enable diagnostics for hidden layers
variable_importances : bool
Compute variable importances for input features (Gedeon method) - can be slow for large networks
fast_mode : bool
Enable fast mode (minor approximations in back-propagation)
ignore_const_cols : bool
Ignore constant columns (no information can be gained anyway)
force_load_balance : bool
Force extra load balancing to increase training speed for small datasets (to keep all cores busy)
replicate_training_data : bool
Replicate the entire training dataset onto every node for faster training
single_node_mode : bool
Run on a single node for fine-tuning of model parameters
shuffle_training_data : bool
Enable shuffling of training data (recommended if training data is replicated and train_samples_per_iteration is close to numRows * numNodes)
sparse : bool
Sparse data handling (Experimental)
col_major : bool
Use a column major weight matrix for input layer. Can speed up forward propagation, but might slow down backpropagation (Experimental)
average_activation : float
Average activation for sparse auto-encoder (Experimental)
sparsity_beta : float
Sparsity regularization (Experimental)
max_categorical_features : int
Max. number of categorical features, enforced via hashing (Experimental)
reproducible : bool
Force reproducibility on small data (will be slow - only uses 1 thread)
export_weights_and_biases : bool
Whether to export Neural Network weights and biases to H2O Frames
:return: H2OAutoEncoderModel
"""
parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
parms["algo"]="deeplearning"
parms["autoencoder"]=True
return h2o_model_builder.unsupervised(parms)
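# A hedged usage sketch for autoencoder(): a small bottleneck network over an H2OFrame of
# predictors supplied by the caller; the layer sizes and epochs are illustrative only.
def _example_autoencoder(predictors):
    return autoencoder(x=predictors, activation="Tanh", hidden=[20, 10, 20], epochs=5)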
def gbm(x,y,validation_x=None,validation_y=None,training_frame=None,model_id=None,
distribution=None,tweedie_power=None,ntrees=None,max_depth=None,min_rows=None,
learn_rate=None,nbins=None,nbins_cats=None,validation_frame=None,
balance_classes=None,max_after_balance_size=None,seed=None,build_tree_one_node=None,
nfolds=None,fold_column=None,fold_assignment=None,keep_cross_validation_predictions=None,
score_each_iteration=None,offset_column=None,weights_column=None,do_future=None,checkpoint=None):
"""
Builds gradient boosted classification trees, and gradient boosted regression trees on a parsed data set.
The default distribution function will guess the model type based on the response column type. To run properly, the
response column must be numeric for "gaussian" or an enum for "bernoulli" or "multinomial".
Parameters
----------
x : H2OFrame
An H2OFrame containing the predictors in the model.
y : H2OFrame
An H2OFrame of the response variable in the model.
training_frame : H2OFrame
(Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x.
model_id : str
(Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated.
distribution : str
A character string. The distribution function of the response. Must be "AUTO", "bernoulli", "multinomial", "poisson", "gamma", "tweedie" or "gaussian"
tweedie_power : float
Tweedie power (only for Tweedie distribution, must be between 1 and 2)
ntrees : int
A non-negative integer that determines the number of trees to grow.
max_depth : int
Maximum depth to grow the tree.
min_rows : int
Minimum number of rows to assign to terminal nodes.
learn_rate : float
A number from 0.0 to 1.0
nbins : int
For numerical columns (real/int), build a histogram of this many bins, then split at the best point
nbins_cats : int
For categorical columns (enum), build a histogram of this many bins, then split at the best point. Higher values can lead to more overfitting.
validation_frame : H2OFrame
An H2OFrame object indicating the validation dataset used to construct the confusion matrix. If left blank, this defaults to the training data when nfolds = 0
balance_classes : bool
logical, indicates whether or not to balance training data class counts via over/under-sampling (for imbalanced data)
max_after_balance_size : float
Maximum relative size of the training data after balancing class counts (can be less than 1.0)
seed : int
Seed for random numbers (affects sampling when balance_classes=T)
build_tree_one_node : bool
Run on one node only; no network overhead but fewer cpus used. Suitable for small datasets.
nfolds : int
(Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty.
fold_column : H2OFrame
(Optional) Column with cross-validation fold index assignment per observation
fold_assignment : str
Cross-validation fold assignment scheme, if fold_column is not specified Must be "AUTO", "Random" or "Modulo"
keep_cross_validation_predictions : bool
Whether to keep the predictions of the cross-validation models
score_each_iteration : bool
Attempts to score each tree.
offset_column : H2OFrame
Specify the offset column.
weights_column : H2OFrame
Specify the weights column.
:return: A new classifier or regression model.
"""
parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
parms["algo"]="gbm"
return h2o_model_builder.supervised(parms)
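# A hedged usage sketch for gbm(): a binary-classification boosting run using a few of the
# knobs documented above; the predictor/response frames are assumed to be prepared by the caller.
def _example_gbm(predictors, response):
    return gbm(x=predictors, y=response, distribution="bernoulli",
               ntrees=50, max_depth=5, learn_rate=0.1)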
def glm(x,y,validation_x=None,validation_y=None,training_frame=None,model_id=None,validation_frame=None,
max_iterations=None,beta_epsilon=None,solver=None,standardize=None,family=None,link=None,
tweedie_variance_power=None,tweedie_link_power=None,alpha=None,prior=None,lambda_search=None,
nlambdas=None,lambda_min_ratio=None,beta_constraints=None,offset_column=None,weights_column=None,
nfolds=None,fold_column=None,fold_assignment=None,keep_cross_validation_predictions=None,
intercept=None, Lambda=None, max_active_predictors=None, do_future=None, checkpoint=None):
"""
Build a Generalized Linear Model
Fit a generalized linear model, specified by a response variable, a set of predictors, and a description of the error
distribution.
Parameters
----------
x : H2OFrame
An H2OFrame containing the predictors in the model.
y : H2OFrame
An H2OFrame of the response variable in the model.
training_frame : H2OFrame
(Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x.
model_id : str
(Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated.
validation_frame : H2OFrame
An H2OFrame object containing the variables in the model.
max_iterations : int
A non-negative integer specifying the maximum number of iterations.
beta_epsilon : int
A non-negative number specifying the magnitude of the maximum difference between the coefficient estimates from successive iterations.
Defines the convergence criterion for h2o.glm.
solver : str
A character string specifying the solver used: IRLSM (supports more features), L_BFGS (scales better for datasets with many columns)
standardize : bool
A logical value indicating whether the numeric predictors should be standardized to have a mean of 0 and a variance of 1 prior to training the models.
family : str
A character string specifying the distribution of the model: gaussian, binomial, poisson, gamma, tweedie.
link : str
A character string specifying the link function. The default is the canonical link for the family.
The supported links for each of the family specifications are:\n
"gaussian": "identity", "log", "inverse"\n
"binomial": "logit", "log"
"poisson": "log", "identity"
"gamma": "inverse", "log", "identity"
"tweedie": "tweedie"
tweedie_variance_power : int
A numeric specifying the power for the variance function when family = "tweedie".
tweedie_link_power : int
A numeric specifying the power for the link function when family = "tweedie".
alpha : float
A numeric in [0, 1] specifying the elastic-net mixing parameter.
The elastic-net penalty is defined to be:
P(alpha, beta) = (1 - alpha)/2 * ||beta||_2^2 + alpha * ||beta||_1 = sum_j [ (1 - alpha)/2 * beta_j^2 + alpha * |beta_j| ],
making alpha = 1 the lasso penalty and alpha = 0 the ridge penalty.
Lambda : float
A non-negative shrinkage parameter for the elastic-net, which multiplies P(alpha, beta) in the objective function.
When Lambda = 0, no elastic-net penalty is applied and ordinary generalized linear models are fit.
prior : float
(Optional) A numeric specifying the prior probability of class 1 in the response when family = "binomial". The default prior is the observational frequency of class 1.
lambda_search : bool
A logical value indicating whether to conduct a search over the space of lambda values starting from the lambda max, given lambda is interpreted as lambda min.
nlambdas : int
The number of lambda values to use when lambda_search = TRUE.
lambda_min_ratio : float
Smallest value for lambda as a fraction of lambda.max. By default if the number of observations is greater than the number of
variables then lambda_min_ratio = 0.0001; if the number of observations is less than the number of variables then lambda_min_ratio = 0.01.
beta_constraints : H2OFrame
A data.frame or H2OParsedData object with the columns ["names", "lower_bounds", "upper_bounds", "beta_given"], where each row corresponds to a predictor
in the GLM. "names" contains the predictor names, "lower"/"upper_bounds", are the lower and upper bounds of beta, and "beta_given" is some supplied starting
values for the
offset_column : H2OFrame
Specify the offset column.
weights_column : H2OFrame
Specify the weights column.
nfolds : int
(Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty.
fold_column : H2OFrame
(Optional) Column with cross-validation fold index assignment per observation
fold_assignment : str
Cross-validation fold assignment scheme, if fold_column is not specified Must be "AUTO", "Random" or "Modulo"
keep_cross_validation_predictions : bool
Whether to keep the predictions of the cross-validation models
intercept : bool
Logical, include constant term (intercept) in the model
max_active_predictors : int
(Optional) Convergence criteria for number of predictors when using L1 penalty.
Returns: A subclass of ModelBase is returned. The specific subclass depends on the machine learning task at hand (if
it's binomial classification, then an H2OBinomialModel is returned, if it's regression then a H2ORegressionModel is
returned). The default print-out of the models is shown, but further GLM-specific information can be queried out of
the object.
Upon completion of the GLM, the resulting object has coefficients, normalized coefficients, residual/null deviance,
aic, and a host of model metrics including MSE, AUC (for logistic regression), degrees of freedom, and confusion
matrices.
"""
parms = {k.lower():v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
if "alpha" in parms and not isinstance(parms["alpha"], (list,tuple)): parms["alpha"] = [parms["alpha"]]
parms["algo"]="glm"
return h2o_model_builder.supervised(parms)
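# A minimal, hypothetical usage sketch (not part of the original module): fit a binomial
# GLM on an assumed, pre-existing H2OFrame `df` with predictor columns "a"/"b" and a
# binary response "label". The frame and column names are illustrative only, and a
# running H2O cluster is assumed; keyword names follow the docstring above.
def _example_glm_usage(df):
    model = glm(x=df[["a", "b"]], y=df["label"], family="binomial",
                alpha=0.5, Lambda=1e-5, nfolds=3)
    return model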
def start_glm_job(x,y,validation_x=None,validation_y=None,**kwargs):
"""
Build a Generalized Linear Model
Note: this function is the same as glm(), but it doesn't block on model-build. Instead, it returns and H2OModelFuture
object immediately. The model can be retrieved from the H2OModelFuture object with get_future_model().
:return: H2OModelFuture
"""
kwargs["do_future"] = True
return glm(x,y,validation_x,validation_y,**kwargs)
def kmeans(x,validation_x=None,k=None,model_id=None,max_iterations=None,standardize=None,init=None,seed=None,
nfolds=None,fold_column=None,fold_assignment=None,training_frame=None,validation_frame=None,
user_points=None,ignored_columns=None,score_each_iteration=None,keep_cross_validation_predictions=None,
ignore_const_cols=None,checkpoint=None):
"""
Performs k-means clustering on an H2O dataset.
Parameters
----------
x : H2OFrame
The data columns on which k-means operates.\n
k : int
The number of clusters. Must be between 1 and 1e7 inclusive. k may be omitted if the user specifies the
initial centers in the init parameter. If k is specified together with user-supplied centers, it must equal the number of
user-specified centers.
model_id : str
(Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated.
max_iterations : int
The maximum number of iterations allowed. Must be between 0 and 1e6 inclusive.
standardize : bool
Indicates whether the data should be standardized before running k-means.
init : str
A character string that selects the initial set of k cluster centers. Possible values are "Random": for
random initialization, "PlusPlus": for k-means plus-plus initialization, or "Furthest": for initialization at the furthest
point from each successive center. Additionally, the user may specify the initial centers as a matrix, data.frame,
H2OFrame, or list of vectors. For matrices, data.frames, and H2OFrames, each row of the respective structure is an
initial center. For lists of vectors, each vector is an initial center.
seed : int
(Optional) Random seed used to initialize the cluster centroids.
nfolds : int
(Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty.
fold_column : H2OFrame
(Optional) Column with cross-validation fold index assignment per observation
fold_assignment : str
Cross-validation fold assignment scheme, if fold_column is not specified. Must be "AUTO", "Random" or "Modulo"
:return: An instance of H2OClusteringModel.
"""
parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
parms["algo"]="kmeans"
return h2o_model_builder.unsupervised(parms)
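# A minimal, hypothetical usage sketch (not part of the original module): cluster an
# assumed, pre-existing H2OFrame `df` into 3 groups with plus-plus style seeding.
# The frame is an assumption; parameter names follow the signature and docstring above.
def _example_kmeans_usage(df):
    return kmeans(x=df, k=3, max_iterations=50, standardize=True,
                  init="PlusPlus", seed=1234)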
def random_forest(x,y,validation_x=None,validation_y=None,training_frame=None,model_id=None,mtries=None,sample_rate=None,
build_tree_one_node=None,ntrees=None,max_depth=None,min_rows=None,nbins=None,nbins_cats=None,
binomial_double_trees=None,validation_frame=None,balance_classes=None,max_after_balance_size=None,
seed=None,offset_column=None,weights_column=None,nfolds=None,fold_column=None,fold_assignment=None,
keep_cross_validation_predictions=None,checkpoint=None):
"""
Build a Big Data Random Forest Model
Builds a Random Forest Model on an H2OFrame
Parameters
----------
x : H2OFrame
An H2OFrame containing the predictors in the model.
y : H2OFrame
An H2OFrame of the response variable in the model.
training_frame : H2OFrame
(Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x.
model_id : str
(Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated.
mtries : int
Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt(p) for classification, and p/3 for regression,
where p is the number of predictors.
sample_rate : float
Sample rate, from 0 to 1.0.
build_tree_one_node : bool
Run on one node only; no network overhead but fewer cpus used. Suitable for small datasets.
ntrees : int
A nonnegative integer that determines the number of trees to grow.
max_depth : int
Maximum depth to grow the tree.
min_rows : int
Minimum number of rows to assign to terminal nodes.
nbins : int
For numerical columns (real/int), build a histogram of this many bins, then split at the best point.
nbins_cats : int
For categorical columns (enum), build a histogram of this many bins, then split at the best point. Higher values can lead to more overfitting.
binomial_double_trees : bool
For binary classification: build 2x as many trees (one per class) - can lead to higher accuracy.
validation_frame : H2OFrame
An H2OFrame object containing the variables in the model.
balance_classes : bool
logical, indicates whether or not to balance training data class counts via over/under-sampling (for imbalanced data)
max_after_balance_size : float
Maximum relative size of the training data after balancing class counts (can be less than 1.0)
seed : int
Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded
offset_column : H2OFrame
Specify the offset column.
weights_column : H2OFrame
Specify the weights column.
nfolds : int
(Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty.
fold_column : H2OFrame
(Optional) Column with cross-validation fold index assignment per observation
fold_assignment : str
Cross-validation fold assignment scheme, if fold_column is not specified. Must be "AUTO", "Random" or "Modulo"
keep_cross_validation_predictions : bool
Whether to keep the predictions of the cross-validation models
:return: A new classifier or regression model.
"""
parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
parms["algo"]="drf"
return h2o_model_builder.supervised(parms)
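# A minimal, hypothetical usage sketch (not part of the original module): train a small
# random forest on an assumed H2OFrame `df` with predictors "a", "b", "c" and response
# "label". Frame and column names are illustrative; keywords follow the signature above.
def _example_random_forest_usage(df):
    return random_forest(x=df[["a", "b", "c"]], y=df["label"],
                         ntrees=50, max_depth=20, min_rows=10, seed=42)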
def prcomp(x,validation_x=None,k=None,model_id=None,max_iterations=None,transform=None,seed=None,use_all_factor_levels=None,
training_frame=None,validation_frame=None,pca_method=None):
"""
Principal components analysis of a H2O dataset.
Parameters
----------
k : int
The number of principal components to be computed. This must be between 1 and min(ncol(training_frame), nrow(training_frame)) inclusive.
model_id : str
(Optional) The unique hex key assigned to the resulting model. Automatically generated if none is provided.
max_iterations : int
The maximum number of iterations to run each power iteration loop. Must be between 1 and 1e6 inclusive.
transform : str
A character string that indicates how the training data should be transformed before running PCA.
Possible values are "NONE": for no transformation, "DEMEAN": for subtracting the mean of each column, "DESCALE":
for dividing by the standard deviation of each column, "STANDARDIZE": for demeaning and descaling, and "NORMALIZE":
for demeaning and dividing each column by its range (max - min).
seed : int
(Optional) Random seed used to initialize the right singular vectors at the beginning of each power method iteration.
use_all_factor_levels : bool
(Optional) A logical value indicating whether all factor levels should be included in each categorical column expansion.
If FALSE, the indicator column corresponding to the first factor level of every categorical variable will be dropped. Defaults to FALSE.
pca_method : str
A character string that indicates how PCA should be calculated.
Possible values are "GramSVD": distributed computation of the Gram matrix followed by a local SVD using the JAMA package,
"Power": computation of the SVD using the power iteration method, "GLRM": fit a generalized low rank model with an l2 loss function
(no regularization) and solve for the SVD using local matrix algebra.
:return: a new dim reduction model
"""
parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
parms["algo"]="pca"
return h2o_model_builder.unsupervised(parms)
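# A minimal, hypothetical usage sketch (not part of the original module): compute the
# first 5 principal components of an assumed H2OFrame `df`, standardizing columns first.
def _example_prcomp_usage(df):
    return prcomp(x=df, k=5, transform="STANDARDIZE", pca_method="GramSVD")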
def svd(x,validation_x=None,training_frame=None,validation_frame=None,nv=None,max_iterations=None,transform=None,seed=None,
use_all_factor_levels=None,svd_method=None):
"""
Singular value decomposition of a H2O dataset.
Parameters
----------
nv : int
The number of right singular vectors to be computed. This must be between 1 and min(ncol(training_frame), nrow(training_frame)) inclusive.
max_iterations : int
The maximum number of iterations to run each power iteration loop. Must be between 1 and 1e6 inclusive.
transform : str
A character string that indicates how the training data should be transformed before running SVD.
Possible values are "NONE": for no transformation, "DEMEAN": for subtracting the mean of each column, "DESCALE": for
dividing by the standard deviation of each column, "STANDARDIZE": for demeaning and descaling, and "NORMALIZE": for
demeaning and dividing each column by its range (max - min).
seed : int
(Optional) Random seed used to initialize the right singular vectors at the beginning of each power method iteration.
use_all_factor_levels : bool
(Optional) A logical value indicating whether all factor levels should be included in each categorical column expansion.
If FALSE, the indicator column corresponding to the first factor level of every categorical variable will be dropped. Defaults to TRUE.
svd_method : str
A character string that indicates how SVD should be calculated.
Possible values are "GramSVD": distributed computation of the Gram matrix followed by a local SVD using the JAMA package,
"Power": computation of the SVD using the power iteration method, "Randomized": approximate SVD by projecting onto a random subspace.
:return: a new dim reduction model
"""
parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
parms["algo"]="svd"
parms['_rest_version']=99
return h2o_model_builder.unsupervised(parms)
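# A minimal, hypothetical usage sketch (not part of the original module): compute 4 right
# singular vectors of an assumed H2OFrame `df` using the power iteration method.
def _example_svd_usage(df):
    return svd(x=df, nv=4, max_iterations=1000, transform="DEMEAN", svd_method="Power")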
def glrm(x,validation_x=None,training_frame=None,validation_frame=None,k=None,max_iterations=None,transform=None,seed=None,
ignore_const_cols=None,loss=None,multi_loss=None,loss_by_col=None,loss_by_col_idx=None,regularization_x=None,
regularization_y=None,gamma_x=None,gamma_y=None,init_step_size=None,min_step_size=None,init=None,user_points=None,recover_svd=None):
"""
Builds a generalized low rank model of a H2O dataset.
Parameters
----------
k : int
The rank of the resulting decomposition. This must be between 1 and the number of columns in the training frame inclusive.
max_iterations : int
The maximum number of iterations to run the optimization loop. Each iteration consists of an update of the X matrix, followed by an
update of the Y matrix.
transform : str
A character string that indicates how the training data should be transformed before running GLRM.
Possible values are "NONE": for no transformation, "DEMEAN": for subtracting the mean of each column, "DESCALE": for
dividing by the standard deviation of each column, "STANDARDIZE": for demeaning and descaling, and "NORMALIZE": for
demeaning and dividing each column by its range (max - min).
seed : int
(Optional) Random seed used to initialize the X and Y matrices.
ignore_const_cols : bool
(Optional) A logical value indicating whether to ignore constant columns in the training frame. A column is constant if all of its
non-missing values are the same value.
loss : str
A character string indicating the default loss function for numeric columns. Possible values are "Quadratic" (default), "L1", "Huber",
"Poisson", "Hinge", and "Logistic".
multi_loss : str
A character string indicating the default loss function for enum columns. Possible values are "Categorical" and "Ordinal".
loss_by_col : str
(Optional) A list of strings indicating the loss function for specific columns by corresponding index in loss_by_col_idx.
Will override loss for numeric columns and multi_loss for enum columns.
loss_by_col_idx : str
(Optional) A list of column indices to which the corresponding loss functions in loss_by_col are assigned. Must be zero indexed.
regularization_x : str
A character string indicating the regularization function for the X matrix. Possible values are "None" (default), "Quadratic",
"L2", "L1", "NonNegative", "OneSparse", "UnitOneSparse", and "Simplex".
regularization_y : str
A character string indicating the regularization function for the Y matrix. Possible values are "None" (default), "Quadratic",
"L2", "L1", "NonNegative", "OneSparse", "UnitOneSparse", and "Simplex".
gamma_x : float
The weight on the X matrix regularization term.
gamma_y : float
The weight on the Y matrix regularization term.
init_step_size : float
Initial step size. Divided by number of columns in the training frame when calculating the proximal gradient update. The algorithm
begins at init_step_size and decreases the step size at each iteration until a termination condition is reached.
min_step_size : float
Minimum step size upon which the algorithm is terminated.
init : str
A character string indicating how to select the initial Y matrix.
Possible values are "Random": for initialization to a random array from the standard normal distribution, "PlusPlus": for initialization
using the clusters from k-means++ initialization, or "SVD": for initialization using the first k (approximate) right singular vectors.
Additionally, the user may specify the initial Y as a matrix, data.frame, H2OFrame, or list of vectors.
recover_svd : bool
A logical value indicating whether the singular values and eigenvectors should be recovered during post-processing of the generalized
low rank decomposition.
:return: a new dim reduction model
"""
parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
parms["algo"]="glrm"
parms['_rest_version']=99
return h2o_model_builder.unsupervised(parms)
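# A minimal, hypothetical usage sketch (not part of the original module): fit a rank-5
# GLRM with quadratic loss and L1 regularization on an assumed H2OFrame `df`. All
# parameter values are illustrative, not recommendations.
def _example_glrm_usage(df):
    return glrm(x=df, k=5, loss="Quadratic", regularization_x="L1",
                regularization_y="L1", gamma_x=0.1, gamma_y=0.1, max_iterations=500)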
def naive_bayes(x,y,validation_x=None,validation_y=None,training_frame=None,validation_frame=None,
laplace=None,threshold=None,eps=None,compute_metrics=None,offset_column=None,weights_column=None,
balance_classes=None,max_after_balance_size=None, nfolds=None,fold_column=None,fold_assignment=None,
keep_cross_validation_predictions=None,checkpoint=None):
"""
The naive Bayes classifier assumes independence between predictor variables conditional on the response, and a
Gaussian distribution of numeric predictors with mean and standard deviation computed from the training dataset.
When building a naive Bayes classifier, every row in the training dataset that contains at least one NA will be
skipped completely. If the test dataset has missing values, then those predictors are omitted in the probability
calculation during prediction.
Parameters
----------
laplace : int
A positive number controlling Laplace smoothing. The default zero disables smoothing.
threshold : float
The minimum standard deviation to use for observations without enough data. Must be at least 1e-10.
eps : float
A threshold cutoff to deal with numeric instability, must be positive.
compute_metrics : bool
A logical value indicating whether model metrics should be computed. Set to FALSE to reduce the runtime of the algorithm.
training_frame : H2OFrame
Training Frame
validation_frame : H2OFrame
Validation Frame
offset_column : H2OFrame
Specify the offset column.
weights_column : H2OFrame
Specify the weights column.
nfolds : int
(Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty.
fold_column : H2OFrame
(Optional) Column with cross-validation fold index assignment per observation
fold_assignment : str
Cross-validation fold assignment scheme, if fold_column is not specified. Must be "AUTO", "Random" or "Modulo"
keep_cross_validation_predictions : bool
Whether to keep the predictions of the cross-validation models
:return: Returns an H2OBinomialModel if the response has two categorical levels, H2OMultinomialModel otherwise.
"""
parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
parms["algo"]="naivebayes"
return h2o_model_builder.supervised(parms)
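# A minimal, hypothetical usage sketch (not part of the original module): train a naive
# Bayes classifier on an assumed H2OFrame `df` with categorical response "label", using
# Laplace smoothing. Frame and column names are illustrative only.
def _example_naive_bayes_usage(df):
    return naive_bayes(x=df[["a", "b"]], y=df["label"], laplace=1, compute_metrics=True)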
def create_frame(id = None, rows = 10000, cols = 10, randomize = True, value = 0, real_range = 100,
categorical_fraction = 0.2, factors = 100, integer_fraction = 0.2, integer_range = 100,
binary_fraction = 0.1, binary_ones_fraction = 0.02, missing_fraction = 0.01, response_factors = 2,
has_response = False, seed=None):
"""
Data Frame Creation in H2O.
Creates a data frame in H2O with real-valued, categorical, integer, and binary columns specified by the user.
Parameters
----------
id : str
A string indicating the destination key. If empty, this will be auto-generated by H2O.
rows : int
The number of rows of data to generate.
cols : int
The number of columns of data to generate. Excludes the response column if has_response == True.
randomize : bool
A logical value indicating whether data values should be randomly generated. This must be TRUE if either categorical_fraction or integer_fraction is non-zero.
value : int
If randomize == FALSE, then all real-valued entries will be set to this value.
real_range : float
The range of randomly generated real values.
categorical_fraction : float
The fraction of total columns that are categorical.
factors : int
The number of (unique) factor levels in each categorical column.
integer_fraction : float
The fraction of total columns that are integer-valued.
integer_range : list
The range of randomly generated integer values.
binary_fraction : float
The fraction of total columns that are binary-valued.
binary_ones_fraction : float
The fraction of values in a binary column that are set to 1.
missing_fraction : float
The fraction of total entries in the data frame that are set to NA.
response_factors : int
If has_response == TRUE, then this is the number of factor levels in the response column.
has_response : bool
A logical value indicating whether an additional response column should be pre-pended to the final H2O data frame. If set to TRUE, the total number
of columns will be cols+1.
seed : int
A seed used to generate random values when randomize = TRUE.
:return: the H2OFrame that was created
"""
parms = {"dest": _py_tmp_key() if id is None else id,
"rows": rows,
"cols": cols,
"randomize": randomize,
"value": value,
"real_range": real_range,
"categorical_fraction": categorical_fraction,
"factors": factors,
"integer_fraction": integer_fraction,
"integer_range": integer_range,
"binary_fraction": binary_fraction,
"binary_ones_fraction": binary_ones_fraction,
"missing_fraction": missing_fraction,
"response_factors": response_factors,
"has_response": has_response,
"seed": -1 if seed is None else seed,
}
H2OJob(H2OConnection.post_json("CreateFrame", **parms), "Create Frame").poll()
return get_frame(parms["dest"])
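# A minimal, hypothetical usage sketch (not part of the original module): generate a
# small random frame mixing real, integer, categorical and binary columns plus a
# response column. The values are illustrative defaults, not recommendations.
def _example_create_frame_usage():
    return create_frame(rows=1000, cols=8, categorical_fraction=0.25,
                        integer_fraction=0.25, binary_fraction=0.1,
                        missing_fraction=0.05, has_response=True, seed=1234)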
def interaction(data, factors, pairwise, max_factors, min_occurrence, destination_frame=None):
"""
Categorical Interaction Feature Creation in H2O.
Creates a frame in H2O with n-th order interaction features between categorical columns, as specified by
the user.
Parameters
----------
data : H2OFrame
the H2OFrame that holds the target categorical columns.
factors : list
factors Factor columns (either indices or column names).
pairwise : bool
Whether to create pairwise interactions between factors (otherwise create one higher-order interaction). Only applicable if there are 3 or more factors.
max_factors : int
Max. number of factor levels in pair-wise interaction terms (if enforced, one extra catch-all factor will be made)
min_occurrence : int
Min. occurrence threshold for factor levels in pair-wise interaction terms
destination_frame : str
A string indicating the destination key. If empty, this will be auto-generated by H2O.
:return: H2OFrame
"""
data._eager()
factors = [data.names[n] if isinstance(n,int) else n for n in factors]
parms = {"dest": _py_tmp_key() if destination_frame is None else destination_frame,
"source_frame": data._id,
"factor_columns": [_quoted(f) for f in factors],
"pairwise": pairwise,
"max_factors": max_factors,
"min_occurrence": min_occurrence,
}
H2OJob(H2OConnection.post_json("Interaction", **parms), "Interactions").poll()
return get_frame(parms["dest"])
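# A minimal, hypothetical usage sketch (not part of the original module): build pairwise
# interaction features between two assumed factor columns of an H2OFrame `df`.
def _example_interaction_usage(df):
    return interaction(df, factors=["cat_a", "cat_b"], pairwise=True,
                       max_factors=100, min_occurrence=1)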
def network_test():
res = H2OConnection.get_json(url_suffix="NetworkTest")
res["table"].show()
def locate(path):
"""
Search for a relative path and turn it into an absolute path.
This is handy when hunting for data files to be passed into h2o and used by import file.
Note: This function is for unit testing purposes only.
Parameters
----------
path : str
Path to search for
:return: Absolute path if it is found. None otherwise.
"""
tmp_dir = os.path.realpath(os.getcwd())
possible_result = os.path.join(tmp_dir, path)
while (True):
if (os.path.exists(possible_result)):
return possible_result
next_tmp_dir = os.path.dirname(tmp_dir)
if (next_tmp_dir == tmp_dir):
raise ValueError("File not found: " + path)
tmp_dir = next_tmp_dir
possible_result = os.path.join(tmp_dir, path)
def store_size():
"""
Get the H2O store size (current count of keys).
:return: number of keys in H2O cloud
"""
return rapids("(store_size)")["result"]
def keys_leaked(num_keys):
"""
Ask H2O if any keys leaked.
@param num_keys: The number of keys that should be there.
:return: A boolean True/False if keys leaked. If keys leaked, check H2O logs for further detail.
"""
return rapids("keys_leaked #{})".format(num_keys))["result"]=="TRUE"
def as_list(data, use_pandas=True):
"""
Convert an H2O data object into a python-specific object.
WARNING: This will pull all data local!
If Pandas is available (and use_pandas is True), then pandas will be used to parse the data frame.
Otherwise, a list-of-lists populated by character data will be returned (so the types of data will
all be str).
Parameters
----------
data : H2OFrame
An H2O data object.
use_pandas : bool
Try to use pandas for reading in the data.
:return: List of list (Rows x Columns).
"""
return H2OFrame.as_data_frame(data, use_pandas)
def set_timezone(tz):
"""
Set the Time Zone on the H2O Cloud
Parameters
----------
tz : str
The desired timezone.
:return: None
"""
rapids(ExprNode._collapse_sb(ExprNode("setTimeZone", tz)._eager()))
def get_timezone():
"""
Get the Time Zone on the H2O Cloud
:return: the time zone (string)
"""
return H2OFrame(expr=ExprNode("getTimeZone"))._scalar()
def list_timezones():
"""
Get a list of all the timezones
:return: the time zones (as an H2OFrame)
"""
return H2OFrame(expr=ExprNode("listTimeZones"))._frame()
def turn_off_ref_cnts():
"""
Reference counting on H2OFrame's allows for eager deletion of H2OFrames that go out of
scope. If multiple threads are spawned, however, and data is to live beyond the use of
the thread (e.g. when launching multiple jobs via Parallel in scikit-learn), then there
may be referrers outside of the current context. Use this to prevent deletion of H2OFrame
instances.
:return: None
"""
H2OFrame.COUNTING=False
def turn_on_ref_cnts():
"""
See the note in turn_off_ref_cnts
:return: None
"""
H2OFrame.del_dropped()
H2OFrame.COUNTING=True
class H2ODisplay:
"""
Pretty printing for H2O Objects;
Handles both IPython and vanilla console display
"""
THOUSANDS = "{:,}"
def __init__(self,table=None,header=None,table_header=None,**kwargs):
self.table_header=table_header
self.header=header
self.table=table
self.kwargs=kwargs
self.do_print=True
# one-shot display... never return an H2ODisplay object (or try not to)
# if holding onto a display object, then may have odd printing behavior
# the __repr__ and _repr_html_ methods will try to save you from many prints,
# but just be WARNED that your mileage may vary!
#
# In other words, it's better to just new one of these when you're ready to print out.
if self.table_header is not None:
print
print self.table_header + ":"
print
if H2ODisplay._in_ipy():
from IPython.display import display
display(self)
self.do_print=False
else:
self.pprint()
self.do_print=False
# for Ipython
def _repr_html_(self):
if self.do_print:
return H2ODisplay._html_table(self.table,self.header)
def pprint(self):
r = self.__repr__()
print r
# for python REPL console
def __repr__(self):
if self.do_print or not H2ODisplay._in_ipy():
if self.header is None: return tabulate.tabulate(self.table,**self.kwargs)
else: return tabulate.tabulate(self.table,headers=self.header,**self.kwargs)
self.do_print=True
return ""
@staticmethod
def _in_ipy(): # are we in ipy? then pretty print tables with _repr_html
try:
__IPYTHON__
return True
except NameError:
return False
# some html table builder helper things
@staticmethod
def _html_table(rows, header=None):
table= "<div style=\"overflow:auto\"><table style=\"width:50%\">{}</table></div>" # keep table in a div for scroll-a-bility
table_rows=[]
if header is not None:
table_rows.append(H2ODisplay._html_row(header))
for row in rows:
table_rows.append(H2ODisplay._html_row(row))
return table.format("\n".join(table_rows))
@staticmethod
def _html_row(row):
res = "<tr>{}</tr>"
entry = "<td>{}</td>"
entries = "\n".join([entry.format(str(r)) for r in row])
return res.format(entries)
def can_use_pandas():
try:
imp.find_module('pandas')
return True
except ImportError:
return False
# ALL DEPRECATED METHODS BELOW #
def h2o_deprecated(newfun=None):
def o(fun):
if newfun is not None: m = "{} is deprecated. Use {}.".format(fun.__name__,newfun.__name__)
else: m = "{} is deprecated.".format(fun.__name__)
@functools.wraps(fun)
def i(*args, **kwargs):
print
print
warnings.warn(m, category=DeprecationWarning, stacklevel=2)
return fun(*args, **kwargs)
return i
return o
@h2o_deprecated(import_file)
def import_frame(path=None):
"""
Deprecated for import_file.
Parameters
----------
path : str
A path specifying the location of the data to import.
:return: A new H2OFrame
"""
warnings.warn("deprecated: Use import_file", DeprecationWarning)
return import_file(path)
|
junwucs/h2o-3
|
h2o-py/h2o/h2o.py
|
Python
|
apache-2.0
| 74,681
|
[
"Gaussian"
] |
be5ccc98f5e16561ea8b576d978f36dff8ee161eb40fca8227676c40a1d7475d
|
"""Test the Amber Electric Sensors."""
from collections.abc import AsyncGenerator
from unittest.mock import Mock, patch
from amberelectric.model.current_interval import CurrentInterval
from amberelectric.model.range import Range
import pytest
from homeassistant.components.amberelectric.const import (
CONF_API_TOKEN,
CONF_SITE_ID,
CONF_SITE_NAME,
DOMAIN,
)
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
from tests.components.amberelectric.helpers import (
CONTROLLED_LOAD_CHANNEL,
FEED_IN_CHANNEL,
GENERAL_AND_CONTROLLED_SITE_ID,
GENERAL_AND_FEED_IN_SITE_ID,
GENERAL_CHANNEL,
GENERAL_ONLY_SITE_ID,
)
MOCK_API_TOKEN = "psk_0000000000000000"
@pytest.fixture
async def setup_general(hass) -> AsyncGenerator:
"""Set up general channel."""
MockConfigEntry(
domain="amberelectric",
data={
CONF_SITE_NAME: "mock_title",
CONF_API_TOKEN: MOCK_API_TOKEN,
CONF_SITE_ID: GENERAL_ONLY_SITE_ID,
},
).add_to_hass(hass)
instance = Mock()
with patch(
"amberelectric.api.AmberApi.create",
return_value=instance,
) as mock_update:
instance.get_current_price = Mock(return_value=GENERAL_CHANNEL)
assert await async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
yield mock_update.return_value
@pytest.fixture
async def setup_general_and_controlled_load(hass) -> AsyncGenerator:
"""Set up general channel and controller load channel."""
MockConfigEntry(
domain="amberelectric",
data={
CONF_API_TOKEN: MOCK_API_TOKEN,
CONF_SITE_ID: GENERAL_AND_CONTROLLED_SITE_ID,
},
).add_to_hass(hass)
instance = Mock()
with patch(
"amberelectric.api.AmberApi.create",
return_value=instance,
) as mock_update:
instance.get_current_price = Mock(
return_value=GENERAL_CHANNEL + CONTROLLED_LOAD_CHANNEL
)
assert await async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
yield mock_update.return_value
@pytest.fixture
async def setup_general_and_feed_in(hass) -> AsyncGenerator:
"""Set up general channel and feed in channel."""
MockConfigEntry(
domain="amberelectric",
data={
CONF_API_TOKEN: MOCK_API_TOKEN,
CONF_SITE_ID: GENERAL_AND_FEED_IN_SITE_ID,
},
).add_to_hass(hass)
instance = Mock()
with patch(
"amberelectric.api.AmberApi.create",
return_value=instance,
) as mock_update:
instance.get_current_price = Mock(
return_value=GENERAL_CHANNEL + FEED_IN_CHANNEL
)
assert await async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
yield mock_update.return_value
async def test_general_price_sensor(hass: HomeAssistant, setup_general: Mock) -> None:
"""Test the General Price sensor."""
assert len(hass.states.async_all()) == 4
price = hass.states.get("sensor.mock_title_general_price")
assert price
assert price.state == "0.08"
attributes = price.attributes
assert attributes["duration"] == 30
assert attributes["date"] == "2021-09-21"
assert attributes["per_kwh"] == 0.08
assert attributes["nem_date"] == "2021-09-21T08:30:00+10:00"
assert attributes["spot_per_kwh"] == 0.01
assert attributes["start_time"] == "2021-09-21T08:00:00+10:00"
assert attributes["end_time"] == "2021-09-21T08:30:00+10:00"
assert attributes["renewables"] == 51
assert attributes["estimate"] is True
assert attributes["spike_status"] == "none"
assert attributes["channel_type"] == "general"
assert attributes["attribution"] == "Data provided by Amber Electric"
assert attributes.get("range_min") is None
assert attributes.get("range_max") is None
with_range: list[CurrentInterval] = GENERAL_CHANNEL
with_range[0].range = Range(7.8, 12.4)
setup_general.get_current_price.return_value = with_range
config_entry = hass.config_entries.async_entries(DOMAIN)[0]
await hass.config_entries.async_reload(config_entry.entry_id)
await hass.async_block_till_done()
price = hass.states.get("sensor.mock_title_general_price")
assert price
attributes = price.attributes
assert attributes.get("range_min") == 0.08
assert attributes.get("range_max") == 0.12
async def test_general_and_controlled_load_price_sensor(
hass: HomeAssistant, setup_general_and_controlled_load: Mock
) -> None:
"""Test the Controlled Price sensor."""
assert len(hass.states.async_all()) == 6
price = hass.states.get("sensor.mock_title_controlled_load_price")
assert price
assert price.state == "0.08"
attributes = price.attributes
assert attributes["duration"] == 30
assert attributes["date"] == "2021-09-21"
assert attributes["per_kwh"] == 0.08
assert attributes["nem_date"] == "2021-09-21T08:30:00+10:00"
assert attributes["spot_per_kwh"] == 0.01
assert attributes["start_time"] == "2021-09-21T08:00:00+10:00"
assert attributes["end_time"] == "2021-09-21T08:30:00+10:00"
assert attributes["renewables"] == 51
assert attributes["estimate"] is True
assert attributes["spike_status"] == "none"
assert attributes["channel_type"] == "controlledLoad"
assert attributes["attribution"] == "Data provided by Amber Electric"
async def test_general_and_feed_in_price_sensor(
hass: HomeAssistant, setup_general_and_feed_in: Mock
) -> None:
"""Test the Feed In sensor."""
assert len(hass.states.async_all()) == 6
price = hass.states.get("sensor.mock_title_feed_in_price")
assert price
assert price.state == "-0.08"
attributes = price.attributes
assert attributes["duration"] == 30
assert attributes["date"] == "2021-09-21"
assert attributes["per_kwh"] == -0.08
assert attributes["nem_date"] == "2021-09-21T08:30:00+10:00"
assert attributes["spot_per_kwh"] == 0.01
assert attributes["start_time"] == "2021-09-21T08:00:00+10:00"
assert attributes["end_time"] == "2021-09-21T08:30:00+10:00"
assert attributes["renewables"] == 51
assert attributes["estimate"] is True
assert attributes["spike_status"] == "none"
assert attributes["channel_type"] == "feedIn"
assert attributes["attribution"] == "Data provided by Amber Electric"
async def test_general_forecast_sensor(
hass: HomeAssistant, setup_general: Mock
) -> None:
"""Test the General Forecast sensor."""
assert len(hass.states.async_all()) == 4
price = hass.states.get("sensor.mock_title_general_forecast")
assert price
assert price.state == "0.09"
attributes = price.attributes
assert attributes["channel_type"] == "general"
assert attributes["attribution"] == "Data provided by Amber Electric"
first_forecast = attributes["forecasts"][0]
assert first_forecast["duration"] == 30
assert first_forecast["date"] == "2021-09-21"
assert first_forecast["per_kwh"] == 0.09
assert first_forecast["nem_date"] == "2021-09-21T09:00:00+10:00"
assert first_forecast["spot_per_kwh"] == 0.01
assert first_forecast["start_time"] == "2021-09-21T08:30:00+10:00"
assert first_forecast["end_time"] == "2021-09-21T09:00:00+10:00"
assert first_forecast["renewables"] == 50
assert first_forecast["spike_status"] == "none"
assert first_forecast.get("range_min") is None
assert first_forecast.get("range_max") is None
with_range: list[CurrentInterval] = GENERAL_CHANNEL
with_range[1].range = Range(7.8, 12.4)
setup_general.get_current_price.return_value = with_range
config_entry = hass.config_entries.async_entries(DOMAIN)[0]
await hass.config_entries.async_reload(config_entry.entry_id)
await hass.async_block_till_done()
price = hass.states.get("sensor.mock_title_general_forecast")
assert price
attributes = price.attributes
first_forecast = attributes["forecasts"][0]
assert first_forecast.get("range_min") == 0.08
assert first_forecast.get("range_max") == 0.12
async def test_controlled_load_forecast_sensor(
hass: HomeAssistant, setup_general_and_controlled_load: Mock
) -> None:
"""Test the Controlled Load Forecast sensor."""
assert len(hass.states.async_all()) == 6
price = hass.states.get("sensor.mock_title_controlled_load_forecast")
assert price
assert price.state == "0.09"
attributes = price.attributes
assert attributes["channel_type"] == "controlledLoad"
assert attributes["attribution"] == "Data provided by Amber Electric"
first_forecast = attributes["forecasts"][0]
assert first_forecast["duration"] == 30
assert first_forecast["date"] == "2021-09-21"
assert first_forecast["per_kwh"] == 0.09
assert first_forecast["nem_date"] == "2021-09-21T09:00:00+10:00"
assert first_forecast["spot_per_kwh"] == 0.01
assert first_forecast["start_time"] == "2021-09-21T08:30:00+10:00"
assert first_forecast["end_time"] == "2021-09-21T09:00:00+10:00"
assert first_forecast["renewables"] == 50
assert first_forecast["spike_status"] == "none"
async def test_feed_in_forecast_sensor(
hass: HomeAssistant, setup_general_and_feed_in: Mock
) -> None:
"""Test the Feed In Forecast sensor."""
assert len(hass.states.async_all()) == 6
price = hass.states.get("sensor.mock_title_feed_in_forecast")
assert price
assert price.state == "-0.09"
attributes = price.attributes
assert attributes["channel_type"] == "feedIn"
assert attributes["attribution"] == "Data provided by Amber Electric"
first_forecast = attributes["forecasts"][0]
assert first_forecast["duration"] == 30
assert first_forecast["date"] == "2021-09-21"
assert first_forecast["per_kwh"] == -0.09
assert first_forecast["nem_date"] == "2021-09-21T09:00:00+10:00"
assert first_forecast["spot_per_kwh"] == 0.01
assert first_forecast["start_time"] == "2021-09-21T08:30:00+10:00"
assert first_forecast["end_time"] == "2021-09-21T09:00:00+10:00"
assert first_forecast["renewables"] == 50
assert first_forecast["spike_status"] == "none"
async def test_renewable_sensor(hass: HomeAssistant, setup_general: Mock) -> None:
"""Testing the creation of the Amber renewables sensor."""
assert len(hass.states.async_all()) == 4
sensor = hass.states.get("sensor.mock_title_renewables")
assert sensor
assert sensor.state == "51"
|
rohitranjan1991/home-assistant
|
tests/components/amberelectric/test_sensor.py
|
Python
|
mit
| 10,601
|
[
"Amber"
] |
3ad7859e7756de37c9b9f08ca2ab7345969ca1e33d2a33be25e01890f3443458
|
# Beware! Only tested for non-spin-polarized case
import re
import sys
import rlcompleter
import readline
#from numpy import *
from enterfi import enterfi
from outputfi import outputfi
gridfname = enterfi("Enter VASP field data (CHGCAR, LOCPOT, etc.)")
outfname = outputfi("Enter output file name ")
gridfi = open(gridfname,"r")
gridfi.readline() # Skip system name
# Read lattice scaling constant
li = gridfi.readline().split()
scale = [0.0,0.0,0.0]
if len(li) == 1:
li = float(li[0])
for i in range(3):
scale[i] = li
else:
if len(li) == 3:
for i in range(3):
scale[i] = float(li[i])
# Read lattice vectors
latcons = [[0.0,0.0,0.0],[0.0,0.0,0.0],[0.0,0.0,0.0]]
for i in range(3):
li = gridfi.readline().split()
for j in range(3):
latcons[i][j] = float(li[j])*scale[j]
print latcons
# Is this lattice orthorhombic in z direction?
assert latcons[0][2] <= 1.0e-8
assert latcons[1][2] <= 1.0e-8
assert latcons[2][0] <= 1.0e-8
assert latcons[2][1] <= 1.0e-8
# Read number of atoms
# Is this from vasp5 or vasp4? vasp5 has element names on the sixth line
# while vasp 4 does not.
li = gridfi.readline().split()
if re.match("[0-9]",li[0].strip()):
# It's vasp4
nspecs = len(li)
natoms = 0
for i in range(nspecs):
li[i] = int(li[i])
natoms = natoms + li[i]
else:
# It's vasp5. Read one more line.
li = gridfi.readline().split()
nspecs = len(li)
natoms = 0
for i in range(nspecs):
li[i] = int(li[i])
natoms = natoms + li[i]
print natoms
gridfi.readline() # Skip one line. It probably says "Direct".
for i in range(natoms+1):
gridfi.readline() # Skip the atom coordinates plus 1 blank line
# Read the grid dimensions
grid = gridfi.readline().split()
for i in range(len(grid)):
grid[i]=int(grid[i])
ngrid = grid[0] * grid[1] * grid[2]
dz = latcons[2][2]/grid[2]
# Now read the rest of the file
data=gridfi.read().split()
for i in range(ngrid):
data[i]=float(data[i])
zavg=[]
for i in range(grid[2]):
zavgtmp=0.0
for j in range(grid[0]*grid[1]):
zavgtmp+=data[i*grid[0]*grid[1]+j]
zavgtmp=zavgtmp/(grid[0]*grid[1])
zavg.append(zavgtmp)
outfi = open(outfname,"w")
for i in range(len(zavg)):
outfi.write(str(dz*i) + " " + str(zavg[i]) + "\n")
#print zavg
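# Optional, hypothetical helper (never called above): the same z-averaging expressed with
# numpy. It assumes the flattened grid values are ordered with x fastest and z slowest,
# exactly as the explicit loop above assumes for CHGCAR/LOCPOT data.
def planar_average_numpy(flat_data, grid_dims):
    import numpy as np
    nx, ny, nz = grid_dims
    arr = np.asarray(flat_data[:nx*ny*nz], dtype=float)
    # reshape to (z, y, x) and average over each xy plane
    return arr.reshape(nz, ny, nx).mean(axis=(1, 2))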
|
skasamatsu/vaspgrid
|
zav.py
|
Python
|
mit
| 2,327
|
[
"VASP"
] |
870f7cb41a3d1cb4ae7a03782f4e79c0a512d5f8359c97bb721c69584428613a
|
#!/usr/bin/env python
# This code extracts the necessary information from current VASP output folders (self, dielectric, phonon, nself_aMoBT etc)
# and run aMoBT to calculate the electronic properties: mobility, conductivity and thermopower at different temperatures
# Please contact alireza@wustl.edu if you had any questions. Bug reports are very much appreciated.
# By: Alireza Faghaninia and Mike Sullivan
from __future__ import division
import os
import numpy as np
import re
import argparse
import pymatgen as mg
def generate_MATLAB_defect_plot_input(total_defects,defect_types,n_defects):
with file('defects_dopants_info_cation_rich.m', 'w') as indata:
indata.write('% This is an input file for defects and neutral dopants information \n')
indata.write('% Follow this format \n')
indata.write('% *at first line enter the # of defects +# of dopants+ # of defect types \n')
indata.write('% *next line: number of each type of defect/dopant in order \n')
indata.write('% *next line: The name of the defect (arbitrary) \n')
indata.write('% *next line: Relative_Fermi space Corrected formation energy (2 numbers) \n')
indata.write('% this together with the next line info (which must be different than this) \n')
indata.write('% is used to make the linear defect \n')
indata.write('% formation energy with respect to the Fermi level inside the band gap \n')
indata.write('% *next line: Relative_Fermi space Corrected formation energy (2 numbers) \n')
indata.write('% *next line: Charge of this defect \n')
indata.write('% *next line: empty \n')
indata.write('% follow this format until defects are done, now for dopants follow this: \n')
indata.write('% *first line: The name of the dopant (arbitrary) \n')
indata.write('% *next line: Corrected formation energy \n')
indata.write('% *next line: type of this defect: n or p \n')
indata.write('% *next line: empty \n')
indata.write('% Follow this format until dopants are done too! \n \n')
indata.write('%3d %3d %3d \n' % (total_defects,0,defect_types))
indata.write('%3d %3d %3d \n' % (n_defects[0], n_defects[1], n_defects[2]))
return
def jobs_done():
x = False
if os.path.isfile("OUTCAR"):
with open("OUTCAR") as fp:
for lin in fp:
line = lin.split()
if len(line)>1:
if line[0]=='Voluntary':
x = True
return x
def get_TOTEN():
E = 0.0
os.system("""grep "TOTEN" OUTCAR > temp.txt""")
os.system("tail -4 temp.txt | head -1 > temp1.txt")
with open('temp1.txt','r') as temp:
li = temp.readline()
line = li.split()
if len(line) > 4:
E = float(line[4])
os.system('rm temp.txt temp1.txt')
return E
def get_E_correction(static_dielectric):
Ec = 0.0
os.system("""grep "energy correction for charged system" OUTCAR > temp2.txt""")
with open('temp2.txt','r') as temp:
li = temp.readline()
line = li.split()
if len(line) > 5:
Ec = float(line[5])/static_dielectric
os.system('rm temp2.txt')
return Ec
def find_defect_and_type(defects,formula):
formula += 'x'
elements = []
types = []
coeffs = []
E_elements = []
for defect in defects:
defect += 'x'
if 'V' in defect[0]:
types.append('v')
element = defect[2:-1]
elif '_i' in defect:
types.append('i')
element = defect[0:1]
if '_' in element:
element = defect[0]
else:
types.append('d')
element = defect[0:1]
if '_' in element:
element = defect[0]
elements.append(element)
coeffs.append(coefficientof(element,formula))
E_elements.append(matproj.get_data(element, prop="energy_per_atom")[0]["energy_per_atom"])
return elements, types, coeffs, E_elements
def coefficientof(element1,formula):
n = 1
if formula[formula.find(element1) + len(element1)] in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
if formula[formula.find(element1) + 1 + len(element1)] in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
n = int(formula[formula.find(element1) + len(element1)] + formula[formula.find(element1) + 1 + len(element1)])
else:
n = int(formula[formula.find(element1) + len(element1)])
return n
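# Worked example (comment only): with the sentinel 'x' appended by the callers,
# coefficientof('Sn', 'K2Sn2O3' + 'x') == 2 and coefficientof('O', 'K2Sn2O3' + 'x') == 3;
# an element with no trailing digit (e.g. 'K' in 'KSnO' + 'x') returns 1. Note the parser
# only handles one- or two-digit coefficients, which is sufficient for the formulas used here.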
def find_chempot(element1, energy1, dopant): # This function finds all the compounds composed of element1 and dopant so that the chemical potential of the dopant can be determined
data = matproj.query(criteria={"elements": {"$all": [element1, dopant]},"nelements": 2},properties=["icsd_id", "pretty_formula", "spacegroup.symbol","nelements","energy_per_atom"])
formation_energies = np.zeros(len(data))
for n in range(min([25, len(data)])):
formula = data[n]["pretty_formula"] + "x" # just adding a character to avoid error in "coefficientof" function
print( data[n]["pretty_formula"])
n_e = coefficientof(element1,formula)
n_d = coefficientof(dopant,formula)
formation_energies[n] = (data[n]['energy_per_atom']*(n_e+n_d) - n_e * energy1 - n_d * energy_dopant)/n_d
formation_energies = np.append(formation_energies,0)
chem = np.min(np.append(formation_energies,[0]))
return(chem)
if __name__ == "__main__":
apikey = 'fDJKEZpxSyvsXdCt'
from pymatgen.matproj.rest import MPRester
matproj = MPRester(apikey)
### Check the input arguments
parser = argparse.ArgumentParser()
parser.add_argument("-de","--defect_names", help="The list of the name of defects (e.g. ['V_O', 'V_Zn'])", required = False)
parser.add_argument("-di","--dielectric_constant", help="The dielectric constant of the base material for 1st order Makov-Payne correction", required = False)
args = parser.parse_args()
############ INPUTS:
args.defect_names = ['V_O', 'V_Sn', 'V_K']
args.dielectric_constant = 4.86
job = 'geom'
ndefects = 0
formula = 'K2Sn2O3'
coeffs = []
formula_units = 16 # The number of formula units that are in the POSCAR of "undoped" supercell
VBM_max = 2.309599
############ END OF INPUTS
elements, types, coeffs, E_elements = find_defect_and_type(args.defect_names,formula)
os.chdir('undoped' + '/' + job)
if jobs_done():
E_undoped = get_TOTEN()
print(E_undoped)
os.chdir('../..')
undoped_formation = E_undoped/formula_units
# coeffs_sum = sum(int(i) for i in coeffs)
chemical_potentials = []
undoped_formation -= sum(float(coeffs[i])*float(E_elements[i]) for i in range(0,len(elements)))
for i in range(0,len(elements)):
if elements[i] is 'O':
chemical_potentials.append(0.0)
if elements[i] is not 'O':
chemical_potentials.append(undoped_formation/(2 * int(coeffs[i])))
# chemical_potentials = [0.0, 0.0, 0.0]
n_defects = []
with open('temp_energy.txt','w') as efile:
counter = 0
for defect in args.defect_names:
d_count = 0
for charge in ['-4', '-3', '-2', '-1', '', '+1', '+2', '+3', '+4'] :
if os.path.exists(defect + charge):
d_count += 1
os.chdir(defect + charge + '/' + job)
if jobs_done():
E = get_TOTEN()
ndefects += 1
print(E)
Ec = get_E_correction(args.dielectric_constant)
print(Ec)
efile.write(defect + charge + ' \n')
formation_energy = E+Ec-E_undoped
if types[counter] is 'v':
formation_energy += (float(E_elements[counter]) + float(chemical_potentials[counter]))
elif types[counter] is 'i':
formation_energy -= (float(E_elements[counter]) + float(chemical_potentials[counter]))
else:
print('The doping is not programmed yet! Only the vacancy and interstitials for now')
if charge is not '':
formation_energy += int(charge)*VBM_max
efile.write('%4.2f %8.4f \n' % (0,formation_energy))
if charge is not '':
efile.write(charge + ' \n\n')
else:
efile.write('0.0 \n\n')
os.chdir('../..')
n_defects.append(d_count)
counter += 1
print(ndefects)
print(elements)
print(types)
print(coeffs)
print(E_elements)
print(undoped_formation)
print(chemical_potentials)
generate_MATLAB_defect_plot_input(ndefects,len(args.defect_names),n_defects)
os.system('cat temp_energy.txt >> defects_dopants_info_cation_rich.m')
os.system('rm temp_energy.txt')
|
albalu/dekode
|
collect_defect_energy.py
|
Python
|
mit
| 8,131
|
[
"VASP",
"pymatgen"
] |
1372f356bb4cf7a9eba1c021ed9e534f5d8e87b7b8f4ff4d58d727418d7bb409
|
"""Some utility functions for the spectra."""
import numpy as np
from scipy.ndimage.filters import gaussian_filter1d
def res_corr(flux, dvbin, fwhm=8):
"""
Real spectrographs have finite spectral resolution.
Correct for this by smoothing the spectrum (the flux) by convolving with a Gaussian.
The input spectrum is assumed to have infinite resolution, since we have used a spline
to interpolate it first and/or we are converged.
Strictly speaking we should rebin the spectrum after to have the same resolution
as the observed pixel, but as long as the pixels are smaller than the FWHM of the
spectrograph (which is the case as long as the observer is smart) we will be fine.
args:
flux - The input flux spectra
dvbin - the width in km/s for the input flux
fwhm - FWHM of the spectrograph in km/s
"""
# Convert FWHM input to internal units
res = fwhm/dvbin
#FWHM of a Gaussian is 2 \sqrt(2 ln 2) sigma
sigma = res/(2*np.sqrt(2*np.log(2)))
#Do filter in wrapping mode to avoid edge effects
oflux = gaussian_filter1d(flux, sigma, axis=-1, mode='wrap')
return oflux
def get_rolled_spectra(tau):
"""
Cycle the array tau so that the peak is at the middle.
Returns (roll - the index the array was rolled by, tau_out - the rolled array)
"""
(roll, tau_out) = zip(*[_roll_one_spectra(tau_l) for tau_l in tau])
assert np.all(np.shape(roll) == np.shape(tau[:,0]))
assert np.all(np.shape(tau_out) == np.shape(tau))
return (np.array(roll), np.array(tau_out))
def _roll_one_spectra(tau_l):
"""Roll a single spectrum so the peak is in the middle."""
max_t = np.max(tau_l)
ind_m = np.where(tau_l == max_t)[0][0]
tau_out = np.roll(tau_l, int(np.size(tau_l)/2)- ind_m)
roll = int(np.size(tau_l)/2) - ind_m
return roll, tau_out
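# A minimal, hypothetical usage sketch (not part of the original module): build a toy
# two-spectrum optical-depth array, centre the absorption peaks, convert to flux and
# smooth to an assumed 8 km/s FWHM instrument on an assumed 1 km/s pixel grid.
if __name__ == "__main__":
    dvbin = 1.0  # km/s per pixel (assumed)
    tau = np.zeros((2, 200))
    tau[0, 30] = 5.0   # absorption peak away from the centre
    tau[1, 150] = 3.0
    roll, tau_centred = get_rolled_spectra(tau)
    flux = np.exp(-tau_centred)
    smoothed = res_corr(flux, dvbin, fwhm=8)
    print(roll, smoothed.shape)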
|
sbird/fake_spectra
|
fake_spectra/spec_utils.py
|
Python
|
mit
| 1,904
|
[
"Gaussian"
] |
cf33876f8bade8996afc62178d184bf26e0dbf73acd14d7ddf43346cd589a7cb
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import sys
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
'''
Prevent .pyc files from being created.
Stops the vtk source being polluted
by .pyc files.
'''
sys.dont_write_bytecode = True
import TestFixedPointRayCasterNearest
class TestFixedPointRayCasterLinear(vtk.test.Testing.vtkTest):
def testFixedPointRayCasterLinear(self):
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
iRen = vtk.vtkRenderWindowInteractor()
tFPRCN = TestFixedPointRayCasterNearest.FixedPointRayCasterNearest(ren, renWin, iRen)
volumeProperty = tFPRCN.GetVolumeProperty()
for j in range(0, 5):
for i in range(0, 5):
volumeProperty[i][j].SetInterpolationTypeToLinear()
# render and interact with data
renWin.Render()
img_file = "TestFixedPointRayCasterLinear.png"
vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=10)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestFixedPointRayCasterLinear, 'test')])
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Rendering/Volume/Testing/Python/TestFixedPointRayCasterLinear.py
|
Python
|
bsd-3-clause
| 1,890
|
[
"VTK"
] |
ba6205eec0f2d5f7a7690af89635aac8c72aff2cb5ad3f19557b94bde88890bb
|
from __future__ import absolute_import
from __future__ import print_function
import matplotlib.pyplot as plt
import autograd.numpy as np
import autograd.scipy.stats.norm as norm
from autograd.misc.optimizers import adam, sgd
# same BBSVI function!
from black_box_svi import black_box_variational_inference
if __name__ == '__main__':
# Specify an inference problem by its unnormalized log-density.
# it's difficult to see the benefit in low dimensions
# model parameters are a mean and a log_sigma
np.random.seed(42)
obs_dim = 20
Y = np.random.randn(obs_dim, obs_dim).dot(np.random.randn(obs_dim))
def log_density(x, t):
mu, log_sigma = x[:, :obs_dim], x[:, obs_dim:]
sigma_density = np.sum(norm.logpdf(log_sigma, 0, 1.35), axis=1)
mu_density = np.sum(norm.logpdf(Y, mu, np.exp(log_sigma)), axis=1)
return sigma_density + mu_density
# Build variational objective.
D = obs_dim * 2 # dimension of our posterior
objective, gradient, unpack_params = \
black_box_variational_inference(log_density, D, num_samples=2000)
# Define the natural gradient
# The natural gradient of the ELBO is the gradient of the elbo,
# preconditioned by the inverse Fisher Information Matrix. The Fisher,
# in the case of a diagonal gaussian, is a diagonal matrix that is a
# simple function of the variance. Intuitively, statistical distance
# created by perturbing the mean of an independent Gaussian is
# determined by how wide the distribution is along that dimension ---
# the wider the distribution, the less sensitive statistical distances is
# to perturbations of the mean; the narrower the distribution, the more
# the statistical distance changes when you perturb the mean (imagine
# an extremely narrow Gaussian --- basically a spike. The KL between
# this Gaussian and a Gaussian $\epsilon$ away in location can be big ---
# moving the Gaussian could significantly reduce overlap in support
# which corresponds to a greater statistical distance).
#
# When we want to move in directions of steepest ascent, we multiply by
# the inverse fisher --- that way we make quicker progress when the
# variance is wide, and we scale down our step size when the variance
# is small (which leads to more robust/less chaotic ascent).
def fisher_diag(lam):
mu, log_sigma = unpack_params(lam)
return np.concatenate([np.exp(-2.*log_sigma),
np.ones(len(log_sigma))*2])
# simple! basically free!
natural_gradient = lambda lam, i: (1./fisher_diag(lam)) * gradient(lam, i)
# function for keeping track of callback ELBO values (for plotting below)
def optimize_and_lls(optfun):
num_iters = 200
elbos = []
def callback(params, t, g):
elbo_val = -objective(params, t)
elbos.append(elbo_val)
if t % 50 == 0:
print("Iteration {} lower bound {}".format(t, elbo_val))
init_mean = -1 * np.ones(D)
init_log_std = -5 * np.ones(D)
init_var_params = np.concatenate([init_mean, init_log_std])
variational_params = optfun(num_iters, init_var_params, callback)
return np.array(elbos)
# let's optimize this with a few different step sizes
elbo_lists = []
step_sizes = [.1, .25, .5]
for step_size in step_sizes:
# optimize with standard gradient + adam
optfun = lambda n, init, cb: adam(gradient, init, step_size=step_size,
num_iters=n, callback=cb)
standard_lls = optimize_and_lls(optfun)
# optimize with natural gradient + sgd, no momentum
optnat = lambda n, init, cb: sgd(natural_gradient, init, step_size=step_size,
num_iters=n, callback=cb, mass=.001)
natural_lls = optimize_and_lls(optnat)
elbo_lists.append((standard_lls, natural_lls))
# visually compare the ELBO
plt.figure(figsize=(12,8))
colors = ['b', 'k', 'g']
for col, ss, (stand_lls, nat_lls) in zip(colors, step_sizes, elbo_lists):
plt.plot(np.arange(len(stand_lls)), stand_lls,
'--', label="standard (adam, step-size = %2.2f)"%ss, alpha=.5, c=col)
plt.plot(np.arange(len(nat_lls)), nat_lls, '-',
label="natural (sgd, step-size = %2.2f)"%ss, c=col)
llrange = natural_lls.max() - natural_lls.min()
plt.ylim((natural_lls.max() - llrange*.1, natural_lls.max() + 10))
plt.xlabel("optimization iteration")
plt.ylabel("ELBO")
plt.legend(loc='lower right')
plt.title("%d dimensional posterior"%D)
plt.show()
|
hips/autograd
|
examples/natural_gradient_black_box_svi.py
|
Python
|
mit
| 4,775
|
[
"Gaussian"
] |
799b054bb1bdf24573d47b9afe3b22bf5d53908bfbe8935d2392d763147ea628
|
from setuptools import setup, find_packages
setup(
name = 'oacwellcome',
version = '1.0.0',
packages = find_packages(),
install_requires = [
"octopus==1.0.0",
"esprit",
"Flask==0.9",
"WTForms==2.0.1",
"flask_mail==0.9.1",
"newrelic",
"gunicorn==19.1.1"
],
url = 'http://cottagelabs.com/',
author = 'Cottage Labs',
author_email = 'us@cottagelabs.com',
description = 'Open Access Compliance for Wellcome Trust',
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: Copyheart',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
CottageLabs/oacwellcome
|
setup.py
|
Python
|
apache-2.0
| 843
|
[
"Octopus"
] |
7b729dd53a542ba6a66cdd16e6d90cc5c58049de8cbfa3321ff0163576b45926
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime
from math import sqrt
import inspect
import new
import os
import tinctest
from tinctest import TINCTestCase
from tinctest import logger
@tinctest.skipLoading("Test model. No tests loaded.")
class PerformanceTestCase(TINCTestCase):
"""
This is an abstract class and cannot be instantiated directly.
PerformanceTestCase provides metadata and functionality that
is common to all performance tests
@metadata: repetitions: repeat the execution of the test this many times (default: 1)
@metadata: baseline: a file containing the baseline results (default: None)
@metadata: threshold: if a test performs worse than baseline by threshold percentage,
the test is considered failed (default: 5)
"""
def __init__(self, methodName="runTest"):
self.repetitions = None
self.threshold = None
self.baseline = None
self._orig_testMethodName = methodName
super(PerformanceTestCase, self).__init__(methodName)
def _infer_metadata(self):
super(PerformanceTestCase, self)._infer_metadata()
self.repetitions = int(self._metadata.get('repetitions', '1'))
self.threshold = int(self._metadata.get('threshold', '5'))
self.baseline = self._metadata.get('baseline', None)
def run(self, result=None):
"""
        This implementation of the run method generates a new test method that
        runs the actual test method 'self.repetitions' times and gathers
        performance stats for it, such as average and maximum runtime.
@type result: TINCTextTestResult
@param result: The result object to be used for this particular test instance.
"""
"""
# XXX - Should revisit this approach later. Directly cloning the
# method definition using lambda or new.instancemethod does not
# seem to work well which is what we are trying to do here.
# Currently, we are going with the approach of creating a new test method
# that calls the original test method repetitively and changing
# self._testMethodName to point to the newly created method.
# The bug here will be that the test names used in the result object
# will be the name of the newly created method.
# For PerformanceTestCase, we should run the test method for
# 'self.repetitions' number of times. So, we redefine the original
# test method to actually do it's work multiple times.
self._orig_testMethodName = 'orig_' + self._testMethodName
methd = getattr(self, self._testMethodName)
# The following crazy line of code creates a new instance method named
# 'self._orig_testMethodName' which has the same definition as the
# instance method 'self._testMethodName'.
orig_test_method = lambda self : self.__class__.__dict__[self._testMethodName](self)
orig_test_method.__name__ = 'orig_' + self._testMethodName
setattr(self.__class__, self._orig_testMethodName, orig_test_method)
"""
# For PerformanceTestCase, we should run the test method for
# 'self.repetitions' number of times. So, we create a new instance
# method that runs self._testMethodName the desired number of times
# and set self._testMethodName to the new method before calling super.run().
# Note - The test will be reported using the new method instead of the original
# test method. We will re-visit this later.
self._orig_testMethodName = self._testMethodName
def test_function(my_self):
orig_test_method = getattr(my_self, my_self._orig_testMethodName)
runtime_list = []
for i in range(my_self.repetitions):
# Get time before and after this function to time the test
start = datetime.now()
orig_test_method()
end = datetime.now()
delta = end - start
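                # convert the elapsed timedelta to milliseconds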
milli = delta.seconds * 1000 + (float(delta.microseconds) / 1000)
runtime_list.append(milli)
total_runtime = sum(runtime_list)
min_runtime = min(runtime_list)
max_runtime = max(runtime_list)
avg_runtime = total_runtime / my_self.repetitions
std_dev = sqrt(sum((runtime - avg_runtime)**2 for runtime in runtime_list) / my_self.repetitions)
std_dev_pct = std_dev * 100 / float(avg_runtime)
logger.info("%s - %s" % (my_self, runtime_list))
# Find the baseline file. For now, we assume that there is only
# one baseline version specified
current_dir = os.path.dirname(inspect.getfile(my_self.__class__))
baseline_file = 'baseline_' + my_self.baseline + '.csv'
baseline_file_path = os.path.join(current_dir, baseline_file)
(baseline_runtime, delta) = GPPerfDiff.check_perf_deviation(my_self._orig_testMethodName, \
baseline_file_path, avg_runtime, \
my_self.threshold)
# compose statistics
stats = [
('Test Name', "%s.%s" % (self.__class__.__name__, self._orig_testMethodName)),
('Average Runtime', "%0.2f" % avg_runtime),
('Baseline Runtime', "%0.2f" % baseline_runtime),
('% Difference', "%0.2f" % delta),
('Allowable Threshold', "%0.2f" % my_self.threshold),
('Repetitions Performed', "%d" % my_self.repetitions),
('Total Runtime', "%0.2f" % total_runtime),
('Min Runtime', "%0.2f" % min_runtime),
('Max Runtime', "%0.2f" % max_runtime),
('Std Dev', "%0.2f" % std_dev),
('% Std Dev', "%0.2f" % std_dev_pct)
]
header = [x[0] for x in stats]
data = [x[1] for x in stats]
# dump statistics to a runtime_stats.csv file
output_file_path = os.path.join(current_dir, 'runtime_stats.csv')
existing = os.path.exists(output_file_path)
mode = 'a' if existing else 'w'
with open(output_file_path, mode) as f:
if not existing:
f.write("%s\n" % ",".join(header))
f.write("%s\n" % ",".join(data))
self.assertGreater(my_self.threshold, delta, "assert delta < my_self.threshold")
test_method = new.instancemethod(test_function,
self, self.__class__)
self.__dict__[ self._testMethodName + "*"] = test_method
self._testMethodName = self._testMethodName + "*"
super(PerformanceTestCase, self).run(result)
class GPPerfDiff(object):
'''
Utility class for checking performance deviation for performance test cases.
'''
@staticmethod
def check_perf_deviation(test_name, baseline_file, current_runtime, threshold):
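        # Returns (baseline_runtime, percent deviation); (-1, 100) when the test has no baseline entry.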
runtime = -1
with open(baseline_file, 'r') as f:
for line in f:
tokens = line.strip().split(',')
if len(tokens) != 2:
continue
if tokens[0] == test_name:
runtime = float(tokens[1])
break
if runtime == -1:
return (-1, 100)
delta = int(((current_runtime - runtime)/runtime) * 100)
return (runtime, delta)
|
edespino/gpdb
|
src/test/tinc/tinctest/models/perf/__init__.py
|
Python
|
apache-2.0
| 8,328
|
[
"VisIt"
] |
b92690b28503e47dd02dfd079af5e0bfef88b4dc8d38cce73e15a305c308efcb
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# User management
url(r'^users/', include('seraphim.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
url(r'^characters/', include('seraphim.characters.urls', namespace='characters')),
url(r'^groups/', include('seraphim.groups.urls', namespace='groups')),
url(r'^tracker/', include('seraphim.tracker.urls', namespace='tracker')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development: just visit
    # these urls in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
|
malaclypse2/seraphim
|
config/urls.py
|
Python
|
mit
| 1,788
|
[
"VisIt"
] |
ff21b5fdafbfa87ea081972bf9bcaec032d4a130d60b0879894cd729df861e0c
|
#!/usr/bin/env python
from rdkit import Chem, rdBase
from rdkit.Chem import rdDepictor
from rdkit.Chem import Draw
from rdkit.Chem import AllChem
from rdkit.Chem.Draw import rdMolDraw2D
from json import dumps
COLS = [(1.0, 0.0, 0.0), (0.0, 1.0, 0.0),
(0.0, 0.0, 1.0), (1.0, 0.55, 1.0)]
def get_hit_atoms_and_bonds(mol, smt):
alist = []
blist = []
q = Chem.MolFromSmarts(smt)
for match in mol.GetSubstructMatches(q):
alist.extend(match)
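    # keep every bond whose two end atoms were both matched by the SMARTS query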
for ha1 in alist:
for ha2 in alist:
if ha1 > ha2:
b = mol.GetBondBetweenAtoms(ha1, ha2)
if b:
blist.append(b.GetIdx())
return alist, blist
def add_colours_to_map(els, cols, col_num):
for el in els:
if el not in cols:
cols[el] = []
if COLS[col_num] not in cols[el]:
cols[el].append(COLS[col_num])
def do_a_picture(smi, smarts, filename, label, fmt='svg'):
rdDepictor.SetPreferCoordGen(True)
mol = Chem.MolFromSmiles(smi)
mol = Draw.PrepareMolForDrawing(mol)
acols = {}
bcols = {}
h_rads = {}
h_lw_mult = {}
for i, smt in enumerate(smarts):
alist, blist = get_hit_atoms_and_bonds(mol, smt)
col = i % 4
add_colours_to_map(alist, acols, col)
add_colours_to_map(blist, bcols, col)
if fmt == 'svg':
d = rdMolDraw2D.MolDraw2DSVG(300, 300)
mode = 'w'
elif fmt == 'png':
d = rdMolDraw2D.MolDraw2DCairo(300, 300)
mode = 'wb'
else:
print('unknown format {}'.format(fmt))
return
d.drawOptions().fillHighlights = False
d.DrawMoleculeWithHighlights(mol, label, acols, bcols, h_rads, h_lw_mult, -1)
d.FinishDrawing()
with open(filename, mode) as f:
f.write(d.GetDrawingText())
smi = 'CO[C@@H](O)C1=C(O[C@H](F)Cl)C(C#N)=C1ONNC[NH3+]'
smarts = ['CONN', 'N#CC~CO', 'C=CON', 'CONNCN']
do_a_picture(smi, smarts, 'atom_highlights_3.png', '', fmt='png')
|
rdkit/rdkit
|
Docs/Book/data/test_multi_colours.py
|
Python
|
bsd-3-clause
| 2,011
|
[
"RDKit"
] |
6132fc2ada517af25dfdd17278029080183ad9b757f6104f30c2805d6680fa0f
|
# though there are other node modules in the image, they are all owned by the npm APK package, which means engine will ignore them
pkgs = {
"/node_modules/lodash/package.json": {
"name": "lodash",
"lics": ["MIT"],
"versions": ["4.17.4"],
"latest": "4.17.4",
"origins": [
"John-David Dalton <john.david.dalton@gmail.com> (http://allyoucanleet.com/)"
],
"sourcepkg": "git+https://github.com/lodash/lodash.git",
},
}
|
anchore/anchore-engine
|
tests/functional/clients/standalone/package_list/fixtures/npms.py
|
Python
|
apache-2.0
| 491
|
[
"Dalton"
] |
fad578023495feadd5f0fadd721a357bbb819efa8c4612bf958457e48e1577fa
|
#!/usr/bin/env python
from cp2k_tools.tools import *
from cp2k_tools.parser import *
from cp2k_tools.generator import *
import sys
from docopt import docopt
def extract_last_frame():
"""Usage: extract_last_frame.py [-h] [XYZINPUT] [XYZOUTPUT]
Extract the last frame from a XYZ file
Arguments:
XYZINPUT the XYZ file to read (otherwise stdin)
XYZOUTPUT the XYZ file to write (otherwise stdout)
Options:
-h --help
"""
arguments = docopt(extract_last_frame.__doc__)
p = XYZParser()
g = XYZGenerator()
with smart_open(arguments['XYZINPUT'], 'r') as source:
with smart_open(arguments['XYZOUTPUT'], 'w') as dest:
g.write([p.parse(source)[-1]], dest)
def generate_inputs():
"""Usage: generate_inputs.py [-h] single TEMPLATE SNIPPET [OUTPUT] [COORDS]
generate_inputs.py [-h] batch TEMPLATE SNIPPETDIR OUTPUTDIR
Use the configuration in SNIPPET/SNIPPETDIR to generate cp2k input files
based on the template.
Arguments:
TEMPLATE the template to use for generating the input files
SNIPPET a single json file
SNIPPETDIR a directory with json files
OUTPUT the output file (otherwise standard output)
COORDS the coordinates output file (otherwise standard output)
OUTPUTDIR where to create the project directories
Options:
-h --help
"""
arguments = docopt(generate_inputs.__doc__)
generator = CP2KInputGenerator()
import os
import json
from glob import glob
with open(arguments['TEMPLATE'], 'r') as f:
generator.load_template(f.read())
if arguments['single']:
with open(arguments['SNIPPET'], 'r') as f:
generator.load_config(json.load(f))
with smart_open(arguments['OUTPUT']) as f:
generator.write_input(f)
with smart_open(arguments['COORDS']) as f:
generator.write_coords(f)
sys.exit(0)
for snippet in glob(os.path.join(arguments['SNIPPETDIR'], '*.json')):
config = '{}'
with open(snippet, 'r') as f:
config = json.load(f)
generator.load_config(config)
target_dir = os.path.join(arguments['OUTPUTDIR'],
config['global']['project'])
target_filename = '%s.inp' % os.path.basename(snippet)[:-5]
try:
os.mkdir(target_dir)
except FileExistsError:
pass # ignore if directory already exists
target_config_path = os.path.join(target_dir, target_filename)
target_coord_path = os.path.join(target_dir, 'initial_coords.xyz')
with open(target_config_path, 'w') as f:
generator.write_input(f)
with open(target_coord_path, 'w') as f:
generator.write_coords(f)
print('generating configuration for %s (in %s)' %
(config['global']['project'], target_dir))
def cp2kparse():
"""Usage: cp2kparse.py [-hj] [-f FILE]
Parse cp2k output.
Options:
-h --help
-f --file=FILE cp2k output file to read [default: -]
-j --json produce JSON output instead of pretty printed python objects
"""
arguments = docopt(cp2kparse.__doc__)
p = CP2KOutputBlockParser()
with smart_open(arguments['--file'], 'r') as fh:
out = p.parse(fh.read())
if arguments['--json']:
import json
print(json.dumps(out))
else:
import pprint
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(out)
def oq():
"""Usage: oq.py [-hj] [-f FILE] QUERY
Extract data from cp2k output. The syntax is similar to that of the jq tool.
Options:
-h --help
-f --file=FILE cp2k output file to read [default: -]
-j --json produce JSON output instead of pretty printed python objects
"""
arguments = docopt(oq.__doc__)
p = CP2KOutputParser()
with smart_open(arguments['--file'], 'r') as fh:
p.parse(fh)
if arguments['--json']:
import json
print(json.dumps(p.query(arguments['QUERY'])))
else:
import pprint
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(p.query(arguments['QUERY']))
if __name__ == '__main__':
sys.argv = sys.argv[1:]
import cp2k_tools.cli
getattr(cp2k_tools.cli, sys.argv[0])()
|
dev-zero/cp2k-tools
|
cp2k_tools/cli.py
|
Python
|
apache-2.0
| 4,441
|
[
"CP2K"
] |
bc3302db1df4f0a3dc2d3b2185dae547dba479b28b387dd81f063def4d315027
|
"""Tumor only somatic calling with Pisces.
https://github.com/Illumina/Pisces
"""
import os
import shutil
import pysam
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import shared
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import bedutils, ploidy, vcfutils
def run(align_bams, items, ref_file, assoc_files, region=None, out_file=None):
"""Run tumor only pisces calling
Handles bgzipping output file and fixing VCF sample naming to match BAM sample.
"""
paired = vcfutils.get_paired_bams(align_bams, items)
assert paired and not paired.normal_bam, ("Pisces supports tumor-only variant calling: %s" %
(",".join([dd.get_sample_name(d) for d in items])))
vrs = bedutils.population_variant_regions(items)
target = shared.subset_variant_regions(vrs, region,
out_file, items=items, do_merge=True)
min_af = float(dd.get_min_allele_fraction(paired.tumor_data)) / 100.0
if not utils.file_exists(out_file):
base_out_name = utils.splitext_plus(os.path.basename(paired.tumor_bam))[0]
raw_file = "%s.vcf" % utils.splitext_plus(out_file)[0]
with file_transaction(paired.tumor_data, raw_file) as tx_out_file:
ref_dir = _prep_genome(os.path.dirname(tx_out_file), paired.tumor_data)
out_dir = os.path.dirname(tx_out_file)
cores = dd.get_num_cores(paired.tumor_data)
emit_min_af = min_af / 10.0
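            # emit candidates down to a tenth of the reporting threshold (--minvf); --vffilter then flags calls below min_af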
cmd = ("pisces --bampaths {paired.tumor_bam} --genomepaths {ref_dir} --intervalpaths {target} "
"--maxthreads {cores} --minvf {emit_min_af} --vffilter {min_af} "
"--ploidy somatic --gvcf false -o {out_dir}")
# Recommended filtering for low frequency indels
# https://github.com/bcbio/bcbio-nextgen/commit/49d0cbb1f6dcbea629c63749e2f9813bd06dcee3#commitcomment-29765373
cmd += " -RMxNFilter 5,9,0.35"
# For low frequency UMI tagged variants, set higher variant thresholds
# https://github.com/Illumina/Pisces/issues/14#issuecomment-399756862
if min_af < (1.0 / 100.0):
cmd += " --minbasecallquality 30"
do.run(cmd.format(**locals()), "Pisces tumor-only somatic calling")
shutil.move(os.path.join(out_dir, "%s.vcf" % base_out_name),
tx_out_file)
vcfutils.bgzip_and_index(raw_file, paired.tumor_data["config"],
prep_cmd="sed 's#%s.bam#%s#' | %s" %
(base_out_name, dd.get_sample_name(paired.tumor_data),
vcfutils.add_contig_to_header_cl(dd.get_ref_file(paired.tumor_data), out_file)))
return vcfutils.bgzip_and_index(out_file, paired.tumor_data["config"])
def _prep_genome(out_dir, data):
"""Create prepped reference directory for pisces.
Requires a custom GenomeSize.xml file present.
"""
genome_name = utils.splitext_plus(os.path.basename(dd.get_ref_file(data)))[0]
out_dir = utils.safe_makedir(os.path.join(out_dir, genome_name))
ref_file = dd.get_ref_file(data)
utils.symlink_plus(ref_file, os.path.join(out_dir, os.path.basename(ref_file)))
with open(os.path.join(out_dir, "GenomeSize.xml"), "w") as out_handle:
out_handle.write('<sequenceSizes genomeName="%s">' % genome_name)
for c in pysam.AlignmentFile("%s.dict" % utils.splitext_plus(ref_file)[0]).header["SQ"]:
cur_ploidy = ploidy.get_ploidy([data], region=[c["SN"]])
out_handle.write('<chromosome fileName="%s" contigName="%s" totalBases="%s" knownBases="%s" '
'isCircular="false" ploidy="%s" md5="%s"/>' %
(os.path.basename(ref_file), c["SN"], c["LN"], c["LN"], cur_ploidy, c["M5"]))
out_handle.write('</sequenceSizes>')
return out_dir
|
vladsaveliev/bcbio-nextgen
|
bcbio/variation/pisces.py
|
Python
|
mit
| 4,024
|
[
"pysam"
] |
a84618a0cd7db5cdb63c1621194a90dd6296d5d2d20abd985dead8fe3ab23a35
|
# File: testing.py
# Author: Oliver Steele
# Description: Script compiler testing framework
# * P_LZ_COPYRIGHT_BEGIN ******************************************************
# * Copyright 2001-2004 Laszlo Systems, Inc. All Rights Reserved. *
# * Use is subject to license terms. *
# * P_LZ_COPYRIGHT_END ********************************************************
# Many of these functions have undescriptive, one- or two-letter
# synonyms, because they're intended to be used from the console, not
# as part of an API.
# Additional documentation is in README.TXT
from __future__ import nested_scopes
import os
false, true = 0, 1
undefined = []
CallNo = 0
#
# Interactive Testing
#
def parse(str, type=None):
reloadSystem()
from Compiler import Parser
from org.openlaszlo.sc.parser import ParseException
try:
return Parser().parse(str, *(type and (type,) or ()))
except ParseException, e:
raise `e`
def pparse(str, type=None):
"""Pretty-prints the parsed string. No return value."""
parse(str, type).dump('')
def rt(str, type=None):
"""Pretty-prints the parsed string. No return value."""
reloadSystem()
from Compiler import ParseTreePrinter
ParseTreePrinter().print(parse(str, type))
def parse0(str, type=None):
reloadSystem()
from Compiler import Parser
from org.openlaszlo.sc.parser import ParseException
try:
return Parser().parse0(str, *(type and (type,) or ()))
except ParseException, e:
raise `e`
def pparse0(str, type=None):
"""Pretty-prints the parsed string. No return value."""
parse0(str, type).dump('')
def compile(s=None, **options):
# Update arguments from the argument cache, and vice versa
global SavedTest, SavedOptions, CallNo
if s is None:
try:
s = SavedTest
except NameError:
raise "The first call to compile must specify a source string."
options_ = globals().get('SavedOptions', {})
options_.update(options)
options = options_
if s:
SavedTest = s
if options:
SavedOptions = options
reloadSystem()
from Compiler import Compiler
c = Compiler(**options)
from org.openlaszlo.sc.parser import ParseException
try:
CallNo += 1
#bytes = c.compile(('#file "interactive input %d"\n#line 1\n' % CallNo) + s)
bytes = c.compile(s)
except ParseException, e:
raise `e`
writeMovie(bytes, 'test.swf')
def compileFile(fname, **options):
class Resolver:
def __init__(self, base):
self.base = base
def resolve(self, pathname):
import os
return os.path.join(os.path.split(self.base)[0], pathname)
options_ = {}; options_.update(options)
options['resolver'] = Resolver(fname)
compile(open(fname).read(), **options)
def compileConstraint(s=None, **options):
compile('var f = function (){\n#pragma "constraintFunction"\n%s}' % s, **options)
def writeMovie(bytes, fname):
from org.openlaszlo.iv.flash.api import FlashFile, Script
from org.openlaszlo.iv.flash.api.action import DoAction, Program
file = FlashFile.newFlashFile()
file.version = 5
file.mainScript = Script(1)
frame = file.mainScript.newFrame()
program = Program(bytes, 0, len(bytes))
frame.addFlashObject(DoAction(program))
istr = file.generate().inputStream
from jarray import zeros
bytes = zeros(istr.available(), 'b')
istr.read(bytes)
from java.io import FileOutputStream
ostr = FileOutputStream(fname)
try:
ostr.write(bytes)
finally:
ostr.close()
return
c = compile
cf = compileFile
#
# Timings
#
def time(s=None, **options):
reloadSystem() # force reload outside the timer
import time
t = time.time()
compile(s, **options)
return time.time() - t
def timings():
reloadSystem() # force reload outside the timer
baseline = time(doubleParse=0)
lps = len(SavedTest.split('\n'))/baseline
print '%2.2f lines/second (including comments)' % lps
for k in 'parse generate assemble'.split():
t = time(**{'double' + k.capitalize(): 1}) - baseline
print '%s\t%2.2fs\t%2.2f%%' % \
(k, t, 100 * t / baseline)
print 'total\t%2.2fs\t100%%' % baseline
def profile():
import profile, testing
profile.run('testing.compile()', 'profile.txt')
import pstats
p = pstats.Stats('profile.txt')
#p.strip_dirs().sort_stats('cumulative').print_stats(20)
p.strip_dirs().sort_stats('time').print_stats(20)
#
# Running Tests
#
TestKeys = None # Default test suite to run
TestIndex = None # Default test, if TestKeys is a singleton
def reloadSystem():
"""Reload changed modules."""
import parseinstructions, Compiler, tests
consequences = []
for m in parseinstructions, Compiler, tests:
import os
mt = getattr(m, '__time__', 0)
ft = os.path.getmtime(m.__file__)
if m in consequences or ft > mt:
print m
reload(m)
m.__time__ = ft
if m == parseinstructions:
consequences += [Compiler]
def collectTests(keys=undefined, index=undefined, flashOnly=false,
saveArguments=false):
# synch the args with the cache of previous values
if saveArguments:
global TestKeys, TestIndex
if keys is undefined: keys = TestKeys
if index is undefined: index = TestIndex
TestKeys = keys
TestIndex = index
# lift singletons to lists
if type(keys) == type(""):
keys = [keys]
# if keys is blank, run all the tests
if not keys:
if flashOnly:
keys = FlashTests
else:
keys = PassedTests
if len(keys) == 1 and index is not None:
tests = [Tests[keys[0]][index]]
else:
tests = []
for key in keys:
#tests += ['\n// ' + key]
tests += Tests[key]
# filter out tests that the Flash compiler can't run
if flashOnly:
tests = [test for test in tests if test not in FlashOmittedTestCases]
return tests
def runConstraintTests():
reloadSystem()
from tests import ConstraintTests
from Compiler import nodeString, ReferenceCollector
for test, expected in ConstraintTests:
rc = ReferenceCollector()
rc.visit(parse(test))
result = nodeString(rc.computeReferences('test'))
# print '(%r, %r),' % (test, result)
if result != expected:
raise '%r: expected %r; got %r' % (test, expected, result)
from tests import MetaConstraintTests
for test, expected in MetaConstraintTests:
rc = ReferenceCollector(true)
rc.visit(parse(test))
result = nodeString(rc.computeReferences('test'))
# print '(%r, %r),' % (test, result)
if result != expected:
raise '%r: expected %r; got %r' % (test, expected, result)
def test(keys=undefined, index=undefined, **options):
"""Run the tests."""
reloadSystem()
tests = collectTests(keys, index, saveArguments=true)
from Compiler import Compiler
runConstraintTests()
for test in tests:
c = Compiler(**options)
try:
from jarray import array
bytes = c.compile(test)
array(bytes, 'b')
except:
print "During compilation of %r" % test
raise
def write():
"""Write the test source to tests.as, and the swf to sc.swf.
The intent is that Flash compiles tests.as -> flash.swf,
and the flasms of sc.swf and flash.swf can be compared."""
tests = collectTests(None, flashOnly=true)
reloadSystem()
# put the functions first, since Flash will compile them first
def isFn(s): return s.startswith('function')
def insertFunctionComment(s):
pos = s.find('{')
return s[:pos+1] + 'src = %r;' %s + s[pos+1:]
tests = [insertFunctionComment(s) for s in tests if isFn(s)] + \
['src = %r\n%s' % (s,s)
for s in tests if not isFn(s)]
text = '\n'.join(tests) + '\n'
f = open('tests.as', 'w')
f.write(text)
f.close()
from Compiler import Compiler
c = Compiler(flashCompilerCompatability=true)
bytes = c.compile(text)
writeMovie(bytes, 'sc.swf')
RegressionOptions = {'createActivationObject': 0}
def establishRegressionBaseline():
reloadSystem()
tests = collectTests()
text = '\n'.join(tests)
from Compiler import Compiler
writeMovie(Compiler(**RegressionOptions).compile(text), 'regression-baseline.swf')
os.system('flasm -d regression.swf > regression-baseline.flasm')
def runRegressionTests():
reloadSystem()
tests = collectTests()
text = '\n'.join(tests)
from Compiler import Compiler
writeMovie(Compiler(**RegressionOptions).compile(text), 'regression.swf')
os.system('flasm -d regression.swf > regression.flasm')
os.system('diff regression-baseline.flasm regression.flasm')
def testStaticCoverage():
import Compiler
Compiler.testStaticCoverage()
rl = reloadSystem
t = test
w = write
#
# Defining Tests
#
def resetTests():
global Tests, PassedTests, FlashTests, FlashOmittedTestCases
Tests = {}
PassedTests = []
FlashTests = []
FlashOmittedTestCases = []
def DefineTests(name, tests, passed=true, flash=true):
Tests[name] = tests
if passed:
PassedTests.append(name)
if flash:
FlashTests.append(name)
def wrap(w, s, options):
"""Returns w % s. Options are parsed and acted upon. If s is a
tuple, its first element is the string, and the second element is
an options table that should override the options argument."""
if type(s) == type(()):
s, u = s
o = {}
o.update(options)
o.update(u)
options = o
s = w % s
def parseOptions(flash=true):
if not flash:
FlashOmittedTestCases.append(s)
parseOptions(**options)
return s
def Expr(s, **options):
"""Wrap the expressions in an assignment statement."""
return wrap('a = %s', s, options)
def Stmt(s, **options):
"""Wrap the statements in a function body."""
return wrap('function f() {%s}', s, options)
def Exprs(l, **options):
return [Expr(s, **options) for s in l]
def Stmts(l, **options):
return [Stmt(s, **options) for s in l]
if __name__ == '__main__':
runRegressionTests()
|
mcarlson/openlaszlo
|
WEB-INF/lps/server/sc/testing.py
|
Python
|
epl-1.0
| 10,503
|
[
"VisIt"
] |
e7820b0b137d5e184a00b32003d8618ee4005937b0c2de96d5121d3561c0a783
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""A Relay implementation of graph packing."""
from tvm import relay
from tvm.relay import op, transform
from tvm.relay import ExprMutator
def run_opt_pass(expr, opt_pass):
"""Exectue a relay pass."""
assert isinstance(opt_pass, transform.Pass)
mod = relay.Module.from_expr(expr)
mod = opt_pass(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def _to_shape(shape):
return tuple(int(sh) for sh in shape)
def _pack_batch_channel(data, dshape, bfactor, cfactor):
"""Pack the data channel dimension.
"""
assert int(dshape[0]) % bfactor == 0
assert int(dshape[1]) % cfactor == 0
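    # reshape to (N//bfactor, bfactor, C//cfactor, cfactor, H, W) and move the packed
    # batch/channel axes to the end, giving the NCHWnc-style packed layout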
data = op.reshape(data,
newshape=(int(dshape[0]) // bfactor, bfactor,
int(dshape[1]) // cfactor, cfactor,
int(dshape[2]), int(dshape[3])))
data = op.transpose(
data, axes=(0, 2, 4, 5, 1, 3))
return data
def _unpack_batch_channel(data, old_shape):
"""Unpack the data channel dimension.
"""
data = op.transpose(data, axes=(0, 4, 1, 5, 2, 3))
data = op.reshape(data, newshape=old_shape)
return data
def _pack_weight(data, dshape, cfactor):
"""Pack the weight into packed format.
"""
assert len(dshape) == 4
assert int(dshape[0]) % cfactor == 0
assert int(dshape[1]) % cfactor == 0
data = op.reshape(data,
newshape=(int(dshape[0]) // cfactor, cfactor,
int(dshape[1]) // cfactor, cfactor,
int(dshape[2]), int(dshape[3])))
data = op.transpose(
data, axes=(0, 2, 4, 5, 1, 3))
return data
def _pack_weight_conv2d_transpose(data, dshape, cfactor):
"""Pack the weight into packed format.
"""
dshape = _to_shape(dshape)
assert len(dshape) == 4
assert dshape[0] % cfactor == 0
assert dshape[1] % cfactor == 0
data = op.reshape(data,
newshape=(dshape[0] // cfactor, cfactor,
dshape[1] // cfactor, cfactor,
dshape[2], dshape[3]))
data = op.transpose(
data, axes=(2, 0, 4, 5, 3, 1))
return data
def _pack_const(data, dshape, dtype, bfactor, cfactor):
"""Pack a constant parameter.
"""
dshape = _to_shape(dshape)
assert len(dshape) == 3
assert dshape[0] % cfactor == 0
data = op.reshape(data,
newshape=(dshape[0] // cfactor,
cfactor, dshape[1],
dshape[2], 1))
data = op.transpose(
data, axes=(0, 2, 3, 4, 1))
# broadcast batch dimension to bfactor
data = op.broadcast_to(
data,
shape=(dshape[0] // cfactor, dshape[1], dshape[2], bfactor, cfactor))
return data
def _get_shape(node):
"""Get the shape of a node.
"""
return _to_shape(node.checked_type.shape)
class ExprPack(ExprMutator):
"""Visitor to perform graph packing on an AST.
"""
def __init__(self, bfactor, cfactor, weight_bits):
self.bfactor = bfactor
self.cfactor = cfactor
self.weight_bits = weight_bits
self.start_pack = False
# Cache Operator the algorithm matches against.
self.bitpack_start = op.op.get('annotation.bitpack_start')
self.bitpack_end = op.op.get('annotation.bitpack_end')
self.conv2d = op.op.get("nn.conv2d")
self.conv2d_transpose = op.op.get("nn.conv2d_transpose")
self.add = op.op.get("add")
self.multiply = op.op.get("multiply")
self.bias_add = op.op.get("nn.bias_add")
self.number_of_conv2d = 0
super().__init__()
def visit_call(self, call):
""" Visit the children. """
# First visit the children.
oshape = _get_shape(call)
odtype = call.checked_type.dtype
input_types = [arg.checked_type for arg in call.args]
args = [self.visit(arg) for arg in call.args]
# Start and stop cases.
if call.op == self.bitpack_start:
assert not self.start_pack
self.start_pack = True
return _pack_batch_channel(args[0], oshape, self.bfactor, self.cfactor)
elif call.op == self.bitpack_end:
if self.start_pack:
self.start_pack = False
data = args[0]
data_shape = _get_shape(call.args[0])
return _unpack_batch_channel(data, data_shape)
else:
pass
if self.start_pack:
# Operator cases
if call.op == self.conv2d and odtype == 'int32':
self.number_of_conv2d += 1
assert 8 % self.weight_bits == 0
w_lanes = 8 // self.weight_bits
data_layout = "NCHW%dn%dc" % (self.bfactor, self.cfactor)
kernel_layout = "OIHW%do%di" % (self.cfactor, self.cfactor)
data, weight = args
data_shape = _to_shape(input_types[0].shape)
kernel_shape = _to_shape(input_types[1].shape)
kernel = _pack_weight(weight, kernel_shape, self.cfactor)
# insert bit packing when necessary
if w_lanes != 1:
assert 8 % w_lanes == 0
kernel = op.bitpack(kernel, lanes=w_lanes)
conv2d = op.nn.conv2d(
data,
kernel,
strides=call.attrs.strides,
padding=call.attrs.padding,
dilation=call.attrs.dilation,
groups=call.attrs.groups,
channels=call.attrs.channels,
kernel_size=call.attrs.kernel_size,
data_layout=data_layout,
kernel_layout=kernel_layout,
out_dtype=call.attrs.out_dtype)
return conv2d
elif call.op == self.conv2d_transpose and odtype == 'int32':
self.number_of_conv2d += 1
assert 8 % self.weight_bits == 0
w_lanes = 8 // self.weight_bits
if self.start_pack:
data_layout = "NCHW%dn%dc" % (self.bfactor, self.cfactor)
kernel_layout = "IOHW%di%do" % (self.cfactor, self.cfactor)
data, weight = args
data_shape = _to_shape(input_types[0].shape)
kernel_shape = _to_shape(input_types[1].shape)
kernel = _pack_weight_conv2d_transpose(weight, kernel_shape, self.cfactor)
conv2d = op.nn.conv2d_transpose(
data,
kernel,
strides=call.attrs.strides,
padding=call.attrs.padding,
dilation=call.attrs.dilation,
groups=call.attrs.groups,
channels=call.attrs.channels,
kernel_size=call.attrs.kernel_size,
data_layout=data_layout,
kernel_layout=kernel_layout,
output_padding=call.attrs.output_padding,
out_dtype=call.attrs.out_dtype)
return conv2d
elif call.op == self.add and \
tuple(input_types[0].shape) == tuple(input_types[1].shape):
pass
elif call.op == self.add and len(input_types[1].shape) == 3:
data, const = args
const = _pack_const(const,
_to_shape(input_types[1].shape),
input_types[1].dtype,
self.bfactor,
self.cfactor)
return relay.Call(self.add, [data, const])
elif call.op == self.multiply and \
tuple(input_types[0].shape) == tuple(input_types[1].shape):
pass
elif call.op == self.multiply and len(input_types[1].shape) == 3:
data, const = args
const = _pack_const(const,
_to_shape(input_types[1].shape),
input_types[1].dtype,
self.bfactor,
self.cfactor)
return relay.Call(self.multiply, [data, const])
elif self.start_pack and call.op == self.bias_add:
data, bias = args
bias = _pack_const(bias,
_to_shape(input_types[1].shape),
input_types[1].dtype,
self.bfactor,
self.cfactor)
return relay.Call(self.add, [data, bias])
elif self.start_pack and call.op == op.op.get('cast') and \
input_types[0].dtype == 'int32':
cast = relay.Call(op.op.get('cast'), [args[0]], call.attrs)
return relay.Call(op.op.get('copy'), [cast])
return relay.Call(
self.visit(call.op),
args,
call.attrs)
class BT(Exception):
pass
def get_subgraph(expr, start_name, stop_name):
""" We assume stop_name only appears once for simplicity.
This constraint will be lifted in the future.
bitpack_start and bitpack_end are both inclusive.
"""
bitpack_start = op.op.get('annotation.bitpack_start')
bitpack_end = op.op.get('annotation.bitpack_end')
anf = run_opt_pass(expr, transform.ToANormalForm())
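    # convert to A-normal form so every call is bound in a Let, which makes it easy to
    # splice the bitpack_start/end annotations around the matching calls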
def _recursion(anf, start_found, stop_found):
""" Helper to obtain the subgraph.
"""
if isinstance(anf, relay.expr.Function):
return relay.expr.Function(anf.params,
_recursion(anf.body, start_found, stop_found),
anf.ret_type, anf.type_params, anf.attrs)
elif isinstance(anf, relay.expr.Let):
value = anf.value
if isinstance(value, relay.expr.Call):
if isinstance(value.op, relay.op.Op):
if value.op.name == start_name and not start_found:
value = relay.expr.Call(bitpack_start, [value])
start_found = True
elif value.op.name == stop_name:
raise BT()
try:
return relay.expr.Let(anf.var, value, _recursion(anf.body, start_found, stop_found))
except BT:
assert start_found
assert not stop_found
stop_found = True
value = relay.expr.Call(bitpack_end, [value])
                    # todo: check that anf.body contains no other occurrence of stop_name
return relay.expr.Let(anf.var, value, anf.body)
else:
assert start_found
assert stop_found
return anf
annotated = _recursion(anf, False, False)
return run_opt_pass(annotated, transform.ToGraphNormalForm())
def graph_pack(expr,
bfactor,
cfactor,
weight_bits,
start_name="nn.max_pool2d",
stop_name="nn.global_avg_pool2d"):
"""Pack the graph into batch&channel packed format.
Parameters
----------
expr : relay.Expr
The input program.
bfactor : int
The packing factor in batch
cfactor : int
The packing factor in channel
weight_bits: int
The bit-width of the weights.
start_name: str, optional
Start packing from certain known node.
stop_name: str, optional
Stop packing from certain known node.
Returns
-------
expr : Expr
The transformed expression.
"""
assert isinstance(expr, relay.Function)
expr = get_subgraph(expr, start_name, stop_name)
expr = run_opt_pass(expr, transform.InferType())
packer = ExprPack(
bfactor, cfactor,
weight_bits)
expr = packer.visit(expr)
assert not packer.start_pack
return run_opt_pass(expr, transform.InferType())
|
Huyuwei/tvm
|
vta/python/vta/top/graphpack.py
|
Python
|
apache-2.0
| 13,086
|
[
"VisIt"
] |
6a41c8c0749d366dc65871f4e18647ece6479a20474789bd7ce4b185ff43bf99
|
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from camelot.container import Container
class FigureContainer( Container ):
"""A container that is able to plot itself on a matplotlib figure canvas.
Its 'plot_on_figure' method will be called in the gui thread to fill the figure
canvas.
One figure canvas can contain multiple axes (=sub plots)
"""
def __init__(self, axes):
"""
:param axes: a list of AxesContainer objects representing all the subplots, in
the form of ::
[[ax1, ax2],
[ax3, ax4]]
"""
self.axes = axes
def plot_on_figure(self, fig):
"""Draw all axes (sub plots) on a figure canvas"""
fig.clear()
if self.axes:
rows = len(self.axes)
cols = len(self.axes[0])
for i,row in enumerate(self.axes):
for j,subplot in enumerate(row):
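                    # matplotlib numbers subplots row-major starting at 1, hence n+1 below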
n = i*cols + j
ax = fig.add_subplot( rows, cols, n+1 )
ax.clear()
subplot.plot_on_axes( ax )
class AxesMethod(object):
"""Helper class to substitute a method on an Axes object and
record its calls"""
def __init__(self, method_name, commands):
"""
:param method_name: the name of the method for which this object is a substitute
:param commands: a list in which to store invocations of the method
"""
self._method_name = method_name
self._commands = commands
def __call__(self, *args, **kwargs):
"""record a call the the substitute method into the commands list"""
self._commands.append( (self._method_name, args, kwargs) )
class AxesContainer( Container ):
"""A container that is able to generate a plot on a matplotlib axes. Methods
can be called on this class as if it were a matplotlib Axes class. All method
calls will be recorded. Of course the methods won't return matplotlib objects.
    The set_auto_legend method can be used to turn legends on without the need for
matplotlib objects.
"""
def __init__(self):
"""
:param legend: True or False, to put a legend on the chart
"""
super(AxesContainer, self).__init__()
# store all the method calls that need to be called on a
# matplotlib axes object in a list
self._commands = list()
self._auto_legend = False
def __getattr__(self, attribute_name):
"""Suppose the caller wants to call a function on a matplotlib
axes object"""
return AxesMethod( attribute_name, self._commands )
def set_auto_legend(self, auto_legend):
"""Specify if the container should try to put a legend on the
plot.
:param auto_legend: True or False
"""
self._auto_legend = auto_legend
def plot_on_axes(self, ax):
"""Replay the list of stored commands to the real Axes object"""
for name, args, kwargs in self._commands:
getattr(ax, name)(*args, **kwargs)
if self._auto_legend:
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
class PlotContainer( AxesContainer ):
    # this line drives pylint crazy because it needs axes to be imported
#__doc__ = axes.Axes.plot.__doc__
def __init__(self, *args, **kwargs):
""":param *args, **kwargs: the arguments to be passed to the matplotlib plot command"""
super(PlotContainer, self).__init__()
self.plot( *args, **kwargs )
class BarContainer( AxesContainer ):
    # this line drives pylint crazy because it needs axes to be imported
#__doc__ = axes.Axes.bar.__doc__
def __init__(self, *args, **kwargs):
""":param *args, **kwargs: the arguments to be passed to the matplotlib bar command"""
super(BarContainer, self).__init__()
self.bar( *args, **kwargs )
def structure_to_figure_container( structure ):
"""Convert a structure to a figure container, if the structure
is an instance of a FigureContainer, return as is.
If the structure is an instance of an AxesContainer, return a
FigureContainer with a single Axes.
If the structure is a list, use the structure as a constructor
argument for the FigureContainer
"""
if isinstance(structure, FigureContainer):
return structure
if isinstance(structure, AxesContainer):
return FigureContainer( [[structure]] )
if isinstance(structure, (list, tuple)):
return FigureContainer( structure )
|
kurtraschke/camelot
|
camelot/container/chartcontainer.py
|
Python
|
gpl-2.0
| 5,735
|
[
"VisIt"
] |
657b93bc0992252de97698aa303e97f5518d3281ea4390dfe47b657fe557d542
|
class MooseException(Exception):
"""
An Exception for MOOSE python applications.
"""
def __init__(self, *args):
message = ' '.join([str(x) for x in args])
Exception.__init__(self, message)
|
liuwenf/moose
|
python/mooseutils/MooseException.py
|
Python
|
lgpl-2.1
| 222
|
[
"MOOSE"
] |
ba8b8f0c889c19de9990962761fb729173bc734651b9a05ad8cc3b3bddf5e546
|
"""
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
from __future__ import nested_scopes
ident = '$Id: Types.py,v 1.19 2005/02/22 04:29:43 warnes Exp $'
from version import __version__
import UserList
import base64
import cgi
import urllib
import copy
import re
import time
from types import *
# SOAPpy modules
from Errors import *
from NS import NS
from Utilities import encodeHexString, cleanDate
from Config import Config
###############################################################################
# Utility functions
###############################################################################
def isPrivate(name): return name[0]=='_'
def isPublic(name): return name[0]!='_'
###############################################################################
# Types and Wrappers
###############################################################################
class anyType:
_validURIs = (NS.XSD, NS.XSD2, NS.XSD3, NS.ENC)
def __init__(self, data = None, name = None, typed = 1, attrs = None):
if self.__class__ == anyType:
raise Error, "anyType can't be instantiated directly"
if type(name) in (ListType, TupleType):
self._ns, self._name = name
else:
self._ns = self._validURIs[0]
self._name = name
self._typed = typed
self._attrs = {}
self._cache = None
self._type = self._typeName()
self._data = self._checkValueSpace(data)
if attrs != None:
self._setAttrs(attrs)
def __str__(self):
if hasattr(self,'_name') and self._name:
return "<%s %s at %d>" % (self.__class__, self._name, id(self))
return "<%s at %d>" % (self.__class__, id(self))
__repr__ = __str__
def _checkValueSpace(self, data):
return data
def _marshalData(self):
if isinstance(self._data, basestring): return self._data
return str(self._data)
def _marshalAttrs(self, ns_map, builder):
a = ''
for attr, value in self._attrs.items():
ns, n = builder.genns(ns_map, attr[0])
a += n + ' %s%s="%s"' % \
(ns, attr[1], cgi.escape(str(value), 1))
return a
def _fixAttr(self, attr):
if type(attr) in (StringType, UnicodeType):
attr = (None, attr)
elif type(attr) == ListType:
attr = tuple(attr)
elif type(attr) != TupleType:
raise AttributeError, "invalid attribute type"
if len(attr) != 2:
raise AttributeError, "invalid attribute length"
if type(attr[0]) not in (NoneType, StringType, UnicodeType):
raise AttributeError, "invalid attribute namespace URI type"
return attr
def _getAttr(self, attr):
attr = self._fixAttr(attr)
try:
return self._attrs[attr]
except:
return None
def _setAttr(self, attr, value):
attr = self._fixAttr(attr)
if type(value) is StringType:
value = unicode(value)
self._attrs[attr] = value
def _setAttrs(self, attrs):
if type(attrs) in (ListType, TupleType):
for i in range(0, len(attrs), 2):
self._setAttr(attrs[i], attrs[i + 1])
return
if type(attrs) == DictType:
d = attrs
elif isinstance(attrs, anyType):
d = attrs._attrs
else:
raise AttributeError, "invalid attribute type"
for attr, value in d.items():
self._setAttr(attr, value)
def _setMustUnderstand(self, val):
self._setAttr((NS.ENV, "mustUnderstand"), val)
def _getMustUnderstand(self):
return self._getAttr((NS.ENV, "mustUnderstand"))
def _setActor(self, val):
self._setAttr((NS.ENV, "actor"), val)
def _getActor(self):
return self._getAttr((NS.ENV, "actor"))
def _typeName(self):
return self.__class__.__name__[:-4]
def _validNamespaceURI(self, URI, strict):
if not hasattr(self, '_typed') or not self._typed:
return None
if URI in self._validURIs:
return URI
if not strict:
return self._ns
raise AttributeError, \
"not a valid namespace for type %s" % self._type
class voidType(anyType):
pass
class stringType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type:" % self._type
return data
class untypedType(stringType):
def __init__(self, data = None, name = None, attrs = None):
stringType.__init__(self, data, name, 0, attrs)
class IDType(stringType): pass
class NCNameType(stringType): pass
class NameType(stringType): pass
class ENTITYType(stringType): pass
class IDREFType(stringType): pass
class languageType(stringType): pass
class NMTOKENType(stringType): pass
class QNameType(stringType): pass
class tokenType(anyType):
_validURIs = (NS.XSD2, NS.XSD3)
__invalidre = '[\n\t]|^ | $| '
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type" % self._type
if type(self.__invalidre) == StringType:
self.__invalidre = re.compile(self.__invalidre)
if self.__invalidre.search(data):
raise ValueError, "invalid %s value" % self._type
return data
class normalizedStringType(anyType):
_validURIs = (NS.XSD3,)
__invalidre = '[\n\r\t]'
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type" % self._type
if type(self.__invalidre) == StringType:
self.__invalidre = re.compile(self.__invalidre)
if self.__invalidre.search(data):
raise ValueError, "invalid %s value" % self._type
return data
class CDATAType(normalizedStringType):
_validURIs = (NS.XSD2,)
class booleanType(anyType):
def __int__(self):
return self._data
__nonzero__ = __int__
def _marshalData(self):
return ['false', 'true'][self._data]
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if data in (0, '0', 'false', ''):
return 0
if data in (1, '1', 'true'):
return 1
raise ValueError, "invalid %s value" % self._type
class decimalType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType, FloatType):
raise Error, "invalid %s value" % self._type
return data
class floatType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType, FloatType) or \
data < -3.4028234663852886E+38 or \
data > 3.4028234663852886E+38:
raise ValueError, "invalid %s value: %s" % (self._type, repr(data))
return data
def _marshalData(self):
return "%.18g" % self._data # More precision
class doubleType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType, FloatType) or \
data < -1.7976931348623158E+308 or \
data > 1.7976931348623157E+308:
raise ValueError, "invalid %s value: %s" % (self._type, repr(data))
return data
def _marshalData(self):
return "%.18g" % self._data # More precision
class durationType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
try:
# A tuple or a scalar is OK, but make them into a list
if type(data) == TupleType:
data = list(data)
elif type(data) != ListType:
data = [data]
if len(data) > 6:
raise Exception, "too many values"
# Now check the types of all the components, and find
# the first nonzero element along the way.
f = -1
for i in range(len(data)):
if data[i] == None:
data[i] = 0
continue
if type(data[i]) not in \
(IntType, LongType, FloatType):
raise Exception, "element %d a bad type" % i
if data[i] and f == -1:
f = i
# If they're all 0, just use zero seconds.
if f == -1:
self._cache = 'PT0S'
return (0,) * 6
# Make sure only the last nonzero element has a decimal fraction
# and only the first element is negative.
d = -1
for i in range(f, len(data)):
if data[i]:
if d != -1:
raise Exception, \
"all except the last nonzero element must be " \
"integers"
if data[i] < 0 and i > f:
raise Exception, \
"only the first nonzero element can be negative"
elif data[i] != long(data[i]):
d = i
# Pad the list on the left if necessary.
if len(data) < 6:
n = 6 - len(data)
f += n
d += n
data = [0] * n + data
# Save index of the first nonzero element and the decimal
# element for _marshalData.
self.__firstnonzero = f
self.__decimal = d
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
d = self._data
t = 0
if d[self.__firstnonzero] < 0:
s = '-P'
else:
s = 'P'
t = 0
for i in range(self.__firstnonzero, len(d)):
if d[i]:
if i > 2 and not t:
s += 'T'
t = 1
if self.__decimal == i:
s += "%g" % abs(d[i])
else:
s += "%d" % long(abs(d[i]))
s += ['Y', 'M', 'D', 'H', 'M', 'S'][i]
self._cache = s
return self._cache
class timeDurationType(durationType):
_validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class dateTimeType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.time()
if (type(data) in (IntType, LongType)):
data = list(time.gmtime(data)[:6])
elif (type(data) == FloatType):
f = data - int(data)
data = list(time.gmtime(int(data))[:6])
data[5] += f
elif type(data) in (ListType, TupleType):
if len(data) < 6:
raise Exception, "not enough values"
if len(data) > 9:
raise Exception, "too many values"
data = list(data[:6])
cleanDate(data)
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
d = self._data
s = "%04d-%02d-%02dT%02d:%02d:%02d" % ((abs(d[0]),) + d[1:])
if d[0] < 0:
s = '-' + s
f = d[5] - int(d[5])
if f != 0:
s += ("%g" % f)[1:]
s += 'Z'
self._cache = s
return self._cache
class recurringInstantType(anyType):
_validURIs = (NS.XSD,)
def _checkValueSpace(self, data):
try:
if data == None:
data = list(time.gmtime(time.time())[:6])
if (type(data) in (IntType, LongType)):
data = list(time.gmtime(data)[:6])
elif (type(data) == FloatType):
f = data - int(data)
data = list(time.gmtime(int(data))[:6])
data[5] += f
elif type(data) in (ListType, TupleType):
if len(data) < 1:
raise Exception, "not enough values"
if len(data) > 9:
raise Exception, "too many values"
data = list(data[:6])
if len(data) < 6:
data += [0] * (6 - len(data))
f = len(data)
for i in range(f):
if data[i] == None:
if f < i:
raise Exception, \
"only leftmost elements can be none"
else:
f = i
break
cleanDate(data, f)
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
d = self._data
e = list(d)
neg = ''
if not e[0]:
e[0] = '--'
else:
if e[0] < 0:
neg = '-'
e[0] = abs(e[0])
if e[0] < 100:
e[0] = '-' + "%02d" % e[0]
else:
e[0] = "%04d" % e[0]
for i in range(1, len(e)):
if e[i] == None or (i < 3 and e[i] == 0):
e[i] = '-'
else:
if e[i] < 0:
neg = '-'
e[i] = abs(e[i])
e[i] = "%02d" % e[i]
if d[5]:
f = abs(d[5] - int(d[5]))
if f:
e[5] += ("%g" % f)[1:]
s = "%s%s-%s-%sT%s:%s:%sZ" % ((neg,) + tuple(e))
self._cache = s
return self._cache
class timeInstantType(dateTimeType):
_validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class timePeriodType(dateTimeType):
_validURIs = (NS.XSD2, NS.ENC)
class timeType(anyType):
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[3:6]
elif (type(data) == FloatType):
f = data - int(data)
data = list(time.gmtime(int(data))[3:6])
data[2] += f
elif type(data) in (IntType, LongType):
data = time.gmtime(data)[3:6]
elif type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[3:6]
elif len(data) > 3:
raise Exception, "too many values"
data = [None, None, None] + list(data)
if len(data) < 6:
data += [0] * (6 - len(data))
cleanDate(data, 3)
data = data[3:]
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
d = self._data
s = ''
s = time.strftime("%H:%M:%S", (0, 0, 0) + d + (0, 0, -1))
f = d[2] - int(d[2])
if f != 0:
s += ("%g" % f)[1:]
s += 'Z'
self._cache = s
return self._cache
class dateType(anyType):
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[0:3]
elif type(data) in (IntType, LongType, FloatType):
data = time.gmtime(data)[0:3]
elif type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[0:3]
elif len(data) > 3:
raise Exception, "too many values"
data = list(data)
if len(data) < 3:
data += [1, 1, 1][len(data):]
data += [0, 0, 0]
cleanDate(data)
data = data[:3]
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
d = self._data
s = "%04d-%02d-%02dZ" % ((abs(d[0]),) + d[1:])
if d[0] < 0:
s = '-' + s
self._cache = s
return self._cache
class gYearMonthType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[0:2]
elif type(data) in (IntType, LongType, FloatType):
data = time.gmtime(data)[0:2]
elif type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[0:2]
elif len(data) > 2:
raise Exception, "too many values"
data = list(data)
if len(data) < 2:
data += [1, 1][len(data):]
data += [1, 0, 0, 0]
cleanDate(data)
data = data[:2]
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
d = self._data
s = "%04d-%02dZ" % ((abs(d[0]),) + d[1:])
if d[0] < 0:
s = '-' + s
self._cache = s
return self._cache
class gYearType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[0:1]
elif type(data) in (IntType, LongType, FloatType):
data = [data]
if type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[0:1]
elif len(data) < 1:
raise Exception, "too few values"
elif len(data) > 1:
raise Exception, "too many values"
if type(data[0]) == FloatType:
try: s = int(data[0])
except: s = long(data[0])
if s != data[0]:
raise Exception, "not integral"
data = [s]
elif type(data[0]) not in (IntType, LongType):
raise Exception, "bad type"
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return data[0]
def _marshalData(self):
if self._cache == None:
d = self._data
s = "%04dZ" % abs(d)
if d < 0:
s = '-' + s
self._cache = s
return self._cache
class centuryType(anyType):
_validURIs = (NS.XSD2, NS.ENC)
def _checkValueSpace(self, data):
try:
if data == None:
                data = [time.gmtime(time.time())[0] / 100]
elif type(data) in (IntType, LongType, FloatType):
data = [data]
if type(data) in (ListType, TupleType):
if len(data) == 9:
                    data = [data[0] / 100]
elif len(data) < 1:
raise Exception, "too few values"
elif len(data) > 1:
raise Exception, "too many values"
if type(data[0]) == FloatType:
try: s = int(data[0])
except: s = long(data[0])
if s != data[0]:
raise Exception, "not integral"
data = [s]
elif type(data[0]) not in (IntType, LongType):
raise Exception, "bad type"
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return data[0]
def _marshalData(self):
if self._cache == None:
d = self._data
s = "%02dZ" % abs(d)
if d < 0:
s = '-' + s
self._cache = s
return self._cache
class yearType(gYearType):
_validURIs = (NS.XSD2, NS.ENC)
class gMonthDayType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[1:3]
elif type(data) in (IntType, LongType, FloatType):
data = time.gmtime(data)[1:3]
elif type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[0:2]
elif len(data) > 2:
raise Exception, "too many values"
data = list(data)
if len(data) < 2:
data += [1, 1][len(data):]
data = [0] + data + [0, 0, 0]
cleanDate(data, 1)
data = data[1:3]
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
self._cache = "--%02d-%02dZ" % self._data
return self._cache
class recurringDateType(gMonthDayType):
_validURIs = (NS.XSD2, NS.ENC)
class gMonthType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[1:2]
elif type(data) in (IntType, LongType, FloatType):
data = [data]
if type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[1:2]
elif len(data) < 1:
raise Exception, "too few values"
elif len(data) > 1:
raise Exception, "too many values"
if type(data[0]) == FloatType:
try: s = int(data[0])
except: s = long(data[0])
if s != data[0]:
raise Exception, "not integral"
data = [s]
elif type(data[0]) not in (IntType, LongType):
raise Exception, "bad type"
if data[0] < 1 or data[0] > 12:
raise Exception, "bad value"
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return data[0]
def _marshalData(self):
if self._cache == None:
self._cache = "--%02d--Z" % self._data
return self._cache
class monthType(gMonthType):
_validURIs = (NS.XSD2, NS.ENC)
class gDayType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[2:3]
elif type(data) in (IntType, LongType, FloatType):
data = [data]
if type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[2:3]
elif len(data) < 1:
raise Exception, "too few values"
elif len(data) > 1:
raise Exception, "too many values"
if type(data[0]) == FloatType:
try: s = int(data[0])
except: s = long(data[0])
if s != data[0]:
raise Exception, "not integral"
data = [s]
elif type(data[0]) not in (IntType, LongType):
raise Exception, "bad type"
if data[0] < 1 or data[0] > 31:
raise Exception, "bad value"
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return data[0]
def _marshalData(self):
if self._cache == None:
self._cache = "---%02dZ" % self._data
return self._cache
class recurringDayType(gDayType):
_validURIs = (NS.XSD2, NS.ENC)
class hexBinaryType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type" % self._type
return data
def _marshalData(self):
if self._cache == None:
self._cache = encodeHexString(self._data)
return self._cache
class base64BinaryType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type" % self._type
return data
def _marshalData(self):
if self._cache == None:
self._cache = base64.encodestring(self._data)
return self._cache
class base64Type(base64BinaryType):
_validURIs = (NS.ENC,)
class binaryType(anyType):
_validURIs = (NS.XSD, NS.ENC)
def __init__(self, data, name = None, typed = 1, encoding = 'base64',
attrs = None):
anyType.__init__(self, data, name, typed, attrs)
self._setAttr('encoding', encoding)
def _marshalData(self):
if self._cache == None:
if self._getAttr((None, 'encoding')) == 'base64':
self._cache = base64.encodestring(self._data)
else:
self._cache = encodeHexString(self._data)
return self._cache
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type" % self._type
return data
def _setAttr(self, attr, value):
attr = self._fixAttr(attr)
if attr[1] == 'encoding':
if attr[0] != None or value not in ('base64', 'hex'):
raise AttributeError, "invalid encoding"
self._cache = None
anyType._setAttr(self, attr, value)
class anyURIType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type" % self._type
return data
def _marshalData(self):
if self._cache == None:
self._cache = urllib.quote(self._data)
return self._cache
class uriType(anyURIType):
_validURIs = (NS.XSD,)
class uriReferenceType(anyURIType):
_validURIs = (NS.XSD2,)
class NOTATIONType(anyType):
def __init__(self, data, name = None, typed = 1, attrs = None):
if self.__class__ == NOTATIONType:
raise Error, "a NOTATION can't be instantiated directly"
anyType.__init__(self, data, name, typed, attrs)
class ENTITIESType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) in (StringType, UnicodeType):
return (data,)
if type(data) not in (ListType, TupleType) or \
filter (lambda x: type(x) not in (StringType, UnicodeType), data):
raise AttributeError, "invalid %s type" % self._type
return data
def _marshalData(self):
return ' '.join(self._data)
class IDREFSType(ENTITIESType): pass
class NMTOKENSType(ENTITIESType): pass
class integerType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType):
raise ValueError, "invalid %s value" % self._type
return data
class nonPositiveIntegerType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or data > 0:
raise ValueError, "invalid %s value" % self._type
return data
class non_Positive_IntegerType(nonPositiveIntegerType):
_validURIs = (NS.XSD,)
def _typeName(self):
return 'non-positive-integer'
class negativeIntegerType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or data >= 0:
raise ValueError, "invalid %s value" % self._type
return data
class negative_IntegerType(negativeIntegerType):
_validURIs = (NS.XSD,)
def _typeName(self):
return 'negative-integer'
class longType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < -9223372036854775808L or \
data > 9223372036854775807L:
raise ValueError, "invalid %s value" % self._type
return data
class intType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < -2147483648L or \
data > 2147483647:
raise ValueError, "invalid %s value" % self._type
return data
class shortType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < -32768 or \
data > 32767:
raise ValueError, "invalid %s value" % self._type
return data
class byteType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < -128 or \
data > 127:
raise ValueError, "invalid %s value" % self._type
return data
class nonNegativeIntegerType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or data < 0:
raise ValueError, "invalid %s value" % self._type
return data
class non_Negative_IntegerType(nonNegativeIntegerType):
_validURIs = (NS.XSD,)
def _typeName(self):
return 'non-negative-integer'
class unsignedLongType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < 0 or \
data > 18446744073709551615L:
raise ValueError, "invalid %s value" % self._type
return data
class unsignedIntType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < 0 or \
data > 4294967295L:
raise ValueError, "invalid %s value" % self._type
return data
class unsignedShortType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < 0 or \
data > 65535:
raise ValueError, "invalid %s value" % self._type
return data
class unsignedByteType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < 0 or \
data > 255:
raise ValueError, "invalid %s value" % self._type
return data
class positiveIntegerType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or data <= 0:
raise ValueError, "invalid %s value" % self._type
return data
class positive_IntegerType(positiveIntegerType):
_validURIs = (NS.XSD,)
def _typeName(self):
return 'positive-integer'
# Now compound types
class compoundType(anyType):
def __init__(self, data = None, name = None, typed = 1, attrs = None):
if self.__class__ == compoundType:
raise Error, "a compound can't be instantiated directly"
anyType.__init__(self, data, name, typed, attrs)
self._keyord = []
if type(data) == DictType:
self.__dict__.update(data)
def _aslist(self, item=None):
if item is not None:
return self.__dict__[self._keyord[item]]
else:
return map( lambda x: self.__dict__[x], self._keyord)
def _asdict(self, item=None, encoding=Config.dict_encoding):
if item is not None:
if type(item) in (UnicodeType,StringType):
item = item.encode(encoding)
return self.__dict__[item]
else:
retval = {}
def fun(x): retval[x.encode(encoding)] = self.__dict__[x]
if hasattr(self, '_keyord'):
map( fun, self._keyord)
else:
for name in dir(self):
if isPublic(name):
retval[name] = getattr(self,name)
return retval
def __getitem__(self, item):
if type(item) == IntType:
return self.__dict__[self._keyord[item]]
else:
return getattr(self, item)
def __len__(self):
return len(self._keyord)
def __nonzero__(self):
return 1
def _keys(self):
return filter(lambda x: x[0] != '_', self.__dict__.keys())
def _addItem(self, name, value, attrs = None):
if name in self._keyord:
if type(self.__dict__[name]) != ListType:
self.__dict__[name] = [self.__dict__[name]]
self.__dict__[name].append(value)
else:
self.__dict__[name] = value
self._keyord.append(name)
def _placeItem(self, name, value, pos, subpos = 0, attrs = None):
if subpos == 0 and type(self.__dict__[name]) != ListType:
self.__dict__[name] = value
else:
self.__dict__[name][subpos] = value
self._keyord[pos] = name
def _getItemAsList(self, name, default = []):
try:
d = self.__dict__[name]
except:
return default
if type(d) == ListType:
return d
return [d]
def __str__(self):
return anyType.__str__(self) + ": " + str(self._asdict())
def __repr__(self):
return self.__str__()
class structType(compoundType):
pass
class headerType(structType):
_validURIs = (NS.ENV,)
def __init__(self, data = None, typed = 1, attrs = None):
structType.__init__(self, data, "Header", typed, attrs)
class bodyType(structType):
_validURIs = (NS.ENV,)
def __init__(self, data = None, typed = 1, attrs = None):
structType.__init__(self, data, "Body", typed, attrs)
class arrayType(UserList.UserList, compoundType):
def __init__(self, data = None, name = None, attrs = None,
offset = 0, rank = None, asize = 0, elemsname = None):
if data:
if type(data) not in (ListType, TupleType):
raise Error, "Data must be a sequence"
UserList.UserList.__init__(self, data)
compoundType.__init__(self, data, name, 0, attrs)
self._elemsname = elemsname or "item"
if data == None:
self._rank = rank
# According to 5.4.2.2 in the SOAP spec, each element in a
# sparse array must have a position. _posstate keeps track of
# whether we've seen a position or not. It's possible values
# are:
# -1 No elements have been added, so the state is indeterminate
# 0 An element without a position has been added, so no
# elements can have positions
# 1 An element with a position has been added, so all elements
# must have positions
self._posstate = -1
self._full = 0
if asize in ('', None):
asize = '0'
self._dims = map (lambda x: int(x), str(asize).split(','))
self._dims.reverse() # It's easier to work with this way
self._poss = [0] * len(self._dims) # This will end up
# reversed too
for i in range(len(self._dims)):
if self._dims[i] < 0 or \
self._dims[i] == 0 and len(self._dims) > 1:
raise TypeError, "invalid Array dimensions"
if offset > 0:
self._poss[i] = offset % self._dims[i]
offset = int(offset / self._dims[i])
# Don't break out of the loop if offset is 0 so we test all the
# dimensions for > 0.
if offset:
raise AttributeError, "invalid Array offset"
a = [None] * self._dims[0]
for i in range(1, len(self._dims)):
b = []
for j in range(self._dims[i]):
b.append(copy.deepcopy(a))
a = b
self.data = a
def _aslist(self, item=None):
if item is not None:
return self.data[int(item)]
else:
return self.data
def _asdict(self, item=None, encoding=Config.dict_encoding):
if item is not None:
if type(item) in (UnicodeType,StringType):
item = item.encode(encoding)
return self.data[int(item)]
else:
retval = {}
def fun(x): retval[str(x).encode(encoding)] = self.data[x]
map( fun, range(len(self.data)) )
return retval
def __getitem__(self, item):
try:
return self.data[int(item)]
except ValueError:
return getattr(self, item)
def __len__(self):
return len(self.data)
def __nonzero__(self):
return 1
def __str__(self):
return anyType.__str__(self) + ": " + str(self._aslist())
def _keys(self):
return filter(lambda x: x[0] != '_', self.__dict__.keys())
def _addItem(self, name, value, attrs):
if self._full:
raise ValueError, "Array is full"
pos = attrs.get((NS.ENC, 'position'))
if pos != None:
if self._posstate == 0:
raise AttributeError, \
"all elements in a sparse Array must have a " \
"position attribute"
self._posstate = 1
try:
if pos[0] == '[' and pos[-1] == ']':
pos = map (lambda x: int(x), pos[1:-1].split(','))
pos.reverse()
if len(pos) == 1:
pos = pos[0]
curpos = [0] * len(self._dims)
for i in range(len(self._dims)):
curpos[i] = pos % self._dims[i]
pos = int(pos / self._dims[i])
if pos == 0:
break
if pos:
raise Exception
elif len(pos) != len(self._dims):
raise Exception
else:
for i in range(len(self._dims)):
if pos[i] >= self._dims[i]:
raise Exception
curpos = pos
else:
raise Exception
except:
raise AttributeError, \
"invalid Array element position %s" % str(pos)
else:
if self._posstate == 1:
raise AttributeError, \
"only elements in a sparse Array may have a " \
"position attribute"
self._posstate = 0
curpos = self._poss
a = self.data
for i in range(len(self._dims) - 1, 0, -1):
a = a[curpos[i]]
if curpos[0] >= len(a):
a += [None] * (len(a) - curpos[0] + 1)
a[curpos[0]] = value
if pos == None:
self._poss[0] += 1
for i in range(len(self._dims) - 1):
if self._poss[i] < self._dims[i]:
break
self._poss[i] = 0
self._poss[i + 1] += 1
if self._dims[-1] and self._poss[-1] >= self._dims[-1]:
#self._full = 1
            #FIXME: why is this occurring?
pass
def _placeItem(self, name, value, pos, subpos, attrs = None):
curpos = [0] * len(self._dims)
for i in range(len(self._dims)):
if self._dims[i] == 0:
curpos[0] = pos
break
curpos[i] = pos % self._dims[i]
pos = int(pos / self._dims[i])
if pos == 0:
break
if self._dims[i] != 0 and pos:
raise Error, "array index out of range"
a = self.data
for i in range(len(self._dims) - 1, 0, -1):
a = a[curpos[i]]
if curpos[0] >= len(a):
a += [None] * (len(a) - curpos[0] + 1)
a[curpos[0]] = value
class typedArrayType(arrayType):
def __init__(self, data = None, name = None, typed = None, attrs = None,
offset = 0, rank = None, asize = 0, elemsname = None, complexType = 0):
arrayType.__init__(self, data, name, attrs, offset, rank, asize,
elemsname)
self._typed = 1
self._type = typed
self._complexType = complexType
class faultType(structType, Error):
def __init__(self, faultcode = "", faultstring = "", detail = None):
self.faultcode = faultcode
self.faultstring = faultstring
if detail != None:
self.detail = detail
structType.__init__(self, None, 0)
def _setDetail(self, detail = None):
if detail != None:
self.detail = detail
else:
try: del self.detail
except AttributeError: pass
def __repr__(self):
if getattr(self, 'detail', None) != None:
return "<Fault %s: %s: %s>" % (self.faultcode,
self.faultstring,
self.detail)
else:
return "<Fault %s: %s>" % (self.faultcode, self.faultstring)
__str__ = __repr__
def __call__(self):
return (self.faultcode, self.faultstring, self.detail)
class SOAPException(Exception):
def __init__(self, code="", string="", detail=None):
self.value = ("SOAPpy SOAP Exception", code, string, detail)
self.code = code
self.string = string
self.detail = detail
def __str__(self):
return repr(self.value)
class RequiredHeaderMismatch(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class MethodNotFound(Exception):
def __init__(self, value):
(val, detail) = value.split(":")
self.value = val
self.detail = detail
def __str__(self):
return repr(self.value, self.detail)
class AuthorizationFailed(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class MethodFailed(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
#######
# Convert complex SOAPpy objects to native python equivalents
#######
def simplify(object, level=0):
"""
    Convert the SOAPpy objects and their contents to simple python types.
This function recursively converts the passed 'container' object,
and all public subobjects. (Private subobjects have names that
start with '_'.)
Conversions:
- faultType --> raise python exception
- arrayType --> array
- compoundType --> dictionary
"""
if level > 10:
return object
if isinstance( object, faultType ):
if object.faultstring == "Required Header Misunderstood":
raise RequiredHeaderMismatch(object.detail)
elif object.faultstring == "Method Not Found":
raise MethodNotFound(object.detail)
elif object.faultstring == "Authorization Failed":
raise AuthorizationFailed(object.detail)
elif object.faultstring == "Method Failed":
raise MethodFailed(object.detail)
else:
se = SOAPException(object.faultcode, object.faultstring,
object.detail)
raise se
elif isinstance( object, arrayType ):
data = object._aslist()
for k in range(len(data)):
data[k] = simplify(data[k], level=level+1)
return data
elif isinstance( object, compoundType ) or isinstance(object, structType):
data = object._asdict()
for k in data.keys():
if isPublic(k):
data[k] = simplify(data[k], level=level+1)
return data
elif type(object)==DictType:
for k in object.keys():
if isPublic(k):
object[k] = simplify(object[k])
return object
elif type(object)==list:
for k in range(len(object)):
object[k] = simplify(object[k])
return object
else:
return object
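## Usage sketch added for illustration (not part of SOAPpy proper; assumes a
## Python 2 SOAPpy environment). A compound struct collapses to a plain dict
## and an arrayType collapses to a plain list.
def _demo_simplify():
    s = structType()
    s._addItem('symbol', 'H2O')
    s._addItem('natoms', 3)
    print(simplify(s))      # expected: {'symbol': 'H2O', 'natoms': 3} (key order may vary)
    a = arrayType([1, 2, 3])
    print(simplify(a))      # expected: [1, 2, 3]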
def simplify_contents(object, level=0):
"""
Convert the contents of SOAPpy objects to simple python types.
This function recursively converts the sub-objects contained in a
'container' object to simple python types.
Conversions:
- faultType --> raise python exception
- arrayType --> array
- compoundType --> dictionary
"""
if level>10: return object
if isinstance( object, faultType ):
for k in object._keys():
if isPublic(k):
setattr(object, k, simplify(object[k], level=level+1))
raise object
elif isinstance( object, arrayType ):
data = object._aslist()
for k in range(len(data)):
object[k] = simplify(data[k], level=level+1)
elif isinstance(object, structType):
data = object._asdict()
for k in data.keys():
if isPublic(k):
setattr(object, k, simplify(data[k], level=level+1))
elif isinstance( object, compoundType ) :
data = object._asdict()
for k in data.keys():
if isPublic(k):
object[k] = simplify(data[k], level=level+1)
elif type(object)==DictType:
for k in object.keys():
if isPublic(k):
object[k] = simplify(object[k])
elif type(object)==list:
for k in range(len(object)):
object[k] = simplify(object[k])
return object
|
krux/adspygoogle
|
adspygoogle/SOAPpy/Types.py
|
Python
|
apache-2.0
| 51,936
|
[
"Brian"
] |
4c815bd6ea62dda5611b81930f7aa04d496a520c9aa8fdab0f5d3c72872faccd
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2018 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import absolute_import
import numpy as np
from psi4 import core
from psi4.driver import constants
from psi4.driver.p4util.exceptions import *
def least_squares_fit_polynomial(xvals, fvals, localization_point, no_factorials=True, weighted=True, polynomial_order=4):
"""Performs and unweighted least squares fit of a polynomial, with specified order
to an array of input function values (fvals) evaluated at given locations (xvals).
See http://dx.doi.org/10.1063/1.4862157, particularly eqn (7) for details. """
xpts = np.array(xvals) - localization_point
if weighted:
R = 1.0
p_nu = 1
epsilon = 1e-3
zvals = np.square(xpts/R)
weights = np.exp(-zvals) / (zvals**p_nu + epsilon**p_nu)
else:
weights = None
fit = np.polynomial.polynomial.polyfit(xpts, fvals, polynomial_order, w=weights)
# Remove the 1/n! coefficients
if no_factorials:
scalefac = 1.0
for n in range(2,polynomial_order+1):
scalefac *= n
fit[n] *= scalefac
return fit
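# Illustrative self-check (not part of Psi4's public API): fitting an exact
# harmonic well should return, in order, the energy, gradient and second
# derivative at the localization point once the 1/n! factors are removed.
def _demo_least_squares_fit():
    x0 = 1.0
    xvals = np.linspace(0.6, 1.4, 15)
    fvals = 0.5 * (xvals - x0) ** 2          # exact quadratic centered at x0
    derivs = least_squares_fit_polynomial(xvals, fvals, localization_point=x0)
    e, g, h = derivs[0:3]
    assert abs(e) < 1e-6 and abs(g) < 1e-6 and abs(h - 1.0) < 1e-6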
def anharmonicity(rvals, energies, plot_fit='', mol = None):
"""Generates spectroscopic constants for a diatomic molecules.
Fits a diatomic potential energy curve using a weighted least squares approach
(c.f. http://dx.doi.org/10.1063/1.4862157, particularly eqn. 7), locates the minimum
energy point, and then applies second order vibrational perturbation theory to obtain spectroscopic
constants. Any number of points greater than 4 may be provided, and they should bracket the minimum.
The data need not be evenly spaced, and can be provided in any order. The data are weighted such that
those closest to the minimum have highest impact.
A dictionary with the following keys, which correspond to spectroscopic constants, is returned:
:type rvals: list
:param rvals: The bond lengths (in Angstrom) for which energies are
provided, of length at least 5 and equal to the length of the energies array
:type energies: list
:param energies: The energies (Eh) computed at the bond lengths in the rvals list
:type plot_fit: string
:param plot_fit: A string describing where to save a plot of the harmonic and anharmonic fits, the
inputted data points, re, r0 and the first few energy levels, if matplotlib
is available. Set to 'screen' to generate an interactive plot on the screen instead. If a filename is
provided, the image type is determined by the extension; see matplotlib for supported file types.
:returns: (*dict*) Keys: "re", "r0", "we", "wexe", "nu", "ZPVE(harmonic)", "ZPVE(anharmonic)", "Be", "B0", "ae", "De"
corresponding to the spectroscopic constants in cm-1
"""
angstrom_to_bohr = 1.0 / constants.bohr2angstroms
    angstrom_to_meter = 1.0e-10  # 1 Angstrom = 1e-10 m
# Make sure the input is valid
if len(rvals) != len(energies):
raise ValidationError("The number of energies must match the number of distances")
npoints = len(rvals)
if npoints < 5:
raise ValidationError("At least 5 data points must be provided to compute anharmonicity")
core.print_out("\n\nPerforming a fit to %d data points\n" % npoints)
# Make sure the molecule the user provided is the active one
molecule = mol if mol is not None else core.get_active_molecule()
molecule.update_geometry()
natoms = molecule.natom()
if natoms != 2:
raise Exception("The current molecule must be a diatomic for this code to work!")
m1 = molecule.mass(0)
m2 = molecule.mass(1)
# Optimize the geometry, refitting the surface around each new geometry
core.print_out("\nOptimizing geometry based on current surface:\n\n");
re = np.mean(rvals)
maxit = 30
thres = 1.0e-9
for i in range(maxit):
derivs = least_squares_fit_polynomial(rvals,energies,localization_point=re)
e,g,H = derivs[0:3]
core.print_out(" E = %20.14f, x = %14.7f, grad = %20.14f\n" % (e, re, g))
if abs(g) < thres:
break
re -= g/H;
if i == maxit-1:
raise ConvergenceError("diatomic geometry optimization", maxit)
core.print_out(" Final E = %20.14f, x = %14.7f, grad = %20.14f\n" % (e, re, g));
if re < min(rvals):
raise Exception("Minimum energy point is outside range of points provided. Use a lower range of r values.")
if re > max(rvals):
raise Exception("Minimum energy point is outside range of points provided. Use a higher range of r values.")
# Convert to convenient units, and compute spectroscopic constants
d0,d1,d2,d3,d4 = derivs*constants.hartree2aJ
core.print_out("\nEquilibrium Energy %20.14f Hartrees\n" % e)
core.print_out("Gradient %20.14f\n" % g)
core.print_out("Quadratic Force Constant %14.7f MDYNE/A\n" % d2)
core.print_out("Cubic Force Constant %14.7f MDYNE/A**2\n" % d3)
core.print_out("Quartic Force Constant %14.7f MDYNE/A**3\n" % d4)
hbar = constants.h / (2.0 * np.pi)
mu = ((m1*m2)/(m1+m2))*constants.amu2kg
we = 5.3088375e-11 * np.sqrt(d2/mu)
wexe = (1.2415491e-6)*(we/d2)**2 * ((5.0*d3*d3)/(3.0*d2)-d4)
# Rotational constant: Be
I = ((m1*m2)/(m1+m2)) * constants.amu2kg * (re * angstrom_to_meter)**2
B = constants.h / (8.0 * np.pi**2 * constants.c * I)
# alpha_e and quartic centrifugal distortion constant
ae = -(6.0 * B**2 / we) * ((1.05052209e-3*we*d3)/(np.sqrt(B * d2**3))+1.0)
de = 4.0*B**3 / we**2
# B0 and r0 (plus re check using Be)
B0 = B - ae / 2.0
r0 = np.sqrt(constants.h / (8.0 * np.pi**2 * mu * constants.c * B0))
recheck = np.sqrt(constants.h / (8.0 * np.pi**2 * mu * constants.c * B))
r0 /= angstrom_to_meter;
recheck /= angstrom_to_meter;
# Fundamental frequency nu
nu = we - 2.0 * wexe;
zpve_nu = 0.5 * we - 0.25 * wexe;
# Generate pretty pictures, if requested
if(plot_fit):
try:
import matplotlib.pyplot as plt
except ImportError:
msg = "\n\tPlot not generated; matplotlib is not installed on this machine.\n\n"
print(msg)
core.print_out(msg)
# Correct the derivatives for the missing factorial prefactors
dvals = np.zeros(5)
dvals[0:5] = derivs[0:5]
dvals[2] /= 2
dvals[3] /= 6
dvals[4] /= 24
# Default plot range, before considering energy levels
minE = np.min(energies)
maxE = np.max(energies)
minR = np.min(rvals)
maxR = np.max(rvals)
# Plot vibrational energy levels
we_au = we / constants.hartree2wavenumbers
wexe_au = wexe / constants.hartree2wavenumbers
coefs2 = [ dvals[2], dvals[1], dvals[0] ]
coefs4 = [ dvals[4], dvals[3], dvals[2], dvals[1], dvals[0] ]
for n in range(3):
Eharm = we_au*(n+0.5)
Evpt2 = Eharm - wexe_au*(n+0.5)**2
coefs2[-1] = -Eharm
coefs4[-1] = -Evpt2
roots2 = np.roots(coefs2)
roots4 = np.roots(coefs4)
xvals2 = roots2 + re
xvals4 = np.choose(np.where(np.isreal(roots4)), roots4)[0].real + re
Eharm += dvals[0]
Evpt2 += dvals[0]
plt.plot(xvals2, [Eharm, Eharm], 'b', linewidth=1)
plt.plot(xvals4, [Evpt2, Evpt2], 'g', linewidth=1)
maxE = Eharm
maxR = np.max([xvals2,xvals4])
minR = np.min([xvals2,xvals4])
# Find ranges for the plot
dE = maxE - minE
minE -= 0.2*dE
maxE += 0.4*dE
dR = maxR - minR
minR -= 0.2*dR
maxR += 0.2*dR
# Generate the fitted PES
xpts = np.linspace(minR, maxR, 1000)
xrel = xpts - re
xpows = xrel[:, None] ** range(5)
fit2 = np.einsum('xd,d', xpows[:,0:3], dvals[0:3])
fit4 = np.einsum('xd,d', xpows, dvals)
# Make / display the plot
plt.plot(xpts, fit2, 'b', linewidth=2.5, label='Harmonic (quadratic) fit')
plt.plot(xpts, fit4, 'g', linewidth=2.5, label='Anharmonic (quartic) fit')
plt.plot([re, re], [minE, maxE], 'b--', linewidth=0.5)
plt.plot([r0, r0], [minE, maxE], 'g--', linewidth=0.5)
plt.scatter(rvals, energies, c='Black', linewidth=3, label='Input Data')
plt.legend()
plt.xlabel('Bond length (Angstroms)')
plt.ylabel('Energy (Eh)')
plt.xlim(minR, maxR)
plt.ylim(minE, maxE)
if plot_fit == 'screen':
plt.show()
else:
plt.savefig(plot_fit)
core.print_out("\n\tPES fit saved to %s.\n\n" % plot_fit)
core.print_out("\nre = %10.6f A check: %10.6f\n" % (re, recheck))
core.print_out("r0 = %10.6f A\n" % r0)
core.print_out("we = %10.4f cm-1\n" % we)
core.print_out("wexe = %10.4f cm-1\n" % wexe)
core.print_out("nu = %10.4f cm-1\n" % nu)
core.print_out("ZPVE(nu) = %10.4f cm-1\n" % zpve_nu)
core.print_out("Be = %10.4f cm-1\n" % B)
core.print_out("B0 = %10.4f cm-1\n" % B0)
core.print_out("ae = %10.4f cm-1\n" % ae)
core.print_out("De = %10.7f cm-1\n" % de)
results = {
"re" : re,
"r0" : r0,
"we" : we,
"wexe" : wexe,
"nu" : nu,
"ZPVE(harmonic)" : zpve_nu,
"ZPVE(anharmonic)" : zpve_nu,
"Be" : B,
"B0" : B0,
"ae" : ae,
"De" : de
}
return results
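# Hypothetical usage sketch (requires a working Psi4 installation; the Morse
# parameters below are invented so the example runs without any SCF energies).
# Real applications would replace `energies` with values from psi4.energy().
def _demo_anharmonicity():
    import psi4
    mol = psi4.geometry("H\nH 1 0.75")
    rvals = np.linspace(0.60, 0.90, 9)                       # Angstrom
    de_morse, a_morse, re_morse = 0.17, 2.0, 0.742           # invented parameters
    energies = de_morse * (1.0 - np.exp(-a_morse * (rvals - re_morse)))**2 - 1.17
    phys = anharmonicity(list(rvals), list(energies), mol=mol)
    return phys["we"], phys["wexe"]                          # cm-1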
|
amjames/psi4
|
psi4/driver/diatomic.py
|
Python
|
lgpl-3.0
| 10,732
|
[
"Psi4"
] |
241e74005a9ca18957028b15fb8627deba69e8810b2e21fa5a6f12b9b595f8be
|
""" Module for validation of incoming inputs.
TODO: Refactor BaseController references to similar methods to use this module.
"""
from galaxy import exceptions
from galaxy.util.sanitize_html import sanitize_html
def validate_and_sanitize_basestring( key, val ):
if not isinstance( val, basestring ):
raise exceptions.RequestParameterInvalidException( '%s must be a string or unicode: %s'
% ( key, str( type( val ) ) ) )
return unicode( sanitize_html( val, 'utf-8', 'text/html' ), 'utf-8' )
def validate_and_sanitize_basestring_list( key, val ):
try:
assert isinstance( val, list )
return [ unicode( sanitize_html( t, 'utf-8', 'text/html' ), 'utf-8' ) for t in val ]
except ( AssertionError, TypeError ):
raise exceptions.RequestParameterInvalidException( '%s must be a list of strings: %s'
% ( key, str( type( val ) ) ) )
def validate_boolean( key, val ):
if not isinstance( val, bool ):
raise exceptions.RequestParameterInvalidException( '%s must be a boolean: %s'
% ( key, str( type( val ) ) ) )
return val
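# Illustrative usage only (assumes a Galaxy code base on the import path);
# wrapped in a function so nothing executes at import time.
def _example_usage():
    title = validate_and_sanitize_basestring( 'title', 'My <b>history</b>' )
    tags = validate_and_sanitize_basestring_list( 'tags', [ 'rna-seq', 'qc' ] )
    deleted = validate_boolean( 'deleted', False )
    return title, tags, deleted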
#TODO:
#def validate_integer( self, key, val, min, max ):
#def validate_float( self, key, val, min, max ):
#def validate_number( self, key, val, min, max ):
#def validate_genome_build( self, key, val ):
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/util/validation.py
|
Python
|
gpl-3.0
| 1,470
|
[
"Galaxy"
] |
02a1d84e7a6a06ddb488053dfb31824f708ba3688660bc1fb36df9cb24b69b8f
|
##
## nw_spectrum
##
## Kenneth Lopata
## Last modified: 2013-03-08
##
## Python script for parsing NWChem output for TDDFT/vspec excitation
## energies, and optionally Lorentzian broadenening the spectra. For
## online help run:
##
## nw_spectrum --help
##
##
import sys
import textwrap
from optparse import OptionParser
ver = "2.1"
pname = "nw_spectrum"
def check_version ():
"""Check version and ensure new print and string formatting is
    allowed. Raises an exception if not satisfied."""
if sys.version_info < (2, 6):
raise Exception("This script requires python >= 2.6")
try:
newstring = "oldstring {v}".format(v=3.14)
except:
raise Exception("This script requires string.format()")
def ev2au(e_ev):
return (1.0 / 27.2114) * e_ev
def au2ev (e_au):
return 27.2114 * e_au
def ev2nm(e_ev):
return 27.2114 * 2.0 * 2.99792 * 2.41888 * 3.14159265359 / e_ev
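## Quick self-check of the conversion helpers above (illustrative only; the
## numbers follow directly from the hard-coded constants).
def _demo_conversions():
    e_ev = 3.0
    assert abs(au2ev(ev2au(e_ev)) - e_ev) < 1e-12       # eV -> au -> eV round trip
    print("%.1f eV is roughly %.0f nm" % (e_ev, ev2nm(e_ev)))   # ~413 nm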
def determine_data_type ():
"""Parses stdin to see what data type, then rewinds stdin.
    Returns 'vspec', 'tddft', or raises an Exception if neither is
    found. It chooses based on the first tag found in the input, so
files with multiple data will only find the 1st. To extract the
2nd, manually specify the data format via the command line args."""
tag_tddft = "NWChem TDDFT Module"
tag_vspec = "DFT Virtual Spectrum"
lines = sys.stdin.readlines()
for line in lines:
if tag_tddft in line:
sys.stdin.seek(0)
return "tddft"
elif tag_vspec in line:
sys.stdin.seek(0)
return "vspec"
raise Exception ("Failed to determine data format, please specify manually.")
def parse_input_vspec (opts):
"""Parses input from vspec and returns excitation energies in the
    form [energy, f], in eV and atomic units, respectively."""
lines = sys.stdin.readlines ()
inside_data = False
roots = []
for l in lines:
if "<START>" in l:
try:
ls = l.split()
tag = ls[0]
nexcite = int (ls[1])
except:
raise Exception ("Failed to parse <START> tag and number: {0}".format(l))
iexcite = 0
inside_data = True
continue
if inside_data:
if "<END>" in l:
inside_data = False
continue
# break
try:
line_split = l.strip().split()
n = int (line_split[0])
occ = int (line_split[1]) #not used
virtual = int (line_split[2]) #not used
energy_ev = float (line_split[3])
osc = float (line_split[7])
except:
raise Exception ("Failed to parse data line: {0}".format(l))
iexcite = iexcite + 1
if n != iexcite:
raise Exception ("Expected excitation number {0}, found {1}".format(iexcite, n))
if energy_ev < 0.0:
print ("{0} Warning: Ignored negative vpsec excitation: {1} eV, {2}".format(opts.cchar, energy_ev, osc))
if opts.verbose:
sys.stderr.write ("Warning: Ignored negative vpsec excitation: {0} eV, {1}\n".format(energy_ev, osc))
else:
roots.append ([energy_ev, osc])
# if not inside_data:
# raise Exception ("Failed to find <START> tag")
if iexcite != nexcite:
print ("{0} Warning: Expected {1} excitations, found {2}".format(opts.cchar, nexcite, iexcite))
if opts.verbose:
sys.stderr.write ("Warning: Expected {0} excitations, found {1}\n".format(nexcite,iexcite))
if opts.verbose:
sys.stderr.write ("{0}: Found {1} vspec excitations\n".format(pname, len(roots)))
return roots
def parse_input_evals (opts):
"""Parses input for eigenvalues and return as a list"""
start_tag = "DFT Final Molecular Orbital Analysis"
end_tag = "Task times cpu"
inside = False
lines = sys.stdin.readlines ()
iline = -1
evals = []
while True:
iline = iline + 1
try:
line = lines[iline]
except:
break
if start_tag in line:
inside = True
if end_tag in line:
inside = False
if inside and "Vector" in line and "Occ" in line:
line_strip = line.strip()
try:
tagloc = line_strip.rfind("E=")
evalue_str = line_strip[tagloc+2:tagloc+15].replace("D", "E") # after E= ; replace D with E
evalue = float(evalue_str)
except:
raise Exception ("Failed to parse eigenvalue: {0}".format(line_strip))
eval_ev = au2ev (evalue)
evals.append(eval_ev) #store eigenvalue in eV
if opts.verbose:
sys.stderr.write ("{0}: Found {1} eigenvalues\n".format(pname, len(evals)))
return evals
def bin_evals (opts, evals):
"""Take eigenvalues and bins them, and return [energy, N], where N
is the number of eigenvalues in the energy bin centered around
energy"""
## they should be sorted but let's make sure
evals_sort = sorted(evals)
emin = evals_sort[0]
emax = evals_sort[-1]
de = (emax - emin) / opts.nbin
#=== XXX HARDCODE ===
# de = 0.01
# opts.nbin = int((emax - emin)/de) + 1
#===
dos_raw = []
for ie in range(opts.nbin+1):
ecenter = emin + ie*de
eleft = ecenter - 0.5*de
eright = ecenter + 0.5*de
count = 0
for val in evals_sort:
if val >= eleft and val <= eright:
count = count + 1
dos_raw.append ([ecenter, count])
## check that total sum is number of eigenvalues
ntot = 0
for d in dos_raw:
ntot = ntot + d[1]
if ntot != len (evals):
raise Exception ("Inconsistent integrated DOS and number of eigenvalues: {0} vs {1}".format(ntot, len(evals)))
return dos_raw
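## Illustrative check of the DOS binning helper (not called by the parser).
## Only opts.nbin is consulted, so a bare options object is enough here.
def _demo_bin_evals():
    class _Opts(object):
        nbin = 4
    evals = [-5.0, -4.9, -4.5, -1.0, -0.9, 0.0]        # eigenvalues in eV
    dos = bin_evals(_Opts(), evals)
    # every eigenvalue lands in exactly one bin
    assert sum(count for (center, count) in dos) == len(evals)
    return dos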
def parse_input_tddft (opts):
"""Parses input for singlet TDDFT excitation energies.
Returns excitation energies in the form [energy, f], in eV and
atomic units units, respectively."""
start_tag = "Convergence criterion met"
end_tag = "Excited state energy"
max_osc_search = 10 #max number of lines after root energy to look for oscillator strength
inside = False #true when we are inside output block
lines = sys.stdin.readlines()
iline = -1
roots = []
while True:
iline = iline + 1
try:
line = lines[iline]
except:
break
if start_tag in line:
inside = True
if end_tag in line:
inside = False
if inside and "Root" in line and "eV" in line:
line_strip = line.strip()
# ##
# ## OLD WAY
# ##
# ## Note, I would ideally .split() the line directly, but you
# ## can have cases like Root356 (for three digit root numbers)
# ## so I have to do it this ugly way...
# ##
# try:
# line_start = line_strip[0:4] # should contain RootXXX
# line_data = line_strip[7:].split()
# line_n = line_strip[4:7] #contains integer root number
# line_e = line_data[-2] # should contain excitation energy
# line_ev_tag = line_data[-1] # should contain "eV"
# except:
# raise Exception ("Failed to parse data line for root: {0}".format(line_strip))
##
## NEW WAY after Niri changed formatting, e.g.:
## Root 48 singlet a 57.454053695 a.u. 1563.4050 eV
##
line_strip = line.strip()
line_split = line_strip.split()
try:
line_start = line_split[0] # contains "Root"
line_n = line_split[1] # contains root number (int)
line_ev_tag = line_split[-1] # contains "eV"
line_e = line_split[-2] # contains excitation energy in eV
except:
raise Exception ("Failed to parse data line for root: {0}".format(line_strip))
if line_start == "Root" and line_ev_tag == "eV":
try:
n = int(line_n)
energy_ev = float(line_e)
except:
raise Exception ("Failed to convert root values: {0}".format(line_strip))
else:
raise Exception ("Unexpected format for root: {0}".format(line_strip))
if line_start == "Root" and line_ev_tag == "eV":
try:
n = int(line_n)
energy_ev = float(line_e)
except:
raise Exception ("Failed to convert root values: {0}".format(line_strip))
else:
raise Exception ("Unexpected format for root: {0}".format(line_strip))
##
## Now look for oscillator strength, which will be a few
## lines down (though the exact position may vary it seems).
##
ioscline = -1
while True:
ioscline = ioscline + 1
if ioscline >= max_osc_search:
raise Exception ("Failed to find oscillator strength after looking {0} lines.".format(ioscline))
oscline = lines[iline + ioscline].strip()
if "Dipole Oscillator Strength" in oscline:
try:
osc_str = oscline.split()
osc = float (osc_str[3])
except:
raise Exception ("Failed to convert oscillator strength: {0}".format(oscline))
break
## do some final checks, then append to data
if energy_ev < 0.0:
raise Exception ("Invalid negative energy: {0}".format(energy_ev))
if osc < 0.0:
raise Exception ("Invalid negative oscillator strength: {0}".format(osc))
roots.append([energy_ev, osc])
nroots = len (roots)
if nroots < 1:
raise Exception ("Failed to find any TDDFT roots")
else:
if opts.header:
print ("{0} Successfully parsed {1} TDDFT singlet excitations".format(opts.cchar,nroots))
if opts.verbose:
sys.stderr.write ("{0}: Found {1} TDDFT excitations\n".format(pname, nroots))
return roots
def make_energy_list (opts, roots):
"""Computes the list of spectrum energies, and potentially adjusts
peak widths"""
epad = 20.0*opts.width
emin = roots[0][0] - epad
# if emin < opts.width:
# emin = opts.width
emax = roots[-1][0] + epad
de = (emax - emin) / opts.npoints
## Use width of at least two grid points
if opts.width < 2*de:
opts.width = 2*de
print ("{0} Warning: Forced broadening to be {1} eV".format(opts.cchar, opts.width))
if opts.verbose:
sys.stderr.write ("Warning: Forced broadening to be {0} eV\n".format(opts.width))
# opts.width = max (opts.width, 2*de)
eout = [ emin + ie*de for ie in range(opts.npoints) ]
return eout
### OLD SLOWER WAY
# def lorentzian_broaden (opts, roots):
# """Broadens raw roots into spectrum and returns the result."""
# ## cutoff radius
# cutoff = 15.0*opts.width
# ##
# ## multiply by 0.5 as FWHM was supplied by gamma in lorenzian is
# ## actually HWHM.
# ##
# gamma = 0.5 * opts.width
# gamma_sqrd = gamma*gamma
# ##
# ## L(w; w0, gamma) = gamma/pi * 1 / [(w-w0)^2 + gamma^2]
# ##
# prefac = gamma/3.14159265359*de
# # spectrum = [ [emin + ie*de, 0] for ie in range(opts.npoints)]
# # for point in spectrum:
# # stot = 0.0
# # for root in roots:
# # xx0 = point[0] - root[0]
# # if abs(xx0) <= cutoff:
# # stot += prefac * root[1] / ( xx0*xx0 + gamma_sqrd) #Lorentzian
# # point[1] = stot
# # npoints_made = len(spectrum)
# # if npoints_made != opts.npoints:
# # raise Exception ("Spectrum should have {0} points, instead has {1}".format(opts.npoints, npoints_made))
# # if opts.header:
# # print ("{0} Lorentzian broadened roots with width {1} eV".format(opts.cchar, opts.width))
# # print ("{0} Created spectrum with {1} points".format(opts.cchar, npoints_made))
# return spectrum
## Faster way--use a generator
def gen_spectrum (opts, energies, roots):
"""Generator for making Lorentzian broadenend spectrum."""
## cutoff radius
# cutoff = 15.0*opts.width
cutoff = 20.0*opts.width
# cutoff = 9999999.0*opts.width
##
## L(w; w0, gamma) = gamma/pi * 1 / [(w-w0)^2 + gamma^2]
##
##
    ## multiply by 0.5: the FWHM was supplied, but gamma in the
    ## Lorentzian is actually the HWHM.
##
gamma = 0.5 * opts.width
gamma_sqrd = gamma*gamma
de = (energies[-1] - energies[0]) / (len(energies)-1)
prefac = gamma/3.14159265359*de
##
## used for verbose output (ie see progress for very large runs)
##
# checkpt_energies = []
# ne = len(energies)
# for ie in range(ne):
# if ie % ne == 0:
# checkpt_energies.append(energies[ie])
for energy in energies:
# if opts.verbose:
# if energy in checkpt_energies:
# sys.stderr.write ("XXX\n")
stot = 0.0
for root in roots:
xx0 = energy - root[0]
if abs(xx0) <= cutoff:
stot += root[1] / ( xx0*xx0 + gamma_sqrd) #Lorentzian
yield [energy, stot*prefac]
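## Illustrative use of the broadening generator (not called directly by the
## parser): two stick excitations are smeared into a continuous spectrum.
## Only opts.width is read by gen_spectrum, so a bare options object suffices.
def _demo_gen_spectrum():
    class _Opts(object):
        width = 0.2                                    # FWHM in eV
    energies = [1.0 + 0.01 * i for i in range(401)]    # 1.0 .. 5.0 eV grid
    roots = [[2.0, 0.5], [3.5, 1.0]]                   # [energy (eV), osc. strength]
    spectrum = list(gen_spectrum(_Opts(), energies, roots))
    peak = max(spectrum, key=lambda p: p[1])
    assert abs(peak[0] - 3.5) < 0.05                   # strongest peak sits near 3.5 eV
    return spectrum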
def dump_header (opts):
"""Print header to stdout"""
if opts.header:
print ("{0} ================================".format(opts.cchar))
print ("{0} NWChem spectrum parser ver {1}".format(opts.cchar,ver))
print ("{0} ================================".format(opts.cchar))
print ("{0} ".format(opts.cchar))
print ("{0} Parser runtime options: {1}".format(opts.cchar,opts))
print ("{0}".format(opts.cchar))
def dump_data (opts, roots):
"""Dumps output to stdout. This works for either lists of raw roots
or broadened spectra."""
if opts.verbose:
sys.stderr.write ("{0}: Dumping data to stdout ... \n".format(pname))
if opts.units == "ev":
if opts.header:
print ("{0}".format(opts.cchar))
print ("{c}{s1}{delim}{s2}".format(c=opts.cchar, s1=" Energy [eV] ", delim=opts.delim, s2=" Abs. [au] "))
print ("{0}----------------------------------".format(opts.cchar))
for root in roots:
print ("{energy:.10e}{delim}{osc:.10e}".format(energy=root[0], delim=opts.delim, osc=root[1]))
elif opts.units == "au":
if opts.header:
print ("{0}".format(opts.cchar))
print ("{c}{s1}{delim}{s2}".format(c=opts.cchar, s1=" Energy [au] ", delim=opts.delim, s2=" Abs. [au] "))
print ("{0}----------------------------------".format(opts.cchar))
for root in roots:
eout = ev2au (root[0])
print ("{energy:.10e}{delim}{osc:.10e}".format(energy=eout, delim=opts.delim, osc=root[1]))
elif opts.units == "nm":
if opts.header:
print ("{0}".format(opts.cchar))
print ("{c}{s1}{delim}{s2}".format(c=opts.cchar, s1=" Wavelen. [nm] ", delim=opts.delim, s2=" Abs. [au] "))
print ("{0}----------------------------------".format(opts.cchar))
# roots.reverse () #reorder so we output in increasing wavelength
#XXX SHOULD REVERSE
for root in roots:
eout = ev2nm (root[0])
print ("{energy:.10e}{delim}{osc:.10e}".format(energy=eout, delim=opts.delim, osc=root[1]))
else:
raise Exception ("Invalid unit: {0}".format(opts.units))
def preprocess_check_opts (opts):
"""Check options and replaces with sane values if needed, stores
all opts as purely lowercase."""
opts.units = opts.units.lower()
opts.datafmt = opts.datafmt.lower()
if opts.units != "nm" and opts.units != "ev" and opts.units != "au":
raise Exception ("Invalid unit type: {0}".format(opts.units))
if opts.datafmt != "tddft" and opts.datafmt != "vspec" and opts.datafmt != "auto" and opts.datafmt != "dos":
raise Exception ("Invalid data format: {0}".format(opts.datafmt))
if opts.npoints < 100:
raise Exception ("Increase number of points to at least 100 (you asked for {0})".format(opts.npoints))
if opts.width < 0.0:
raise Exception ("Peak width must be positive (you supplied {0})".format(opts.width))
def main():
##
## Parse command line options. Note we later make all lowercase,
## so from the user's point of view they are case-insensitive.
##
usage = "%prog [options]\n\n"
desc = "Reads NWChem output from stdin, parses for the linear response TDDFT or DFT vspec excitations, and prints the absorption spectrum to stdout. It will optionally broaden peaks using a Lorentzian with FWHM of at least two energy/wavelength spacings. By default, it will automatically determine data format (tddft or vspec) and generate a broadened spectrum in eV."
desc_wrap = textwrap.wrap (desc,80)
for s in desc_wrap:
usage += s + "\n"
    example = 'Create an absorption spectrum in nm named "spectrum.dat" from the NWChem output file "water.nwo", with peaks broadened by 0.3 eV and 5000 points in the spectrum.'
example_wrap = textwrap.wrap (example,80)
usage += "\nExample:\n\n\t"+"nw_spectrum -b0.3 -p5000 -wnm < water.nwo > spectrum.dat\n\n"
for s in example_wrap:
usage += s + "\n"
parser = OptionParser(usage=usage)
parser.add_option("-f", "--format", type="string", dest="datafmt",
help="data file format: auto (default), tddft, vspec, dos", metavar="FMT")
parser.add_option("-b", "--broad", type="float", dest="width",
help="broaden peaks (FWHM) by WID eV (default 0.1 eV)", metavar="WID")
parser.add_option("-n", "--nbin", type="int", dest="nbin",
help="number of eigenvalue bins for DOS calc (default 20)", metavar="NUM")
parser.add_option("-p", "--points", type="int", dest="npoints",
help="create a spectrum with NUM points (default 2000)", metavar="NUM")
parser.add_option("-w", "--units", type="string", dest="units",
help="units for frequency: eV (default), au, nm", metavar="UNT")
parser.add_option("-d", "--delim", type="string", dest="delim",
help="use STR as output separator (four spaces default)", metavar="STR")
parser.add_option("-x", "--extract", action="store_false", dest="makespec",
help="extract unbroadened roots; do not make spectrum")
parser.add_option("-C", "--clean", action="store_false", dest="header",
help="clean output; data only, no header or comments")
parser.add_option("-c", "--comment", type="string", dest="cchar",
help="comment character for output ('#' default)", metavar="CHA")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
help="echo warnings and progress to stderr")
parser.set_defaults(width = 0.1, npoints = 2000, units="eV", datafmt="auto", delim=" ", makespec=True, header=True, cchar="#", nbin=20, verbose=False)
(opts, args) = parser.parse_args()
preprocess_check_opts(opts)
check_version ()
if opts.header:
dump_header (opts)
if opts.datafmt == "auto":
opts.datafmt = determine_data_type ()
if opts.datafmt == "tddft":
print ("{0} The input appears to contain TDDFT data.".format(opts.cchar))
elif opts.datafmt == "vspec":
print ("{0} The input appears to contain vspec data.".format(opts.cchar))
else:
raise Exception ("Invalid data format: {0}".format(opts.datafmt))
## parse raw data
if opts.datafmt == "tddft":
roots = parse_input_tddft (opts)
elif opts.datafmt == "vspec":
roots = parse_input_vspec (opts)
elif opts.datafmt == "dos":
evals = parse_input_evals (opts)
roots = bin_evals (opts, evals)
else:
raise Exception ("Invalid data format supplied: {0}".format(opts.datafmt))
## make broadened spectrum if desired
if opts.makespec:
energies = make_energy_list (opts, roots)
spectrum = gen_spectrum (opts, energies, roots)
if opts.verbose:
sys.stderr.write ("{0}: Initialized spectrum [{1: .3f} : {2: .3f}] ({3} points)\n".format(pname, energies[0], energies[-1], len(energies)))
if opts.header:
print ("{0} Roots were Lorentzian broadened with width {1} eV ".format(opts.cchar, opts.width))
print ("{0} Spectrum generated with {1} points.".format(opts.cchar, opts.npoints))
else:
spectrum = roots
if opts.header:
print ("{0} Roots not broadened".format(opts.cchar))
print ("{0} No spectrum generated: output is list of raw excitations".format(opts.cchar))
## finally, dump data to stdout
dump_data (opts, spectrum)
if opts.verbose:
sys.stderr.write ("{0}: Done\n".format(pname))
if __name__ == "__main__":
main()
|
rangsimanketkaew/NWChem
|
contrib/parsers/nw_spectrum.py
|
Python
|
mit
| 22,171
|
[
"NWChem"
] |
cdde43def8637a11ef0917c7d49f09c53a22c3272100112fcb96d7755a621218
|
#!/usr/bin/env python
# py2/3 compatibility
from __future__ import absolute_import, division, print_function, unicode_literals
from textwrap import dedent
import netCDF4 as nc4
import numpy as np
from datetime import datetime, timedelta
from gridded.utilities import get_dataset
class Time(object):
_const_time = None
def __init__(self,
data=(datetime.now(),),
filename=None,
varname=None,
tz_offset=None,
origin=None,
displacement=timedelta(seconds=0),
*args,
**kwargs):
'''
Representation of a time axis. Provides interpolation alphas and indexing.
:param time: Ascending list of times to use
:param tz_offset: offset to compensate for time zone shifts
:param origin: shifts the time interval to begin at the time specified
:param displacement: displacement to apply to the time data. Allows shifting entire time interval into future or past
:type time: netCDF4.Variable or [] of datetime.datetime
:type tz_offset: datetime.timedelta
        :type origin: datetime.datetime
:type displacement: datetime.timedelta
'''
if isinstance(data, (nc4.Variable, nc4._netCDF4._Variable)):
if (hasattr(nc4, 'num2pydate')):
self.data = nc4.num2pydate(data[:], units=data.units)
else:
self.data = nc4.num2date(data[:], units=data.units, only_use_cftime_datetimes=False, only_use_python_datetimes=True)
elif data is None:
self.data = np.array([datetime.now()])
else:
self.data = np.asarray(data)
if origin is not None:
diff = self.data[0] - origin
self.data -= diff
self.data += displacement
self.filename = filename
self.varname = varname
# if self.filename is None:
# self.filename = self.id + '_time.txt'
if tz_offset is not None:
self.data += tz_offset
if not self._timeseries_is_ascending(self.data):
raise ValueError("Time sequence is not ascending")
if self._has_duplicates(self.data):
raise ValueError("Time sequence has duplicate entries")
super(Time, self).__init__(*args, **kwargs)
@classmethod
def from_netCDF(cls,
filename=None,
dataset=None,
varname=None,
datavar=None,
tz_offset=None,
**kwargs):
"""
construct a Time object from a netcdf file
        :param filename=None: name of netcdf file
:param dataset=None: netcdf dataset object (one or the other)
:param varname=None: name of the netcdf variable
:param datavar=None: Either the time variable name, or
A netcdf variable that needs a Time object.
It will try to find the time variable that
corresponds to the passed in variable.
        :param tz_offset=None: offset to adjust for timezone, as a datetime.timedelta.
"""
if dataset is None:
dataset = get_dataset(filename)
if datavar is not None:
if hasattr(datavar, 'time') and datavar.time in dataset.dimensions.keys():
varname = datavar.time
else:
varname = datavar.dimensions[0] if 'time' in datavar.dimensions[0] else None
if varname is None:
return cls.constant_time()
time = cls(data=dataset[varname],
filename=filename,
varname=varname,
tz_offset=tz_offset,
**kwargs
)
return time
@classmethod
def constant_time(cls):
if cls._const_time is None:
cls._const_time = cls(np.array([datetime.now()]))
return cls._const_time
@property
def info(self):
"""
Provides info about this Time object
"""
msg = """
Time object:
filename: {}
varname: {}
first timestep: {}
final timestep: {}
number of timesteps: {}
""".format(self.filename,
self.varname,
self.min_time,
self.max_time,
len(self.data),
)
return dedent(msg)
def __len__(self):
return len(self.data)
def __iter__(self):
return self.data.__iter__()
def __eq__(self, other):
r = self.data == other.data
return all(r) if hasattr(r, '__len__') else r
def __ne__(self, other):
return not self.__eq__(other)
def _timeseries_is_ascending(self, ts):
return np.all(np.sort(ts) == ts)
def _has_duplicates(self, time):
return len(np.unique(time)) != len(time) and len(time) != 1
@property
def min_time(self):
'''
First time in series
:rtype: datetime.datetime
'''
return self.data[0]
@property
def max_time(self):
'''
Last time in series
:rtype: datetime.datetime
'''
return self.data[-1]
def get_time_array(self):
return self.data[:]
def time_in_bounds(self, time):
'''
Checks if time provided is within the bounds represented by this object.
:param time: time to be queried
:type time: datetime.datetime
:rtype: boolean
'''
        return self.min_time <= time <= self.max_time
def valid_time(self, time):
if time < self.min_time or time > self.max_time:
raise ValueError('time specified ({0}) is not within the bounds of the time ({1} to {2})'.format(
time.strftime('%c'), self.min_time.strftime('%c'), self.max_time.strftime('%c')))
def index_of(self, time, extrapolate=False):
'''
Returns the index of the provided time with respect to the time intervals in the file.
:param time: Time to be queried
:param extrapolate:
:type time: datetime.datetime
:type extrapolate: boolean
        :return: insertion index of the specified time in the time series
:rtype: integer
'''
if not (extrapolate or len(self.data) == 1):
self.valid_time(time)
index = np.searchsorted(self.data, time)
if len(self.data) == 1:
index = 0
return index
def interp_alpha(self, time, extrapolate=False):
'''
Returns interpolation alpha for the specified time
:param time: Time to be queried
:param extrapolate:
:type time: datetime.datetime
:type extrapolate: boolean
:return: interpolation alpha
:rtype: double (0 <= r <= 1)
'''
        if not (extrapolate or len(self.data) == 1):
self.valid_time(time)
i0 = self.index_of(time, extrapolate)
if i0 > len(self.data) - 1:
return 1
if i0 == 0:
return 0
t0 = self.data[i0 - 1]
t1 = self.data[i0]
return (time - t0).total_seconds() / (t1 - t0).total_seconds()
|
NOAA-ORR-ERD/gridded
|
gridded/time.py
|
Python
|
unlicense
| 7,412
|
[
"NetCDF"
] |
d4d18b20cf6b100a12f3f577f1fcf245c12c6d1a47c7d0f393899c1b96e79aa2
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Non-relativistic Hartree-Fock analytical nuclear gradients
'''
import time
import numpy
import ctypes
from pyscf import gto
from pyscf import lib
from pyscf.lib import logger
from pyscf.scf import _vhf
def grad_elec(mf_grad, mo_energy=None, mo_coeff=None, mo_occ=None, atmlst=None):
'''
Electronic part of RHF/RKS gradients
Args:
mf_grad : grad.rhf.Gradients or grad.rks.Gradients object
'''
mf = mf_grad.base
mol = mf_grad.mol
if mo_energy is None: mo_energy = mf.mo_energy
if mo_occ is None: mo_occ = mf.mo_occ
if mo_coeff is None: mo_coeff = mf.mo_coeff
log = logger.Logger(mf_grad.stdout, mf_grad.verbose)
hcore_deriv = mf_grad.hcore_generator(mol)
s1 = mf_grad.get_ovlp(mol)
dm0 = mf.make_rdm1(mo_coeff, mo_occ)
t0 = (time.clock(), time.time())
log.debug('Computing Gradients of NR-HF Coulomb repulsion')
vhf = mf_grad.get_veff(mol, dm0)
log.timer('gradients of 2e part', *t0)
dme0 = mf_grad.make_rdm1e(mo_energy, mo_coeff, mo_occ)
if atmlst is None:
atmlst = range(mol.natm)
aoslices = mol.aoslice_by_atom()
de = numpy.zeros((len(atmlst),3))
for k, ia in enumerate(atmlst):
p0, p1 = aoslices [ia,2:]
h1ao = hcore_deriv(ia)
de[k] += numpy.einsum('xij,ij->x', h1ao, dm0)
# nabla was applied on bra in vhf, *2 for the contributions of nabla|ket>
de[k] += numpy.einsum('xij,ij->x', vhf[:,p0:p1], dm0[p0:p1]) * 2
de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], dme0[p0:p1]) * 2
de[k] += mf_grad.extra_force(ia, locals())
if log.verbose >= logger.DEBUG:
log.debug('gradients of electronic part')
_write(log, mol, de, atmlst)
return de
def _write(dev, mol, de, atmlst):
'''Format output of nuclear gradients.
Args:
dev : lib.logger.Logger object
'''
if atmlst is None:
atmlst = range(mol.natm)
dev.stdout.write(' x y z\n')
for k, ia in enumerate(atmlst):
dev.stdout.write('%d %s %15.10f %15.10f %15.10f\n' %
(ia, mol.atom_symbol(ia), de[k,0], de[k,1], de[k,2]))
def grad_nuc(mol, atmlst=None):
'''
Derivatives of nuclear repulsion energy wrt nuclear coordinates
'''
gs = numpy.zeros((mol.natm,3))
for j in range(mol.natm):
q2 = mol.atom_charge(j)
r2 = mol.atom_coord(j)
for i in range(mol.natm):
if i != j:
q1 = mol.atom_charge(i)
r1 = mol.atom_coord(i)
r = numpy.sqrt(numpy.dot(r1-r2,r1-r2))
gs[j] -= q1 * q2 * (r2-r1) / r**3
if atmlst is not None:
gs = gs[atmlst]
return gs
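# Illustrative sanity check (a sketch, not part of the original file): the analytic
# expression in grad_nuc can be compared against a finite difference of the classical
# repulsion energy E_nn = sum_{i<j} q_i*q_j/|r_i - r_j|. The two-charge geometry below
# is made up for the example (charges 1 and 9, separated by 1.8 Bohr along z).
#
# >>> import numpy
# >>> q = numpy.array([1.0, 9.0])
# >>> r = numpy.array([[0., 0., 0.], [0., 0., 1.8]])
# >>> e_nn = lambda r: q[0]*q[1]/numpy.linalg.norm(r[0]-r[1])
# >>> eps = 1e-5
# >>> rp = r.copy(); rp[1, 2] += eps
# >>> rm = r.copy(); rm[1, 2] -= eps
# >>> (e_nn(rp) - e_nn(rm)) / (2*eps)     # ~ -2.7778, the z-derivative for atom 1
# This matches gs[1] = -q0*q1*(r1-r0)/|r1-r0|**3 = (0, 0, -2.7778) from grad_nuc.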
def get_hcore(mol):
'''Part of the nuclear gradients of core Hamiltonian'''
h = mol.intor('int1e_ipkin', comp=3)
if mol._pseudo:
        raise NotImplementedError('Nuclear gradients for GTH PP')
    else:
        h += mol.intor('int1e_ipnuc', comp=3)
if mol.has_ecp():
h += mol.intor('ECPscalar_ipnuc', comp=3)
return -h
def hcore_generator(mf, mol=None):
if mol is None: mol = mf.mol
with_x2c = getattr(mf.base, 'with_x2c', None)
if with_x2c:
hcore_deriv = with_x2c.hcore_deriv_generator(deriv=1)
else:
with_ecp = mol.has_ecp()
if with_ecp:
ecp_atoms = set(mol._ecpbas[:,gto.ATOM_OF])
else:
ecp_atoms = ()
aoslices = mol.aoslice_by_atom()
h1 = mf.get_hcore(mol)
def hcore_deriv(atm_id):
shl0, shl1, p0, p1 = aoslices[atm_id]
with mol.with_rinv_at_nucleus(atm_id):
vrinv = mol.intor('int1e_iprinv', comp=3) # <\nabla|1/r|>
vrinv *= -mol.atom_charge(atm_id)
if with_ecp and atm_id in ecp_atoms:
vrinv += mol.intor('ECPscalar_iprinv', comp=3)
vrinv[:,p0:p1] += h1[:,p0:p1]
return vrinv + vrinv.transpose(0,2,1)
return hcore_deriv
def get_ovlp(mol):
return -mol.intor('int1e_ipovlp', comp=3)
def get_jk(mol, dm):
'''J = ((-nabla i) j| kl) D_lk
K = ((-nabla i) j| kl) D_jk
'''
vhfopt = _vhf.VHFOpt(mol, 'int2e_ip1ip2', 'CVHFgrad_jk_prescreen',
'CVHFgrad_jk_direct_scf')
dm = numpy.asarray(dm, order='C')
if dm.ndim == 3:
n_dm = dm.shape[0]
else:
n_dm = 1
ao_loc = mol.ao_loc_nr()
fsetdm = getattr(_vhf.libcvhf, 'CVHFgrad_jk_direct_scf_dm')
fsetdm(vhfopt._this,
dm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(n_dm),
ao_loc.ctypes.data_as(ctypes.c_void_p),
mol._atm.ctypes.data_as(ctypes.c_void_p), mol.natm,
mol._bas.ctypes.data_as(ctypes.c_void_p), mol.nbas,
mol._env.ctypes.data_as(ctypes.c_void_p))
# Update the vhfopt's attributes intor. Function direct_mapdm needs
# vhfopt._intor and vhfopt._cintopt to compute J/K. intor was initialized
# as int2e_ip1ip2. It should be int2e_ip1
vhfopt._intor = intor = mol._add_suffix('int2e_ip1')
vhfopt._cintopt = None
vj, vk = _vhf.direct_mapdm(intor, # (nabla i,j|k,l)
's2kl', # ip1_sph has k>=l,
('lk->s1ij', 'jk->s1il'),
dm, 3, # xyz, 3 components
mol._atm, mol._bas, mol._env, vhfopt=vhfopt)
return -vj, -vk
def get_veff(mf_grad, mol, dm):
'''NR Hartree-Fock Coulomb repulsion'''
vj, vk = mf_grad.get_jk(mol, dm)
return vj - vk * .5
def make_rdm1e(mo_energy, mo_coeff, mo_occ):
'''Energy weighted density matrix'''
mo0 = mo_coeff[:,mo_occ>0]
mo0e = mo0 * (mo_energy[mo_occ>0] * mo_occ[mo_occ>0])
return numpy.dot(mo0e, mo0.T.conj())
def symmetrize(mol, de, atmlst=None):
'''Symmetrize the gradients wrt the point group symmetry of the molecule.'''
assert(mol.symmetry)
pmol = mol.copy()
    # The symmetry of the gradients should be the same as that of the p-type functions.
# We use p-type AOs to generate the symmetry adaptation projector.
pmol.basis = {'default': [[1, (1, 1)]]}
# There is uncertainty for the output of the transformed molecular
# geometry when mol.symmetry is True. E.g., H2O can be placed either on
# xz-plane or on yz-plane for C2v symmetry. This uncertainty can lead to
# wrong symmetry adaptation basis. Molecular point group and coordinates
# should be explicitly given to avoid the uncertainty.
pmol.symmetry = mol.topgroup
pmol.atom = mol._atom
pmol.unit = 'Bohr'
pmol.build(False, False)
# irrep-p-function x irrep-gradients = total symmetric irrep
a_id = pmol.irrep_id.index(0)
c = pmol.symm_orb[a_id].reshape(mol.natm, 3, -1)
if atmlst is not None:
c = c[:,atmlst,:]
tmp = numpy.einsum('zx,zxi->i', de, c)
proj_de = numpy.einsum('i,zxi->zx', tmp, c)
return proj_de
def as_scanner(mf_grad):
'''Generating a nuclear gradients scanner/solver (for geometry optimizer).
The returned solver is a function. This function requires one argument
"mol" as input and returns energy and first order nuclear derivatives.
The solver will automatically use the results of last calculation as the
initial guess of the new calculation. All parameters assigned in the
nuc-grad object and SCF object (DIIS, conv_tol, max_memory etc) are
automatically applied in the solver.
Note scanner has side effects. It may change many underlying objects
(_scf, with_df, with_x2c, ...) during calculation.
Examples::
>>> from pyscf import gto, scf, grad
>>> mol = gto.M(atom='H 0 0 0; F 0 0 1')
>>> hf_scanner = scf.RHF(mol).apply(grad.RHF).as_scanner()
>>> e_tot, grad = hf_scanner(gto.M(atom='H 0 0 0; F 0 0 1.1'))
>>> e_tot, grad = hf_scanner(gto.M(atom='H 0 0 0; F 0 0 1.5'))
'''
if isinstance(mf_grad, lib.GradScanner):
return mf_grad
logger.info(mf_grad, 'Create scanner for %s', mf_grad.__class__)
class SCF_GradScanner(mf_grad.__class__, lib.GradScanner):
def __init__(self, g):
lib.GradScanner.__init__(self, g)
def __call__(self, mol_or_geom, **kwargs):
if isinstance(mol_or_geom, gto.Mole):
mol = mol_or_geom
else:
mol = self.mol.set_geom_(mol_or_geom, inplace=False)
mf_scanner = self.base
e_tot = mf_scanner(mol)
self.mol = mol
# If second integration grids are created for RKS and UKS
# gradients
if getattr(self, 'grids', None):
self.grids.reset(mol)
de = self.kernel(**kwargs)
return e_tot, de
return SCF_GradScanner(mf_grad)
class GradientsBasics(lib.StreamObject):
'''
Basic nuclear gradient functions for non-relativistic methods
'''
def __init__(self, method):
self.verbose = method.verbose
self.stdout = method.stdout
self.mol = method.mol
self.base = method
self.max_memory = self.mol.max_memory
self.atmlst = None
self.de = None
self._keys = set(self.__dict__.keys())
def dump_flags(self, verbose=None):
log = logger.new_logger(self, verbose)
log.info('\n')
if hasattr(self.base, 'converged') and not self.base.converged:
log.warn('Ground state %s not converged',
self.base.__class__.__name__)
log.info('******** %s for %s ********',
self.__class__, self.base.__class__)
log.info('max_memory %d MB (current use %d MB)',
self.max_memory, lib.current_memory()[0])
return self
def get_hcore(self, mol=None):
if mol is None: mol = self.mol
return get_hcore(mol)
hcore_generator = hcore_generator
def get_ovlp(self, mol=None):
if mol is None: mol = self.mol
return get_ovlp(mol)
@lib.with_doc(get_jk.__doc__)
def get_jk(self, mol=None, dm=None, hermi=0):
if mol is None: mol = self.mol
if dm is None: dm = self.base.make_rdm1()
cpu0 = (time.clock(), time.time())
vj, vk = get_jk(mol, dm)
logger.timer(self, 'vj and vk', *cpu0)
return vj, vk
def get_j(self, mol=None, dm=None, hermi=0):
if mol is None: mol = self.mol
if dm is None: dm = self.base.make_rdm1()
intor = mol._add_suffix('int2e_ip1')
return -_vhf.direct_mapdm(intor, 's2kl', 'lk->s1ij', dm, 3,
mol._atm, mol._bas, mol._env)
def get_k(self, mol=None, dm=None, hermi=0):
if mol is None: mol = self.mol
if dm is None: dm = self.base.make_rdm1()
intor = mol._add_suffix('int2e_ip1')
return -_vhf.direct_mapdm(intor, 's2kl', 'jk->s1il', dm, 3,
mol._atm, mol._bas, mol._env)
def grad_nuc(self, mol=None, atmlst=None):
if mol is None: mol = self.mol
return grad_nuc(mol, atmlst)
def optimizer(self, solver='geometric'):
'''Geometry optimization solver
Kwargs:
solver (string) : geometry optimization solver, can be "geomeTRIC"
(default) or "berny".
'''
if solver.lower() == 'geometric':
from pyscf.geomopt import geometric_solver
return geometric_solver.GeometryOptimizer(self.as_scanner())
elif solver.lower() == 'berny':
from pyscf.geomopt import berny_solver
return berny_solver.GeometryOptimizer(self.as_scanner())
else:
raise RuntimeError('Unknown geometry optimization solver %s' % solver)
def grad_elec(self):
raise NotImplementedError
def kernel(self):
raise NotImplementedError
@lib.with_doc(symmetrize.__doc__)
def symmetrize(self, de, atmlst=None):
return symmetrize(self.mol, de, atmlst)
grad = lib.alias(kernel, alias_name='grad')
def _finalize(self):
if self.verbose >= logger.NOTE:
logger.note(self, '--------------- %s gradients ---------------',
self.base.__class__.__name__)
self._write(self.mol, self.de, self.atmlst)
logger.note(self, '----------------------------------------------')
_write = _write
def as_scanner(self):
'''Generate Gradients Scanner'''
raise NotImplementedError
class Gradients(GradientsBasics):
'''Non-relativistic restricted Hartree-Fock gradients'''
def get_veff(self, mol=None, dm=None):
if mol is None: mol = self.mol
if dm is None: dm = self.base.make_rdm1()
return get_veff(self, mol, dm)
def make_rdm1e(self, mo_energy=None, mo_coeff=None, mo_occ=None):
if mo_energy is None: mo_energy = self.base.mo_energy
if mo_coeff is None: mo_coeff = self.base.mo_coeff
if mo_occ is None: mo_occ = self.base.mo_occ
return make_rdm1e(mo_energy, mo_coeff, mo_occ)
def extra_force(self, atom_id, envs):
'''Hook for extra contributions in analytical gradients.
Contributions like the response of auxiliary basis in density fitting
method, the grid response in DFT numerical integration can be put in
this function.
'''
return 0
grad_elec = grad_elec
def kernel(self, mo_energy=None, mo_coeff=None, mo_occ=None, atmlst=None):
cput0 = (time.clock(), time.time())
if mo_energy is None: mo_energy = self.base.mo_energy
if mo_coeff is None: mo_coeff = self.base.mo_coeff
if mo_occ is None: mo_occ = self.base.mo_occ
if atmlst is None:
atmlst = self.atmlst
else:
self.atmlst = atmlst
if self.verbose >= logger.WARN:
self.check_sanity()
if self.verbose >= logger.INFO:
self.dump_flags()
de = self.grad_elec(mo_energy, mo_coeff, mo_occ, atmlst)
self.de = de + self.grad_nuc(atmlst=atmlst)
if self.mol.symmetry:
self.de = self.symmetrize(self.de, atmlst)
logger.timer(self, 'SCF gradients', *cput0)
self._finalize()
return self.de
as_scanner = as_scanner
Grad = Gradients
from pyscf import scf
# Inject to RHF class
scf.hf.RHF.Gradients = lib.class_as_method(Gradients)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
mol = gto.Mole()
mol.verbose = 0
mol.atom = [['He', (0.,0.,0.)], ]
mol.basis = {'He': 'ccpvdz'}
mol.build()
method = scf.RHF(mol)
method.scf()
g = Gradients(method)
print(g.grad())
h2o = gto.Mole()
h2o.verbose = 0
h2o.atom = [
['O' , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ]
h2o.basis = {'H': '631g',
'O': '631g',}
h2o.symmetry = True
h2o.build()
mf = scf.RHF(h2o)
mf.conv_tol = 1e-14
e0 = mf.scf()
g = Gradients(mf)
print(g.grad())
#[[ 0 0 -2.41134256e-02]
# [ 0 4.39690522e-03 1.20567128e-02]
# [ 0 -4.39690522e-03 1.20567128e-02]]
mf = scf.RHF(h2o).x2c()
mf.conv_tol = 1e-14
e0 = mf.scf()
g = mf.Gradients()
print(g.grad())
#[[ 0 0 -2.40286232e-02]
# [ 0 4.27908498e-03 1.20143116e-02]
# [ 0 -4.27908498e-03 1.20143116e-02]]
|
gkc1000/pyscf
|
pyscf/grad/rhf.py
|
Python
|
apache-2.0
| 16,084
|
[
"PySCF"
] |
05b947e510faac66cff73bc44ccac55f649f6becf91cefcf6db78de818cdd300
|
#!/usr/bin/env python
# encoding: utf-8
# Copyright (C) 2015 John Törnblom
'''
rule-specification language (RSL) interpreter.
'''
import sys
import logging
import os
import xtuml
import rsl.version
complete_usage = '''
USAGE:
%s [-arch <string>] ... [-import <string>] ... [-include <string>] ... [-d <integer>] ... [-diff <string>] [-emit <string>] [-priority <integer>] [-lVHs] [-lSCs] [-l2b] [-l2s] [-l3b] [-l3s] [-nopersist] [-dumpsql <file>] [-force] [-integrity] [-e <string>] [-t <string>] [-v <string>] [-qim] [-q] [-l] [-f <string>] [-# <integer>] [//] [-version] [-h]
Where:
-arch <string> (accepted multiple times)
(value required) Archetype file name(s)
-import <string> (accepted multiple times)
(value required) Data file name(s)
-include <string> (accepted multiple times)
(value required) add a path to list of dirs to search for include files
-d <integer> (accepted multiple times)
(value required) The domain code. This argument must immediately precede the "-import" argument that it applies to.
-diff <string>
(value required) save a diff of all emits to a filename
-priority <integer>
(value required) Set process priority. Acceptable values are:
NORMAL_PRIORITY_CLASS = 32
IDLE_PRIORITY_CLASS = 64
HIGH_PRIORITY_CLASS = 128
REALTIME_PRIORITY_CLASS = 256
BELOW_NORMAL_PRIORITY_CLASS = 16384 (default)
        ABOVE_NORMAL_PRIORITY_CLASS = 32768
-emit <string>
(value required) Chose when to emit. Acceptable values are:
never = never emit to disk
change = only emit to disk when files differ (default)
always = always emit to disk, even when the content in memory is the same as the content on disk
-lVHs
Use VHDL source license
-lSCs
Use SystemC source license
-l2b
Use MC-2020 binary license
-l2s
Use MC-2020 source license
-l3b
Use MC-3020 binary license
-l3s
Use MC-3020 source license
-nopersist
Disable persistence
-dumpsql <file>
(value required) Dump the instance population as SQL insert statements
-force
make read-only emit files writable
-integrity
check the model for integrity violations upon program exit
-e <string>
(value required) Enable specified feature
-t <string>
(value required) Full-blast logging
-v <string>
(value required) Verbose mode (STMT, COMP, or SYS)
-qim
Quiet insert mismatches. Do not print warnings if insert data doesn't populate all attributes.
-q
Quit on error
-l
Use log file
-f <string>
(value required) Generated file name (database)
-# <integer>
(value required) Number of files to generate
//, -ignore_rest
Ignores the rest of the labeled arguments following this flag.
-version
Displays version information and exits.
-h
Displays usage information and exits.
gen_erate
'''
brief_usage = '''
Brief USAGE:
%s [-arch <string>] ... [-import <string>] ... [-include <string>] ... [-d <integer>] ... [-diff <string>] [-emit <string>] [-priority <integer>] [-lVHs] [-lSCs] [-l2b] [-l2s] [-l3b] [-l3s] [-nopersist] [-dumpsql <file>] [-force] [-integrity] [-e <string>] [-t <string>] [-v <string>] [-qim] [-q] [-l] [-f <string>] [-# <integer>] [//] [-version] [-h]
For complete USAGE and HELP type:
%s -h
'''
def main(argv=None):
loglevel = logging.INFO
database_filename = 'mcdbms.gen'
enable_persistance = True
dump_sql_file = ''
force_overwrite = False
emit_when = 'change'
diff_filename = None
inputs = list()
includes = ['.']
check_integrity = False
argv = argv or sys.argv
quiet_insert_mismatch = False
i = 1
while i < len(argv):
if argv[i] == '-arch':
i += 1
inputs.append((argv[i], 'arc'))
elif argv[i] == '-import':
i += 1
inputs.append((argv[i], 'sql'))
elif argv[i] == '-include':
i += 1
includes.append(argv[i])
elif argv[i] == '-emit':
i += 1
emit_when = argv[i]
elif argv[i] == '-f':
i += 1
database_filename = argv[i]
elif argv[i] == '-force':
force_overwrite = True
elif argv[i] == '-integrity':
check_integrity = True
elif argv[i] == '-diff':
i += 1
diff_filename = argv[i]
elif argv[i] == '-nopersist':
enable_persistance = False
elif argv[i] == '-dumpsql':
i += 1
dump_sql_file = argv[i]
elif argv[i] == '-v':
i += 1
loglevel = logging.DEBUG
elif argv[i] == '-qim':
quiet_insert_mismatch = True
elif argv[i] == '-version':
print(rsl.version.complete_string)
sys.exit(0)
elif argv[i] == '-h':
print(complete_usage % argv[0])
sys.exit(0)
elif argv[i] in ['//', '-ignore_rest']:
break
# ignore these options
elif argv[i] in ['-lVHs', '-lSCs', '-l2b', '-l2s', '-l3b', '-l3s',
'-q', '-l']:
pass
# ignore these options (which expects a following value)
elif argv[i] in ['-d', '-priority', '-e', '-t', '-#']:
i += 1
else:
print("PARSE ERROR: Argument: %s" % argv[i])
print("Couldn't find match for argument")
print(brief_usage % (argv[0], argv[0]))
sys.exit(1)
i += 1
logging.basicConfig(stream=sys.stdout, level=loglevel)
id_generator = xtuml.IntegerGenerator()
metamodel = xtuml.MetaModel(id_generator)
loader = xtuml.ModelLoader()
if quiet_insert_mismatch:
load_logger = logging.getLogger(xtuml.load.__name__)
load_logger.setLevel(logging.ERROR)
if diff_filename:
with open(diff_filename, 'w') as f:
f.write(' '.join(argv))
f.write('\n')
if enable_persistance and os.path.isfile(database_filename):
loader.filename_input(database_filename)
for filename, kind in inputs:
if kind == 'sql':
loader.filename_input(filename)
elif kind == 'arc':
loader.populate(metamodel)
rt = rsl.Runtime(metamodel, emit_when, force_overwrite, diff_filename)
ast = rsl.parse_file(filename)
rsl.evaluate(rt, ast, includes)
loader = xtuml.ModelLoader()
else:
            # should not happen
            print("File %s is of unknown kind '%s', skipping it" % (filename, kind))
errors = 0
if check_integrity:
errors += xtuml.check_association_integrity(metamodel)
errors += xtuml.check_uniqueness_constraint(metamodel)
if enable_persistance:
xtuml.persist_database(metamodel, database_filename)
if dump_sql_file != '':
xtuml.persist_instances(metamodel, dump_sql_file)
return errors
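# Illustrative invocation (a sketch, not taken from the original file). The file
# names below are hypothetical; the flags are the ones actually parsed in main():
#
#   python gen_erate.py -import model.sql -arch templates.arc \
#                       -include ./include -emit change -integrity -f mcdbms.gen
#
# Each -arch file is evaluated against the instances loaded from the preceding
# -import files; -integrity additionally reports association/uniqueness violations.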
if __name__ == '__main__':
num_errors = main()
sys.exit(num_errors > 0)
|
john-tornblom/pyrsl
|
rsl/gen_erate.py
|
Python
|
gpl-3.0
| 7,431
|
[
"BLAST"
] |
991d6cbb90ee10924e0c30cec97c625c58c25017abc6fe9c4fcaefd863ae0843
|
# inporb.py -- Molcas orbital format
#
# molpy, an orbital analyzer and file converter for Molcas files
# Copyright (c) 2016 Steven Vancoillie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Written by Steven Vancoillie.
#
from copy import deepcopy
import numpy as np
from . import export
from .errors import InvalidRequest
@export
class MolcasINPORB():
"""
Handle reading and writing of Molcas INPORB files. These are the default
formatted orbital files used by any Molcas program modules.
"""
def __init__(self, filename, mode, version='2.0'):
"""
initialize the INPORB file and prepare for reading or writing
"""
if mode.startswith('r'):
try:
with open(filename, 'r') as f:
firstline = next(f)
if not firstline.startswith('#INPORB'):
raise InvalidRequest
except:
raise InvalidRequest
self.f = open(filename, mode)
if mode.startswith('r'):
line = self.seek_line('#INPORB')
self.version = line.split()[1]
self.seek_line('#INFO')
uhf, self.n_sym, self.wfn_type = (int(val) for val in self._next_noncomment().split())
if uhf == 1:
self.unrestricted = True
else:
self.unrestricted = False
            self.n_bas = np.array(self._next_noncomment().split(), dtype=int)
            self.n_orb = np.array(self._next_noncomment().split(), dtype=int)
elif mode.startswith('w'):
self.version = version
else:
raise Exception('invalid mode string')
if self.version == '2.0':
self.read_block = self._read_block_v20
self.occ_fmt = ' {:7.4f}'
self.one_fmt = ' {:11.4e}'
self.orb_fmt = ' {:21.14e}'
self.occ_blk_size = 10
self.one_blk_size = 10
self.orb_blk_size = 5
elif self.version == '1.1':
self.read_block = self._read_block_v11
self.occ_fmt = '{:18.11e}'
self.one_fmt = '{:18.11e}'
self.orb_fmt = '{:18.11e}'
self.occ_blk_size = 4
self.one_blk_size = 4
self.orb_blk_size = 4
else:
raise Exception('invalid version number')
def write(self, wfn):
wfn = deepcopy(wfn)
self.write_version(self.version)
if wfn.unrestricted:
uhf = 1
kinds = ['alpha', 'beta']
else:
uhf = 0
kinds = ['restricted']
self.write_info(uhf, wfn.n_sym, wfn.n_bas)
orbs = {}
for kind in kinds:
wfn.mo[kind].sanitize()
orbs[kind] = wfn.symmetry_blocked_orbitals(kind=kind)
for kind in kinds:
self.write_orb((orb.coefficients for orb in orbs[kind]), kind=kind)
for kind in kinds:
self.write_occ((orb.occupations for orb in orbs[kind]), kind=kind)
for kind in kinds:
self.write_one((orb.energies for orb in orbs[kind]), kind=kind)
for kind in kinds:
self.write_index((orb.types for orb in orbs[kind]), kind=kind)
def close(self):
self.f.close()
def rewind(self):
self.f.seek(0)
def read_orb(self, kind='restricted'):
self.seek_line(self._format_header('ORB', kind=kind))
coefficients = np.empty(sum(self.n_bas**2), dtype=np.float64)
sym_offset = 0
for nb in self.n_bas:
if nb == 0:
continue
for offset in range(sym_offset, sym_offset + nb**2, nb):
coefficients[offset:offset+nb] = self.read_block(nb)
sym_offset += nb**2
return coefficients
def read_occ(self, kind='restricted'):
self.seek_line(self._format_header('OCC', kind=kind))
occupations = np.empty(sum(self.n_bas), dtype=np.float64)
sym_offset = 0
for nb in self.n_bas:
occupations[sym_offset:sym_offset+nb] = self.read_block(nb, self.occ_blk_size)
sym_offset += nb
return occupations
def read_one(self, kind='restricted'):
self.seek_line(self._format_header('ONE', kind=kind))
energies = np.empty(sum(self.n_bas), dtype=np.float64)
sym_offset = 0
for nb in self.n_bas:
energies[sym_offset:sym_offset+nb] = self.read_block(nb, self.one_blk_size)
sym_offset += nb
return energies
def read_index(self):
self.seek_line('#INDEX')
typeindices = np.empty(sum(self.n_bas), dtype='U1')
blk_size = 10
sym_offset = 0
for nb in self.n_bas:
for offset in range(sym_offset, sym_offset + nb, blk_size):
values = self._next_noncomment().split()[1].strip()
size = min(blk_size, sym_offset + nb - offset)
typeindices[offset:offset+size] = np.array([values]).view('U1')
sym_offset += nb
return typeindices
def write_version(self, version):
self.f.write('#INPORB {:s}\n'.format(version))
def write_info(self, uhf, n_sym, n_bas, title=''):
""" write info block """
self.f.write('#INFO\n')
self.f.write(''.join(['*' + title + '\n']))
self.f.write((3 * '{:8d}' + '\n').format(uhf,n_sym,0))
self.f.write((n_sym * '{:8d}' + '\n').format(*n_bas))
self.f.write((n_sym * '{:8d}' + '\n').format(*n_bas))
def write_orb(self, mo_vectors, kind='restricted'):
self.f.write(self._format_header('ORB', kind=kind))
for isym, coef in enumerate(mo_vectors):
norb = coef.shape[0]
for jorb in range(norb):
self.f.write('* ORBITAL{:5d}{:5d}\n'.format(isym+1,jorb+1))
self._write_blocked(np.ravel(coef[:,jorb]), self.orb_fmt, blocksize=self.orb_blk_size)
def write_occ(self, mo_occupations, kind='restricted'):
self.f.write(self._format_header('OCC', kind=kind))
self.f.write('* OCCUPATION NUMBERS\n')
for occ in mo_occupations:
self._write_blocked(occ, self.occ_fmt, blocksize=self.occ_blk_size)
def write_one(self, mo_energies, kind='restricted'):
self.f.write(self._format_header('ONE', kind=kind))
self.f.write('* ONE ELECTRON ENERGIES\n')
for ene in mo_energies:
self._write_blocked(ene, self.one_fmt, blocksize=self.one_blk_size)
def write_index(self, mo_typeindices, kind='restricted'):
self.f.write('#INDEX\n')
for idx in mo_typeindices:
self.f.write('* 1234567890\n')
self._write_blocked(idx, '{:1s}', blocksize=10, enum=True)
# internal use
def _next_noncomment(self):
line = next(self.f)
while line.startswith('*'):
line = next(self.f)
return line
def _read_block_v11(self, size, blk_size=4):
"""
read a block of 'size' values from an INPORB 1.1 file
"""
arr = np.empty(size)
for offset in range(0, size, blk_size):
line = self._next_noncomment().rstrip()
values = [line[sta:sta+18] for sta in range(0,len(line),18)]
arr[offset:offset+blk_size] = np.array(values, dtype=np.float64)
return arr
def _read_block_v20(self, size, blk_size=5):
"""
read a block of 'size' values from an INPORB 2.0 file
"""
arr = np.empty(size)
for offset in range(0, size, blk_size):
values = self._next_noncomment().split()
arr[offset:offset+blk_size] = np.array(values, dtype=np.float64)
return arr
def _write_blocked(self, arr, fmt, blocksize=5, enum=False):
"""
write an array to file with a fixed number of elements per line
"""
if enum:
for idx, offset in enumerate(range(0, len(arr), blocksize)):
prefix = '{:1d} '.format(idx % 10)
line = ''.join(fmt.format(i) for i in arr[offset:offset+blocksize])
self.f.write(prefix + line + '\n')
else:
for offset in range(0, len(arr), blocksize):
line = ''.join(fmt.format(i) for i in arr[offset:offset+blocksize])
self.f.write(line + '\n')
@staticmethod
def _format_header(header, kind='restricted'):
if kind == 'beta':
return '#U' + header + '\n'
else:
return '#' + header + '\n'
def seek_line(self, pattern):
""" find the next line starting with a specific string """
line = next(self.f)
while not line.startswith(pattern):
line = next(self.f)
return line
@export
class MolcasINPORB11(MolcasINPORB):
def __init__(self, filename, mode):
super().__init__(filename, mode, version='1.1')
@export
class MolcasINPORB20(MolcasINPORB):
def __init__(self, filename, mode):
super().__init__(filename, mode, version='2.0')
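# Illustrative usage (a sketch, not part of the original module). 'example.RasOrb'
# is a hypothetical Molcas orbital file name; on reading, the INPORB version is
# taken from the '#INPORB' header line of the file itself.
#
# >>> f = MolcasINPORB('example.RasOrb', 'r')
# >>> coef = f.read_orb()      # flat array of symmetry-blocked MO coefficients
# >>> occ = f.read_occ()       # occupation numbers, one per orbital
# >>> idx = f.read_index()     # one-letter orbital type indices
# >>> f.close()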
|
steabert/molpy
|
molpy/inporb.py
|
Python
|
gpl-2.0
| 9,707
|
[
"MOLCAS"
] |
a7e8d1645abc505f4b9776af90eecd782faca05f8939a08273a60ac9d58dc65a
|
"""
.. automodule::
:members:
Trajectories Class
==================
.. autoclass:: Trajectories
:members:
"""
try:
from mayavi import mlab
from tvtk.tools import visual
except:
print('mayavi not installed')
import numpy as np
import scipy as sp
import pdb
import matplotlib.pyplot as plt
import pylayers.util.pyutil as pyu
import pandas as pd
import copy
import time
import doctest
from datetime import datetime
from matplotlib.widgets import Slider, CheckButtons
import matplotlib.animation as animation
from pylayers.util.project import *
from pylayers.gis.layout import Layout
class Trajectories(PyLayers,list):
""" Define a list of trajectory
"""
def __init__(self):
""" initialization
"""
super(list, self).__init__()
self.name = []
self.typ = []
self.ID = []
self.t = []
def __repr__(self):
"""
"""
if hasattr(self,'Lfilename'):
s = 'Trajectories performed in Layout : ' + str(self.Lfilename) + '\n\n'
else:
s = ''
try:
for a in self:
s = s + a.__repr__()
s = s + '\n'
except:
            s = 'Issue in Trajectories. Are you sure any Trajectory is loaded?'
return s
def append(self,obj):
""" overload list.append
"""
super(Trajectories,self).append(obj)
self.name.append(obj.name)
self.typ.append(obj.typ)
self.ID.append(obj.ID)
def pop(self,idx=-1):
""" overloaded list.pop
"""
super(Trajectories,self).pop(idx)
self.name.pop(idx)
self.typ.pop(idx)
self.ID.pop(idx)
def loadh5(self, _filename='simulnet_TA-Office.h5',append =False):
""" import simulnet h5 file
Parameters
----------
filename : string
default simulnet + Layout_filename . h5
append : boolean
if True : append new trajectories to preexisting ones
Returns
-------
lt : list of trajectory
Examples
--------
.. plot::
:include-source:
>>> from pylayers.mobility.trajectory import *
>>> T = Trajectories()
>>> T.loadh5()
"""
filename = pyu.getlong(_filename, pstruc['DIRNETSAVE'])
if os.path.exists(filename):
fil = pd.io.pytables.HDFStore(filename)
else:
raise NameError(filename + ' not found')
if not append:
[self.pop(0) for i in range(len(self))]
for k in fil.keys():
df = fil[k]
#df = df.set_index('t')
df.index = pd.to_datetime(df.t)
ID = fil.get_storer(k).attrs.ID
name = fil.get_storer(k).attrs.name
typ = fil.get_storer(k).attrs.typ
layout = fil.get_storer(k).attrs.layout
#
# velocity
#
v = np.array((df.vx.values,df.vy.values))
#
d = np.sqrt(np.sum(v*v,axis=0))
s = np.cumsum(d)
df['s'] = s
self.append(Trajectory(df=df,ID=ID,name=name,typ=typ))
fil.close()
self.Lfilename = layout
self.t = self.time()
def resample(self, sf=2, tstart = -1):
""" resample trajectories
Parameters
----------
sf : int
sampling factor
tstart : float
new start time (must be > original start time).
if tstart = -1 : original start conserved
Returns
-------
T : Trajectories
new trajectories object updated
"""
T=Trajectories()
for t in self:
if t.typ != 'ap':
T.append(t.resample(sf=sf, tstart=tstart))
else:
T.append(t)
T.Lfilename = self.Lfilename
T.time()
return T
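    # Illustrative sketch (not in the original docstring): doubling the sampling
    # rate of previously loaded trajectories. This assumes a simulnet h5 file has
    # already been produced under the default name used by loadh5().
    #
    # >>> T = Trajectories()
    # >>> T.loadh5()
    # >>> T2 = T.resample(sf=2)    # agent trajectories get twice as many samples
    # >>> len(T2.t) > len(T.t)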
def time(self,unit='s'):
""" extract time from a trajectory
Parameters
----------
unit : integer
default 0 (s) - 3 (ms) 6 (mus) 9 (ns)
Returns
-------
update self.t
"""
ut = np.where(np.array(self.typ) == 'ag')[0][0]
self.t = self[ut].time()
def replay(self, fig=[], ax=[], **kwargs):
""" replay a trajectory
Parameters
----------
fig
ax
speed : float
speed ratio
"""
# plt.ion()
if fig==[]:
fig = plt.gcf()
if ax == []:
ax = plt.gca()
limkwargs = copy.copy(kwargs)
if 'c' in kwargs:
limkwargs.pop('c')
        if 'color' in kwargs:
            limkwargs.pop('color')
        limkwargs['marker'] = '*'
        limkwargs['s'] = 20
        if 'm' not in kwargs and 'marker' not in kwargs:
            kwargs['marker'] = 'o'
        if 'c' not in kwargs and 'color' not in kwargs:
            kwargs['color'] = 'b'
L=Layout(self.Lfilename)
fig, ax = L.showG('s',fig=fig, ax=ax, **kwargs)
time=self[0].time()
line, = ax.plot([], [], 'ob', lw=2)
time_template = 'time = %.1fs'
time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
def init():
line.set_data([],[])
time_text.set_text('')
return line, time_text
def animate(it):
X=[]
Y=[]
for t in self:
if t.typ == 'ag':
X.append(t['x'].values[it])
Y.append(t['y'].values[it])
line.set_data(X,Y)
time_text.set_text(time_template%(time[it]))
return line, time_text
ani = animation.FuncAnimation(fig, animate, np.arange(1, len(time)),
interval=25, blit=True, init_func=init)
plt.show()
def _show3(self):
[t._show3() for t in self]
def ishow(self):
"""
interactive show of trajectories
Examples
--------
.. plot::
:include-source:
>>> from pylayers.mobility.trajectory import *
>>> T=Trajectories()
>>> T.loadh5()
>>> T.ishow()
"""
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.2, left=0.3)
L = Layout(self.Lfilename)
fig, ax = L.showG('s', fig=fig, ax=ax)
valinit = 0
lines = []
labels = []
colors = "bgrcmykw"
for iT, T in enumerate(self):
if T.typ == 'ag':
lines.extend(ax.plot(T['x'][0:valinit],T['y'][0:valinit], 'o',
color=colors[iT], visible=True))
labels.append(T.name + ':' + T.ID)
else:
lines.extend(ax.plot(T['x'][0], T['y'][0], '^', ms=12,
color=colors[iT], visible=True))
labels.append(T.name + ':' + T.ID)
#if type(self[0]) is datetime.
t = self[0].time()
# init boolean value for visible in checkbutton
blabels = [True]*len(labels)
########
# slider
########
slider_ax = plt.axes([0.1, 0.1, 0.8, 0.02])
slider = Slider(slider_ax, "time", self[0].tmin, self[0].tmax,
valinit=valinit, color='#AAAAAA')
def update(val):
if val >= 1:
pval=np.where(val>t)[0]
ax.set_title(str(self[0].index[pval[-1]].time())[:11].ljust(12),
loc='left')
for iT, T in enumerate(self):
if T.typ == 'ag':
lines[iT].set_xdata(T['x'][pval])
lines[iT].set_ydata(T['y'][pval])
fig.canvas.draw()
slider.on_changed(update)
########
# choose
########
rax = plt.axes([0.02, 0.5, 0.3, 0.2], aspect='equal')
        # check (ax object, name of the object, bool value for the object)
check = CheckButtons(rax, labels, tuple(blabels))
def func(label):
i = labels.index(label)
lines[i].set_visible(not lines[i].get_visible())
fig.canvas.draw()
check.on_clicked(func)
fig.canvas.draw()
plt.show(fig)
class Trajectory(PyLayers,pd.DataFrame):
""" define a trajectory
This class derives from pandas.DataFrame. It handles a full 3D trajectory.
A trajectory is time-stamped and contains 3D coordinates of p
position, velocity and acceleration.
Attributes
----------
tmin : float
tmax : float
    ttime :
dtot :
meansp :
Methods
-------
time
space
update
rescale
generate
distance
plot
replay
"""
def __init__(self, df={}, ID='0', name='', typ=''):
""" trajectory initialization
"""
super(Trajectory, self).__init__(df)
if type(ID) is str:
self.ID = ID
else:
self.ID = ID.decode('utf-8')
if type(name) is str:
self.name = name
else:
self.name = name.decode('utf-8')
if type(typ) is str:
self.typ = typ
else:
self.typ = typ.decode('utf-8')
self.has_values = self.update()
def __repr__(self):
try:
# total distance
dtot = self['s'].values[-1]
# total time
T = self.tmax-self.tmin
st = ''
typ = str(self.typ)
if not isinstance(self.ID,str):
ID = str(self.ID)
else:
ID = self.ID
if typ == 'ag':
string ='Trajectory of agent ' + str(self.name) + ' with ID ' + ID
else :
string ='Access point ' + str(self.name) + ' with ID ' + ID
st = st + string + '\n'
st = st + '-'*len(string) + '\n'
if self.typ == 'ag':
st = st+'t (s) : '+ str("%3.2f" %self.tmin)+" : "+ str("%3.2f" % self.ts) +" : " +str("%3.2f" % self.tmax)+'\n'
st = st+'dtot (m) : '+ str("%3.2f" %dtot)+'\n'
st = st+'Vmoy (m/s) : '+ str("%3.2f" % (dtot/T))+'\n'
else :
st = st+'t (s) : '+ str("%3.2f" %self.tmin) + '\n'
st = st+'Vmoy (m/s) : '+ str(self['vx'].values[0]) +'\n'
st = st + str(self.head(2)) + '\n'
except:
st = 'void Trajectory'
return(st)
    def copy(self, deep=True):
        """ copy of a trajectory
Parameters
----------
deep : boolean
"""
df = super(Trajectory, self).copy(deep=deep)
return Trajectory(df=df,ID=self.ID,name=self.name,typ=self.typ)
def update(self):
""" update class member data
This method updates the following data members
+ tmin (s)
+ tmax (s)
        + ts time step in seconds
        + ttime (s) trajectory duration
        + meansp
Returns
-------
bool :
True if Trajectory has values, False otherwise
"""
if len(self.values) != 0:
self.tmin = self.t.min()
self.tmax = self.t.max()
try:
self.ts = (self.index[-1]*1e-9)-(self.index[-2]*1e-9)
except:
self.ts = np.nan
self.ttime = self.tmax-self.tmin
self.dtot = self['s'].values[-1]
self.meansp = self.dtot/self.ttime
self.t = self.time()
return True
else :
return False
def generate(self,**kwargs):
""" generate a trajectory from a numpy array
Parameters
----------
pt : np.ndarray:
(npt x 3) (x,y,z)
t = np.ndarray
(1 x npt)
Id : Agent Id
name : Agent Name
unit : str
time unity ('s'|'ns',...)
Examples
--------
.. plot::
:include-source:
>>> from pylayers.mobility.trajectory import *
>>> traj = Trajectory()
>>> traj.generate()
>>> traj.plot()
"""
defaults = { 'ID': '1',
'name': 'MyNameIsNoBody',
'typ':'ag',
't': np.linspace(0,10,50),
'pt': np.vstack((np.sin(np.linspace(0,3,50)),np.linspace(0,10,50),np.random.randn(50),)).T,
'unit': 's',
'sf': 1
}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
t = kwargs['t']
        if len(t) < 3:
            raise AttributeError('Trajectory.generate requires at least 3 time stamps')
pt = kwargs['pt']
npt = len(t)
# now = datetime.now()
td = pd.to_datetime(t,unit=kwargs['unit'])
# delta = now - td[0]
# td = td+delta
# velocity vector
v = (pt[1:, :]-pt[0:-1, :])/(t[1]-t[0])
# acceleration vector
a = (v[1:, :]-v[0:-1, :])/(t[1]-t[0])
#
d = np.sqrt(np.sum(v[:,0:2]*v[:, 0:2], axis=1))
s = np.cumsum(d)*(t[1]-t[0])
s[-1] = 0
s = np.roll(s,1)
df = {
'x': pt[:-2, 0],
'y': pt[:-2, 1],
'z': pt[:-2, 2],
'vx': v[:-1, 0],
'vy': v[:-1, 1],
'vz': v[:-1, 2],
'ax': a[:, 0],
'ay': a[:, 1],
'az': a[:, 2],
's': s[:-1],
't':t[:-2]}
super(Trajectory, self).__init__(df, columns=['x', 'y', 'z', 'vx', 'vy',
'vz', 'ax', 'ay', 'az', 's','t'],
index=td[:-2])
self.ID = kwargs['ID']
self.name = kwargs['name']
self.typ = kwargs['typ']
self.time()
self.update()
return self
def resample(self, sf=2, tstart=-1, tstop = -1):
""" resample trajectory
Parameters
----------
sf : float
sampling factor
tstart : float
new start time (must be > original start time).
if tstart = -1 : original start conserved
Returns
-------
T : Trajectory
resampled trajectory
"""
t = self.t
x = self.space()[:, 0]
y = self.space()[:, 1]
fx = sp.interpolate.interp1d(t, x)
fy = sp.interpolate.interp1d(t, y)
if tstart == -1:
tstart = t[0]
else:
if t[0] <= tstart:
tstart = tstart
else :
raise AttributeError('tstart < tmin')
if tstop != -1:
if tstart > tstop:
raise AttributeError('tstart > tstop')
tstep = (t[1]-t[0])/sf
        # need at least 3 values for generate to estimate the acceleration
tnew = np.arange(tstart, t[-1], tstep)
if tstop != -1:
ustop = np.where(tnew <=tstop)[0][-1]
tnew=tnew[0:ustop]
# generate needs at least 3 measures
xnew = fx(tnew)
ynew = fy(tnew)
T = Trajectory()
T.generate(ID=self.ID,
name=self.name,
typ=self.typ,
t=tnew,
pt=np.vstack((xnew,ynew,np.random.randn(len(tnew)),)).T,
unit='s',
sf=sf)
T.update()
return T
def rescale(self,speedkmph=3):
""" same length but specified speed
Parameters
----------
speedkmph : float
targeted mean speed in km/h
Returns
-------
t : rescaled trajectory
"""
speedms = speedkmph/3.6
factor = speedms/self.meansp
newtime = self.t/factor
pt = self.space(ndim=3)
t = copy.copy(self)
t.generate(ID=self.ID, name=self.name, t=newtime, pt=pt)
return(t)
def distance(self,tk):
""" recover distance at time tk
Parameters
----------
tk : float
time value in seconds
Example
-------
>>> from pylayers.mobility.trajectory import *
>>> T = Trajectory()
>>> T.generate()
>>> T.distance(2)
"""
t = self.t
u = np.where((t >= tk-self.ts/2.) & (t <= tk+self.ts/2.))[0][0]
return(self['s'][u])
def space(self, ndim=2):
""" extract space information
Parameters
----------
ndim : int
number of dimensions (default 2)
Returns
-------
pt : nd.array()
"""
if ndim == 2:
pt = np.vstack((self['x'].values, self['y'].values)).T
if ndim == 3:
pt = np.vstack((self['x'].values, self['y'].values,self['z'].values)).T
return(pt)
def time(self, unit=0):
""" extract time
Parameters
----------
unit : integer
default 0 (s) - 3 (ms) 6 (mus) 9 (ns)
Returns
-------
t : nd.array
time in 10**-unit s
"""
lt = self.index
self.t = (lt.microsecond*1e-6+
lt.second+
lt.minute*60+
lt.hour*3600)*10**(unit)
return self.t
def _show3(self,color_range=True,linewidth=0.01):
X=self[['x','y','z']].values
if color_range:
t = np.linspace(0, 100, len(X))
mlab.plot3d(X[:,0],X[:,1],X[:,2],-t,colormap='gist_gray',tube_radius=linewidth)
else:
mlab.plot3d(X[:,0],X[:,1],X[:,2],color=(0,0,0),tube_radius=linewidth)
def plot(self, fig=[], ax=[],tmin=0,tmax=None,Nlabels=5, typ='plot', L=[]):
""" plot trajectory
Parameters
----------
fig
ax
Nlabels : int
typ : 'plot'|'scatter'
L : pylayers.gis.layout.Layout object to be displayed
Examples
--------
.. plot::
:include-source:
>>> from pylayers.mobility.trajectory import *
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> t = np.arange(0,10,0.01)
>>> x = 2*t*np.cos(t)
>>> y = 3*t*np.sin(t)
>>> z = 0*t
>>> pt =np.vstack((x,y,z)).T
>>> traj = Trajectory()
>>> traj.generate(t=t,pt=pt)
>>> f,a = traj.plot()
>>> plt.show()
"""
if fig==[]:
fig = plt.gcf()
if ax == []:
ax = plt.gca()
if L != []:
if isinstance(L,Layout):
fig, ax = L.showGs(fig=fig, ax=ax)
tt = self.time()
if tmax == None:
tmax = tt[-1]
assert(tmax>=tmin)
assert(tmax<=tt[-1])
assert(tmin>=tt[0])
tk = np.where((tt>=tmin)&(tt<=tmax))[0]
kmin = tk[0]
kmax = tk[-1]
if typ == 'plot':
ax.plot(self['x'][tk], self['y'][tk])
elif typ == 'scatter':
ax.scatter(self['x'][tk], self['y'][tk])
for k in np.linspace(kmin, kmax, Nlabels, endpoint=False,dtype=int):
ax.text(self['x'][k], self['y'][k], str(self.index[k].strftime("%M:%S")))
ax.plot(self['x'][k], self['y'][k], '*r')
plt.xlabel('x (meters)')
plt.ylabel('y (meters)')
return fig, ax
def replay(self, fig=[], ax=[], Nlabels=5, typ='plot', L=[], speed=1, **kwargs):
""" replay a trajectory
Parameters
----------
fig
ax
Nlabels : int
default 5
typ : string
'plot'|'scatter'
L : pylayers.gis.layout.Layout
Layout for body to be displayed in
speed : float
speed ratio
"""
# plt.ion()
if fig==[]:
fig = plt.gcf()
if ax == []:
ax = plt.gca()
limkwargs = copy.copy(kwargs)
if 'c' in kwargs:
limkwargs.pop('c')
        if 'color' in kwargs:
            limkwargs.pop('color')
        limkwargs['marker'] = '*'
        limkwargs['s'] = 20
        if 'm' not in kwargs and 'marker' not in kwargs:
            kwargs['marker'] = 'o'
        if 'c' not in kwargs and 'color' not in kwargs:
            kwargs['color'] = 'b'
if L != []:
if isinstance(L,Layout):
fig, ax = L.showG('s',fig=fig, ax=ax, **kwargs)
labels = np.linspace(0, len(self), Nlabels, endpoint=True).tolist()
line, = ax.plot([], [], 'ob', lw=2)
time_template = 'time = %.1fs'
time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
def init():
line.set_data([],[])
time_text.set_text('')
return line, time_text
def animate(it):
thisx = [-100,self['x'].values[it]]
thisy = [-100,self['y'].values[it]]
line.set_data(thisx, thisy)
time_text.set_text(time_template%(self.t[it]))
return line, time_text
ani = animation.FuncAnimation(fig, animate, np.arange(1, len(self.t)),
interval=25, blit=True, init_func=init)
plt.show()
# for ik, k in enumerate(self.index):
# time.sleep(1/(1.*speed))
# ax.scatter(,**kwargs)
# plt.title(str(self.index[ik].time())[:11].ljust(12), loc='left')
# if ik > labels[0]:
# ax.text(self['x'].values[ik], self['y'].values[ik], str(self.index[ik].strftime("%M:%S")))
# ax.scatter(self['x'].values[ik], self['y'].values[ik], **limkwargs)
# labels.pop(0)
# plt.draw()
# plt.ioff()
# for k in :
# k = int(k)
# ax.text(self['x'][k],self['y'][k],str(self.index[k].strftime("%M:%S")))
# ax.plot(self['x'][k],self['y'][k],'*r')
# plt.draw()
def importsn(_filename='pos.csv'):
""" import simulnet csv file
****DEPRECATED
Parameters
----------
filename : string
default 'pos.csv'
Returns
-------
lt : list of trajectory
"""
filename = pyu.getlong(_filename, pstruc['DIRNETSAVE'])
dt = pd.read_csv(filename)
dtk = dt.keys()
N = len(dtk)
Ntraj = int((N-1)/3)
lt = []
for it in range(Ntraj):
x = dt[dtk[3*it+1]].values
y = dt[dtk[3*it+2]].values
z = np.zeros(len(x))
pt = np.vstack((x, y, z))
T = Trajectory()
lt.append(T.generate(t=dt['time'].values, pt=pt.T, unit='s'))
return(lt)
# def importh5(_filename='simulnet_TA-Office.h5'):
# """ import simulnet h5 file
# Parameters
# ----------
# filename : string
# default simulnet + Layout_filename . h5
# Returns
# -------
# lt : list of trajectory
# """
# filename = pyu.getlong(_filename,pstruc['DIRNETSAVE'])
# fil = pd.HDFStore(filename)
# lt=[]
# for k in fil.keys():
# df = fil[k]
# df = df.set_index('t')
# v=np.array((df.vx.values,df.vy.values))
# d = np.sqrt(np.sum(v*v,axis=0))
# s = np.cumsum(d)
# df['s'] = s
# lt.append(Trajectory(df))
# fil.close()
# return lt
if __name__ == '__main__':
plt.ion()
doctest.testmod()
|
pylayers/pylayers
|
pylayers/mobility/trajectory.py
|
Python
|
mit
| 23,654
|
[
"Mayavi"
] |
39fb846199fe3f810328b822503550b929b0dd9ca75f43a8d7ad5202476157c9
|
#!/usr/bin/env python
"""Based on cairo-demo/X11/cairo-demo.c"""
import cairocffi as cairo
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
SIZE = 30
def triangle(ctx):
ctx.move_to(SIZE, 0)
ctx.rel_line_to(SIZE, 2 * SIZE)
ctx.rel_line_to(-2 * SIZE, 0)
ctx.close_path()
def square(ctx):
ctx.move_to(0, 0)
ctx.rel_line_to(2 * SIZE, 0)
ctx.rel_line_to(0, 2 * SIZE)
ctx.rel_line_to(-2 * SIZE, 0)
ctx.close_path()
def bowtie(ctx):
ctx.move_to(0, 0)
ctx.rel_line_to(2 * SIZE, 2 * SIZE)
ctx.rel_line_to(-2 * SIZE, 0)
ctx.rel_line_to(2 * SIZE, -2 * SIZE)
ctx.close_path()
def inf(ctx):
ctx.move_to(0, SIZE)
ctx.rel_curve_to(0, SIZE, SIZE, SIZE, 2 * SIZE, 0)
ctx.rel_curve_to(SIZE, -SIZE, 2 * SIZE, -SIZE, 2 * SIZE, 0)
ctx.rel_curve_to(0, SIZE, -SIZE, SIZE, -2 * SIZE, 0)
ctx.rel_curve_to(-SIZE, -SIZE, -2 * SIZE, -SIZE, -2 * SIZE, 0)
ctx.close_path()
def draw_shapes(ctx, x, y, fill):
ctx.save()
ctx.new_path()
ctx.translate(x + SIZE, y + SIZE)
bowtie(ctx)
if fill:
ctx.fill()
else:
ctx.stroke()
ctx.new_path()
ctx.translate(3 * SIZE, 0)
square(ctx)
if fill:
ctx.fill()
else:
ctx.stroke()
ctx.new_path()
ctx.translate(3 * SIZE, 0)
triangle(ctx)
if fill:
ctx.fill()
else:
ctx.stroke()
ctx.new_path()
ctx.translate(3 * SIZE, 0)
inf(ctx)
if fill:
ctx.fill()
else:
ctx.stroke()
ctx.restore()
def fill_shapes(ctx, x, y):
draw_shapes(ctx, x, y, True)
def stroke_shapes(ctx, x, y):
draw_shapes(ctx, x, y, False)
def draw(da, ctx):
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(SIZE / 4)
ctx.set_tolerance(0.1)
ctx.set_line_join(cairo.LINE_JOIN_ROUND)
ctx.set_dash([SIZE / 4.0, SIZE / 4.0], 0)
stroke_shapes(ctx, 0, 0)
ctx.set_dash([], 0)
stroke_shapes(ctx, 0, 3 * SIZE)
ctx.set_line_join(cairo.LINE_JOIN_BEVEL)
stroke_shapes(ctx, 0, 6 * SIZE)
ctx.set_line_join(cairo.LINE_JOIN_MITER)
stroke_shapes(ctx, 0, 9 * SIZE)
fill_shapes(ctx, 0, 12 * SIZE)
ctx.set_line_join(cairo.LINE_JOIN_BEVEL)
fill_shapes(ctx, 0, 15 * SIZE)
ctx.set_source_rgb(1, 0, 0)
stroke_shapes(ctx, 0, 15 * SIZE)
def main():
win = Gtk.Window()
win.connect('destroy', Gtk.main_quit)
win.set_default_size(450, 550)
drawingarea = Gtk.DrawingArea()
win.add(drawingarea)
drawingarea.connect('draw', draw)
win.show_all()
Gtk.main()
if __name__ == '__main__':
main()
|
emperrors/fetchLinuxIDC
|
cairo_bin/test_gtk.py
|
Python
|
gpl-3.0
| 2,617
|
[
"Bowtie"
] |
f691a1e8e9127597ec962140b524d621ccb03c27c3078010cf7e2156a0f1b726
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
********************************
**espressopp.analysis.XDensity**
********************************
.. function:: espressopp.analysis.XDensity(system)
:param system:
:type system:
.. function:: espressopp.analysis.XDensity.compute(rdfN)
:param rdfN:
:type rdfN:
:rtype:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.Observable import *
from _espressopp import analysis_XDensity
class XDensityLocal(ObservableLocal, analysis_XDensity):
def __init__(self, system):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, analysis_XDensity, system)
def compute(self, rdfN):
return self.cxxclass.compute(self, rdfN)
if pmi.isController :
class XDensity(Observable):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
pmicall = [ "compute" ],
cls = 'espressopp.analysis.XDensityLocal'
)
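# Illustrative usage (a sketch based on the docstring above, not an additional API):
# 'system' is assumed to be an existing espressopp System with stored particles, and
# rdfN is assumed here to be the number of bins of the density profile along x.
#
# >>> xdensity = espressopp.analysis.XDensity(system)
# >>> profile = xdensity.compute(100)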
|
capoe/espressopp.soap
|
src/analysis/XDensity.py
|
Python
|
gpl-3.0
| 1,841
|
[
"ESPResSo"
] |
77235df4f805a5d578f507d814b849f0f27ebea2b185f06954691195681bc9b5
|
'''
cheshift plugin-in: Validate your protein model with PyMOL
Described at PyMOL wiki: http://www.pymolwiki.org/index.php/cheshift
Author : Osvaldo Martin
email: aloctavodia@gmail.com
Date: September 2014
License: GNU General Public License
Version 3.6
'''
import Tkinter
from Tkinter import *
import tkFileDialog
import Pmw
from pymol import cmd, stored
import sys
import re
import os
try:
import numpy as np
except ImportError:
print '<'*80 + '\n\nCheShift needs NumPy to be installed in your system, please follow the instructions at\nhttp://www.pymolwiki.org/index.php/cheshift\n\n' + '>'*80
try:
from scipy.interpolate import griddata
except ImportError:
print '<'*80 + '\n\nCheShift needs SciPy to be installed in your system, please follow the instructions at\nhttp://www.pymolwiki.org/index.php/cheshift\n\n' + '>'*80
path = os.path.dirname(__file__)
def __init__(self):
"""Add this Plugin to the PyMOL menu"""
self.menuBar.addmenuitem('Plugin', 'command',
'Cheshift',
label = 'Cheshift',
command = lambda : mainDialog())
def validation(pdb_filename, cs_exp):
"""Run the CheShift validation routine"""
cmd.set('suspend_updates', 'on')
cs_exp_name = os.path.basename(cs_exp).split('.')[0]
pose, residues, total_residues, states = pose_from_pdb(pdb_filename)
reference = bmrb2cheshift(cs_exp, cs_exp_name)
ok, ocslist_full_new = check_seq(residues, cs_exp_name)
if ok == 1:
Db = load(path)
cs_2_colors(cs_exp_name, pose, residues, total_residues, states, reference, Db)
clean(pose)
colorize()
cmd.set('suspend_updates', 'off')
#print '<'*80 + '\nCheShift-2 Validation Report saved at\n%s.zip\n' % details_path + '>'*80
def prediction(pdb_filename):
"""Run the CheShift CS prediction routine"""
cmd.set('suspend_updates', 'on')
pose, residues, total_residues, states = pose_from_pdb(pdb_filename)
Db = load(path)
raw(pose, residues, total_residues, states, Db)
print '<'*80 + '\nYou didn`t provide a file with chemical Shifts, hence CheShift-2 assumed you\n only wanted the predicted CS. The predicted chemical shifts can be found in the file %s.txt\n' % pose + '>'*80
for sel in ['A', 'B', 'C', 'D']:
cmd.delete(sel)
cmd.set('suspend_updates', 'off')
def mainDialog():
""" Creates the GUI """
master = Tk()
master.title(' CheShift ')
w = Tkinter.Label(master, text='\nCheShift: Validate your protein model with PyMOl',
background = 'black',
foreground = 'white')
w.pack(expand=1, fill = 'both', padx=4, pady=4)
############################ NoteBook #########################################
Pmw.initialise()
nb = Pmw.NoteBook(master, hull_width=430, hull_height=250)
p1 = nb.add('Run CheShift')
p2 = nb.add(' Color code ')
p4 = nb.add(' About ')
nb.pack(padx=5, pady=5, fill=BOTH, expand=1)
############################ RUN CheShift TAB #################################
# select files
group = Pmw.Group(p1, tag_text='Select your file')
group.pack(fill='both', expand=1, padx=5, pady=5)
Label(group.interior(), text =u"""
If you don't select a file, CheShift will predict the
13C\u03B1 and 13C\u03B2 chemical shifts values for the
currently loaded protein.
If you choose a file, CheShift will validate your protein model.
""",justify=LEFT).pack()
Button(group.interior(), text='Chemical Shift file', command=retrieve_cs).pack()
# Run
Button(p1, text="Run", command=run).pack(side=BOTTOM)
############################ COLOR TAB ########################################
Label(p2, text =u"""
Colors indicate the difference between predicted and
observed 13C\u03B1 and 13C\u03B2 chemical shifts values
averaged over all uploaded conformers.
Green, yellow and red colors represent small, medium
and large differences, respectively.
White is used if either the prediction fail or the
observed value is missing
    CheShift-2 provides alternative rotamers for blue residues.
""",justify=LEFT).pack()
Button(p2, text="Reset View", command=colorize).pack(side=BOTTOM)
############################ About TAB ########################################
Label(p4, text = """
If you find CheShift useful please cite:
Martin O.A. Arnautova Y.A. Icazatti A.A.
Scheraga H.A. and Vila J.A.
A Physics-Based Method to Validate and
Repair Flaws in Protein Structures.
Proc Natl Acad Sci USA 2013. 110(42):16826-31
""",justify=CENTER).pack()
master.mainloop()
def retrieve_cs():
"""Loads a Chemical Shift file provided by the user"""
global cs_path
cs_path = tkFileDialog.askopenfilename(title="Open chemical shift file", filetypes=[("All files","*")])
if len(cs_path) == 0:
del cs_path
def colorize():
    """Color the protein according to the b-factor.
    Uses the 'cheshift' color code."""
try:
cmd.spectrum('b', 'red_yellow_green', minimum='-1.0', maximum='0.0')
cmd.select('missing', 'b = -2.0')
cmd.color('white','missing')
cmd.delete('missing')
cmd.select('fixable', 'b = 2.0')
cmd.color('blue','fixable')
cmd.delete('fixable')
cmd.hide()
cmd.show('cartoon')
except:
pass
def run():
"""Checks if files were provided and calls the validation
or prediction routine"""
pdb = 0
cs = 0
try:
pdb_filename = cmd.get_names('all')[0]
print pdb_filename
pdb = 1
except:
Pmw.MessageDialog(title = 'Error',message_text = 'Please choose a\n PDB file')
if pdb == 1:
try:
cs_path
cs = 1
except:
pass
if pdb == 1 and cs == 1:
validation(pdb_filename, cs_path)
if pdb == 1 and cs == 0:
prediction(pdb_filename)
def bmrb2cheshift(cs_exp, cs_exp_name):
"""Parse the experimental chemical shifts file. Stores the data in an easy
format for further processing"""
for line in open('%s' % cs_exp).readlines():
if 'DSS' in line:
reference = 1.7
elif 'TSP' in line:
reference = 1.82
elif 'TMS' in line:
reference = 0.00
try:
reference
except:
        print 'Unknown reference value, please check the BMRB file'
sys.exit()
try:
cs_exp_ca = []
cs_exp_cb = []
a = re.compile('[0-9]{1,5}\s{1,4}[A-Z]{3}\sCA\s{0,3}C.{0,5}[0-9]*\.[0-9]{0,2}')
b = re.compile('[0-9]{1,5}\s{1,4}[A-Z]{3}\sCB\s{0,3}C.{0,5}[0-9]*\.[0-9]{0,2}')
for line in open('%s' % cs_exp).readlines():
if a.search(line):
data = a.search(line).group().split()
cs_exp_ca.append(data)
if b.search(line):
data = b.search(line).group().split()
cs_exp_cb.append(data)
len_a = len(cs_exp_ca)
len_b = len(cs_exp_cb)
if len_a > len_b:
dif = len_a - len_b
for i in range(0, dif):
cs_exp_cb.append(['99999'])
elif len_a < len_b:
dif = len_b - len_a
for i in range(0, dif):
cs_exp_ca.append(['99999'])
count_ca = 0
count_cb = 0
ocs_list = []
while True:
try:
resn_ca = int(cs_exp_ca[count_ca][0])
resn_cb = int(cs_exp_cb[count_cb][0])
if resn_ca == resn_cb:
line = '%4s %3s %6.2f %6.2f\n' % (cs_exp_ca[count_ca][0], cs_exp_ca[count_ca][1], float(cs_exp_ca[count_ca][-1]), float(cs_exp_cb[count_cb][-1]))
ocs_list.append(line)
count_ca += 1
count_cb += 1
if resn_ca > resn_cb:
line = '%4s %3s %6.2f %6.2f\n' % (cs_exp_cb[count_cb][0], cs_exp_cb[count_cb][1], 999.00, float(cs_exp_cb[count_cb][-1]))
ocs_list.append(line)
count_cb += 1
if resn_ca < resn_cb:
line = '%4s %3s %6.2f %6.2f\n' % (cs_exp_ca[count_ca][0], cs_exp_ca[count_ca][1], float(cs_exp_ca[count_ca][-1]), 999.00)
ocs_list.append(line)
count_ca += 1
except:
break
res_old = int(ocs_list[0].split()[0])
count0 = 0
count1 = 0
safe = 0
fd = open('%s.ocs' % cs_exp_name, 'w')
while count0 < len(ocs_list):
safe += 1
if safe > len(cs_exp_ca)*5:
break
res_new = int(ocs_list[count0].split()[0])
if res_old + count1 == res_new:
fd.write(ocs_list[count0])
count0 += 1
count1 += 1
else:
fd.write('%4s UNK 999.00 999.00\n' % (res_old + count1))
count1 += 1
fd.close()
except:
fd = open('%s.ocs' % cs_exp_name, 'w')
cs_file = open('%s' % cs_exp).readlines()
reference = cs_file[0]
for line in cs_file[1:]:
fd.write(line)
fd.close()
return reference
def check_seq(residues, cs_exp_name):
""" Compares if the aminoacidic sequence in the bmrb file matchs
the one in the pdb file."""
    # read a PDB and extract the sequence using three-letter codes and save it to a list
ok = 1
ocslist = [] #contains the sequence from the ocs file
ocslist_full = [] #contains the whole ocs file
ocslist_full_new = [] #contains the corrected ocs file i.e. including UNK
for line in open('%s.ocs' % (cs_exp_name)).readlines():
ocslist_full.append(line)
ocslist.append(line.split()[1])
indelfirst, indellast = align(ocslist, residues)
if indelfirst == 0 and indellast == 0:
ocslist_full_new = list(ocslist_full)
else:
firstocs = int(ocslist_full[0].split()[0])
lastocs = int(ocslist_full[-1].split()[0])
newfirst = firstocs - indelfirst
start = 0
stop = len(ocslist_full)
if indelfirst < 0:
start = abs(indelfirst)
if indellast < 0:
stop = len(ocslist_full) + indellast
line = ('%s' % ocslist_full[0])
for i in range(newfirst, firstocs): #works only if indelfirst is greater than 0
line = ('%4s %3s %6.2f %6.2f\n' % (i, 'UNK', 999.00, 999.00))
ocslist_full_new.append(line)
for i in range(start, stop):
line = ('%s' % ocslist_full[i])
ocslist_full_new.append(line)
for i in range(lastocs, lastocs+indellast):#works only if indellast is positive
line = ('%4s %3s %6.2f %6.2f\n' % (i, 'UNK', 999.00, 999.00))
ocslist_full_new.append(line)
#check if both sequences match
fd = open('%s.ocs' % (cs_exp_name), 'w')
for i, residue in enumerate(residues):
if residue == ocslist_full_new[i].split()[1] or ocslist_full_new[i].split()[1] == 'UNK':
fd.write('%s' % ocslist_full_new[i])
else:
pdb_res = residue
ocs_res = ocslist[i-indelfirst]
pdb_num = i + 1
ocs_num = i + 1 - indelfirst
ocs_seq = ' '.join(ocslist)
ok = 0
for ocs in ocslist:
if ocs_seq.endswith('UNK'):
ocs_seq = ocs_seq[:-4]
else:
break
print 'The residue %s-%s in your PDB file does not match with residue %s-%s in the chemical shift file %s %s %s' % (pdb_res, pdb_num, ocs_res, ocs_num, ocslist[i-1], ocslist[i], ocslist[i+1])
break
fd.close()
return ok, ocslist_full_new
def align(three0_list, three1_list):
"""This function aligns two sequences using a brute force algorithm.
Returns how many positions the first sequence is shifted at the beginning
and how many at the end. The sequences must have not indels and the first
sequence must be shorter than the second"""
three2one={'ALA':'A','ARG':'R','ASN':'N','ASP':'D','CYS':'C','GLN':'Q','GLU':'E','GLY':'G','HIS':'H','ILE':
'I','LEU':'L','LYS':'K','MET':'M','PHE':'F','PRO':'P','SER':'S','THR':'T','TRP':'W','TYR':'Y','VAL':'V', 'UNK':'U'}
#convert both lists from 3 letter code to one letter code
one0_list = []
one1_list = []
for res in three0_list:
one0_list.append(three2one[res])
for res in three1_list:
one1_list.append(three2one[res])
#convert one letter code list to strings
a_seq = ''.join(one0_list)
b_seq = ''.join(one1_list)
# get the length of the sequences
len_a_seq = len(a_seq)
len_b_seq = len(b_seq)
#create two new sequences of the same length
seq_1 = len(b_seq) * '.' + a_seq
seq_2 = b_seq + len(a_seq) * '.'
    # compare both sequences; the trick is to iteratively delete the first character
    # of sequence 1 and the last of sequence 2.
dif = []
for shift in range(0, len(seq_1)-1):
seq_1 = seq_1[1:]
seq_2 = seq_2[0:len(seq_2)-1]
matching = 0
for i in range(0, len(seq_1)):
if seq_1[i] == seq_2[i]:
matching += 1
dif.append(matching)
maximun = max(dif)
for values in range(0, len(dif)):
if maximun == dif[values]:
beginning = len_b_seq-(values+1)
endding = len_b_seq-len_a_seq-beginning
return beginning, endding
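# A minimal usage sketch (hypothetical sequences): with the chemical-shift sequence
# missing one residue at each end of the PDB sequence,
#
#   ocs = ['ALA', 'GLY', 'SER']
#   pdb = ['MET', 'ALA', 'GLY', 'SER', 'LYS']
#   align(ocs, pdb)   # -> (1, 1): shifted by one residue at the beginning and one at the end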
def pose_from_pdb(pdb_file_name):
"""Gets information from the pdb like the number of residues, the sequence,
the number of states and the name of the object"""
pose = pdb_file_name
remStr = "all and not (alt ''+A)"
cmd.remove(remStr)
cmd.alter('all', "alt=''")
stored.residues = []
stored.ResiduesNumber = []
cmd.iterate('(name ca)','stored.residues.append((resn))')
cmd.iterate('all','stored.ResiduesNumber.append((resi))')
first = int(stored.ResiduesNumber[0])
cmd.alter(pose, 'resi=str(int(resi)-%s)' % (first))
cmd.sort(pose)
states = cmd.count_states('all') + 1
return pose, stored.residues, len(stored.residues), states
def get_phi(res_num, state):
"""Computes the phi torsional angle"""
if res_num != 0:
cmd.select('A', 'resi %s and name C' % (res_num-1))
cmd.select('B', 'resi %s and name N' % res_num)
cmd.select('C', 'resi %s and name CA' % res_num)
cmd.select('D', 'resi %s and name C' % res_num)
return cmd.get_dihedral('A', 'B', 'C', 'D', state)
else:
return float('nan')
def get_psi(res_num, state, total_residues):
"""Computes the psi torsional angle"""
if res_num != total_residues - 1:
cmd.select('A', 'resi %s and name N' % res_num)
cmd.select('B', 'resi %s and name CA' % res_num)
cmd.select('C', 'resi %s and name C' % res_num)
cmd.select('D', 'resi %s and name N' % (res_num+1))
psi = cmd.get_dihedral('A', 'B', 'C', 'D', state)
return psi
else:
return float('nan')
def get_omega(res_num, state, total_residues):
"""Computes the omega torsional angle"""
if res_num != total_residues-1:
cmd.select('A', 'resi %s and name CA' % res_num)
cmd.select('B', 'resi %s and name C' % res_num)
cmd.select('C', 'resi %s and name N' % (res_num+1))
cmd.select('D', 'resi %s and name CA' % (res_num+1))
omega = cmd.get_dihedral('A', 'B', 'C', 'D', state)
return omega
else:
return float('nan')
def get_chi1(res_num, res_name, state):
"""Computes the chi1 torsional angle"""
if res_name not in ['ALA', 'GLY', 'PRO']:
cmd.select('A', 'resi %s and name N' % res_num)
cmd.select('B', 'resi %s and name CA' % res_num)
cmd.select('C', 'resi %s and name CB' % res_num)
cmd.select('D', 'resi %s and (name CG or name CG1 or name OG1 or name OG or name SG)' % res_num)
chi1 = cmd.get_dihedral('A', 'B', 'C', 'D', state)
return chi1
else:
return float('nan')
def get_chi2(res_num, res_name, state):
"""Computes the chi2 torsional angle"""
if res_name not in ['ALA', 'GLY', 'PRO', 'SER', 'THR', 'VAL', 'CYS']:
cmd.select('A', 'resi %s and name CA' % res_num)
cmd.select('B', 'resi %s and name CB' % res_num)
cmd.select('C', 'resi %s and (name CG or name CG1 or name OG1 or name OG)' % res_num)
cmd.select('D', 'resi %s and (name CD or name CD1 or name OD1 or name ND1 or name SD)' % res_num)
chi2 = cmd.get_dihedral('A', 'B', 'C', 'D', state)
return chi2
else:
return float('nan')
def load(path):
"""Load the files containing the theoretical chemical shifts. Creates a
dictionary to store the data."""
aminoacids = ['ALA','ARG','ASN','ASP','GLU','GLN','GLY','HIS','ILE','LEU',
'LYS','MET','PHE','PRO','SER','THR','TYR','TRP','VAL']
Db = {}
for aminoacid in aminoacids:
vector = []
for line in open(os.path.join(path, 'CS_DB', 'CS_db_%s' % aminoacid)).readlines():
vector.append(map(float, line.split()))
Db[aminoacid] = vector
return Db
def near_pro(omega, psi, Db):
"""Computes the chemical shifts from the psi and omega torsional angles
by linear interpolation from the theoretical values stored in Db.
    This function works only for proline"""
points = []
values_Ca = []
values_Cb = []
    if omega <= -90:  # torsional angles are circular (-180 == 180); angles smaller than -90
        omega = 180  # are closer to 180 than to 0
    lista = np.array([0., 180.])  # the CheShift database has only two values for the proline omega angle; these
    index = (np.abs(lista-omega)).argmin()  # two lines pick the nearest omega angle in the database
nearestOmega = lista[index]
    for line in Db['PRO']:  # the PRO database is small, so to calculate the theoretical CS we just take
        if line[0] == nearestOmega:  # all the values with the nearest omega
vi, yi, zi = line[1], line[4], line[5]
points.append(vi)
values_Ca.append(yi), values_Cb.append(zi)
points = np.array(points)
values_Ca = np.array(values_Ca)
values_Cb = np.array(values_Cb)
values_Ca_New = griddata(points, values_Ca, (psi), method='linear') #linear interpolation
values_Cb_New = griddata(points, values_Cb, (psi), method='linear') #linear interpolation
return values_Ca_New, values_Cb_New
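# A minimal sketch (not part of the plugin) of the 1-D linear interpolation used above,
# assuming griddata here is scipy.interpolate.griddata or an equivalent (hypothetical values):
#
#   import numpy as np
#   from scipy.interpolate import griddata
#   psi_grid = np.array([-180., -170., -160.])
#   ca_grid = np.array([63.1, 62.8, 62.5])
#   griddata(psi_grid, ca_grid, -165., method='linear')   # -> ~62.65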
def near(phi, psi, chi1, chi2, res_name, Db):
"""Computes the chemical shifts from the torsional angles by linear
interpolation from the theoretical values stored in Db.
    This function works for non-proline residues"""
points = []
values_Ca = []
values_Cb = []
phi_list = []
phi_round = int(round(phi, -1)) # round to the nearest values in the database
psi_round = int(round(psi, -1))
chi1_rotamers = np.array([-180., -150., -120., -90., -60., -30., 0., 30., 60., 90., 120., 150., 180.])
index = (np.abs(chi1_rotamers-chi1)).argmin()
nearestChi1_A = chi1_rotamers[index]
chi1_rotamers_new = np.delete(chi1_rotamers, index)
index = (np.abs(chi1_rotamers_new-chi1)).argmin()
nearestChi1_B = chi1_rotamers_new[index]
if phi > phi_round: # for phi and psi angles get the two nearest values in the database
phi_range = range(phi_round, phi_round+20, 10)
else:
if phi_round == -180:
phi_round = 180
phi_range = range(phi_round-10, phi_round+10, 10)
if psi > psi_round:
psi_range = range(psi_round, psi_round+20, 10)
else:
if psi_round == -180:
psi_round = 180
psi_range = range(psi_round-10, psi_round+10, 10)
    for phi_value in phi_range:  # A trick to avoid reading the whole list. The indexes where
        y = int(phi_value * 0.1 + 19)  # the necessary values are stored (start and end) are calculated.
if y > 37:
y = -(37-y)
lenght = (len(Db[res_name])/37)
end = lenght * y
start = end-lenght
for i in range(start, end):
phi_list.append(Db[res_name][i])
if res_name in ['ALA', 'GLY']:
for line in phi_list:
for psi_value in psi_range:
if psi_value == line[1]:
ui, vi, yi, zi = line[0], line[1], line[4], line[5]
vector = ui, vi
points.append(vector)
values_Ca.append(yi), values_Cb.append(zi)
points = np.array(points)
values_Ca = np.array(values_Ca)
values_Cb = np.array(values_Cb)
values_Ca_New = griddata(points, values_Ca, (phi, psi), method='linear')
values_Cb_New = griddata(points, values_Cb, (phi, psi), method='linear')
return values_Ca_New, values_Cb_New
elif res_name in ['SER', 'THR', 'VAL', 'CYS']:
for line in phi_list:
for psi_value in psi_range:
if psi_value == line[1] and (line[2] == nearestChi1_A or line[2] == nearestChi1_B):
ui, vi, wi, yi, zi = line[0], line[1], line[2], line[4], line[5]
vector = ui, vi, wi
points.append(vector)
values_Ca.append(yi), values_Cb.append(zi)
        points = np.array(points)
values_Ca = np.array(values_Ca)
values_Cb = np.array(values_Cb)
values_Ca_New = griddata(points, values_Ca, (phi, psi, chi1), method='linear')
values_Cb_New = griddata(points, values_Cb, (phi, psi, chi1), method='linear')
return values_Ca_New, values_Cb_New
else:
lista = []
for i in range (0, 3):
rotamer = phi_list[i][3]
if rotamer < 0:
rotamer = rotamer + 360
lista.append(rotamer)
if 0. in lista:
lista.append(360)
lista = np.array(lista)
if chi2 < 0:
chi2 = chi2 + 360
index = (np.abs(lista-chi2)).argmin()
nearestChi2 = lista[index]
if nearestChi2 > 180:
nearestChi2 = nearestChi2 - 360
for line in phi_list:
for psi_value in psi_range:
if psi_value == line[1] and line[3] == nearestChi2 and (line[2] == nearestChi1_A or line[2] == nearestChi1_B):
ui, vi, wi, yi, zi = line[0], line[1], line[2], line[4], line[5]
vector = ui, vi, wi
points.append(vector)
values_Ca.append(yi), values_Cb.append(zi)
points = np.array(points)
values_Ca = np.array(values_Ca)
values_Cb = np.array(values_Cb)
values_Ca_New = griddata(points, values_Ca, (phi, psi, chi1), method='linear')
values_Cb_New = griddata(points, values_Cb, (phi, psi, chi1), method='linear')
return values_Ca_New, values_Cb_New
def get_inout(state, residues, total_residues):
"""Computes if the phi-psi torsional angles of a residue belong to high or
low prababilities areas in the ramachandra plot.
Uses information derived from the Neighbor-dependent Ramachandran
Distributions http://dunbrack.fccc.edu/ndrd/ from Dunbrack Lab"""
def myround(x, base=5):
return int(base * round(x/base))
inout_list = []
for res_num in range(0, total_residues):
if res_num == 0 or res_num == len(residues)-1:
a = [residues[res_num], res_num, 'nan']
inout_list.append(a)
else:
triple = residues[res_num-1]+residues[res_num]+residues[res_num+1]
phi = get_phi(res_num, state)
psi = get_psi(res_num, state, total_residues)
phi_round = myround(phi)
psi_round = myround(psi)
fd = open(os.path.join(path, 'TRIPLE_5', '%s.dat' % triple)).readlines()
if '%4s %4s\n' % (phi_round, psi_round) not in fd:
a = [residues[res_num], res_num, 'low']
inout_list.append(a)
else:
a = [residues[res_num], res_num, 'high']
inout_list.append(a)
return inout_list
def get_outliers(cs_exp_name):
"""Computes if an observed chemical shift is within 3 standard deviation
from the mean of observed values. The observed values were taken from the
BMRB http://www.bmrb.wisc.edu/ref_info/statsel.htm"""
boundaries_CA = {'ALA':(47.23,59.11), 'ARG':(49.86,63.72), 'ASP':(48.57,60.81), 'ASN':(47.88,59.22), 'CYS':(48.28,68.38), 'GLU':(51.07,63.61), 'GLN':(50.18,63.02), 'GLY':(41.37,49.35), 'HIS':(49.57,63.49), 'ILE':(53.58,69.72), 'LEU':(49.29,62.01), 'LYS':(50.38,63.58), 'MET':(49.42,62.86), 'PHE':(50.42,65.84), 'PRO':(58.79,67.91), 'SER':(52.50,64.98), 'THR':(54.47,70.01), 'TRP':(49.95,65.43), 'TYR':(50.57,65.75), 'VAL':(53.92,71.14)}
boundaries_CB = {'ALA':(13.60,24.34), 'ARG':(25.17,36.15), 'ASP':(36.01,45.73), 'ASN':(33.71,43.67), 'CYS':(14.34,50.88), 'GLU':(24.88,35.08), 'GLN':(23.73,34.59), 'HIS':(24.01,36.43), 'ILE':(32.59,44.59), 'LEU':(36.70,47.86), 'LYS':(27.46,38.08), 'MET':(26.32,39.58), 'PHE':(33.74,46.16), 'PRO':(28.31,35.39), 'SER':(59.26,68.32), 'THR':(64.21,75.19), 'TRP':(23.89,36.07), 'TYR':(32.82,45.72), 'VAL':(27.37,38.05)}
fd = open('%s.ocs' % (cs_exp_name))
fd.readline()
low_Ca = []
high_Ca = []
low_Cb = []
high_Cb = []
for line in fd:
res_num = line.split()[0]
res_name = line.split()[1]
cs_exp_Ca_value = float(line.split()[2])
cs_exp_Cb_value = float(line.split()[3])
if cs_exp_Ca_value < 999.00:
res_outlier = res_name.title() + res_num
if cs_exp_Ca_value < boundaries_CA[res_name][0]:
low_Ca.append(res_outlier)
elif cs_exp_Ca_value > boundaries_CA[res_name][1]:
high_Ca.append(res_outlier)
if cs_exp_Cb_value < 999.00:
res_outlier = res_name.title() + res_num
if cs_exp_Cb_value < boundaries_CB[res_name][0] :
low_Cb.append(res_outlier)
elif cs_exp_Cb_value > boundaries_CB[res_name][1]:
high_Cb.append(res_outlier)
return low_Ca, high_Ca, low_Cb, high_Cb
def get_chemical_shifts(ocs_file, residues, total_residues, state, reference, Db):
"""Call the near and near_pro function only for the residues with observed
chemical shifts. Returns the list of computed chemical shifts and the list
of phi, psi, chi1 and chi2 torsional angles"""
chemical_shifts = []
res_num = 0
tors_list = []
for line in open(ocs_file).readlines():
if line.split()[1] == 'UNK':
a = ['UNK', 999.00, 999.00]
chemical_shifts.append(a)
res_name = residues[res_num]
phi = get_phi(res_num, state)
psi = get_psi(res_num, state, total_residues)
chi1 = get_chi1(res_num, res_name, state)
chi2 = get_chi2(res_num, res_name, state)
res_name = residues[res_num]
tors_list.append([res_name, phi, psi, chi1, chi2])
res_num += 1
else:
res_name = residues[res_num]
phi = get_phi(res_num, state)
psi = get_psi(res_num, state, total_residues)
chi1 = get_chi1(res_num, res_name, state)
chi2 = get_chi2(res_num, res_name, state)
try:
res_name_next = residues[res_num+1]
except:
res_name_next = 'GLY'
if res_name != 'PRO' and res_name != 'CYS':
try:
values_Ca_New, values_Cb_New = near(phi, psi, chi1, chi2, res_name, Db)
except:
values_Ca_New = 999.00
values_Cb_New = 999.00
elif res_name == 'CYS':
values_Ca_New = 999.00
values_Cb_New = 999.00
else:
try:
omega = get_omega(res_num-1, state, total_residues)
values_Ca_New, values_Cb_New = near_pro(omega, psi, Db)
except:
values_Ca_New, values_Cb_New = 999.00, 999.00
if res_name_next == 'PRO':
a = [res_name, round((values_Ca_New -1.95 + reference), 2), round((values_Cb_New + reference),2)]
chemical_shifts.append(a)
else:
a = [res_name, round((values_Ca_New + reference), 2), round((values_Cb_New + reference),2)]
chemical_shifts.append(a)
res_num += 1
tors_list.append([res_name, phi, psi, chi1, chi2])
return chemical_shifts, tors_list
def cs_2_colors(cs_exp_name, pose, residues, total_residues, states, reference, Db):
"""Calculates the theoretical chemical shifts and calculates the errors
|CS_theo-Cs_exp|. The errors are discretized and appended to the b-factor
column of the protein model. This function is exclusive of the validation
routines """
cs_theo_list = []
tors_matrix = []
for state in range(1, states):
cs_list, tors_list = get_chemical_shifts('%s.ocs' % (cs_exp_name), residues, total_residues, state, reference, Db)
cs_theo_list.append(cs_list)
tors_matrix.append(list(tors_list))
########## create list with experimental chemical shifts ###########
exp_list = [[],[]]
for nucleus in [0, 1]:
for line in open('%s.ocs' % (cs_exp_name)).readlines():
exp_list[nucleus].append(float(line.split()[nucleus+2]))
#############################################################
    # for each conformation let's find how to fix the side-chains #
#############################################################
new_color_matrix = []
for state in range(0, states-1):
color_list = [[],[]]
disc_list = [[],[]]
for nucleus in [0, 1]:
if nucleus == 0:
nucleus_name = 'Ca'
cut = 1.45
else:
nucleus_name = 'Cb'
cut = 1.77
betalist = []
betalist_disc = []
for residue in range(0, len(exp_list[nucleus])):
theo_value = cs_theo_list[state][residue][nucleus+1]
if theo_value > 100:
betavalue = 999.00
else:
betavalue = abs(theo_value - exp_list[nucleus][residue])
betalist.append(betavalue)
for value in betalist:
if value > 100.:
value_disc = -2.0
color_list[nucleus].append('white')
elif value >= (cut*2):
value_disc = -1.00
color_list[nucleus].append('red')
elif value >= cut:
value_disc = -0.50
color_list[nucleus].append('yellow')
else:
value_disc = 0.00
color_list[nucleus].append('green')
betalist_disc.append(value_disc)
disc_list[nucleus] = betalist_disc
inout_list = get_inout(state, residues, total_residues)
kai2 = {'ARG':[-60,60,180],'ASN':[-75,-20,30],'ASP':[-15,0],'CYS':[float('nan')],'GLU':[-60,60,180],'GLN':[-65,65,180],'HIS':[-75,60,75],'ILE':[-60,60,180],'LEU':[65,175],'LYS':[-60,60,180],'MET':[-60,60,180],'PHE':[-90,90],'SER':[float('nan')],'THR':[float('nan')],'TYR':[-85,80],'TRP':[-105,-90,90],'VAL':[float('nan')]}
report_green = []
report_cyan = []
report_all = []
new_color = []
for i in range(0, len(inout_list)):
try:
phi = tors_matrix[state][i][1]
psi = tors_matrix[state][i][2]
chi1 = tors_matrix[state][i][3]
chi2 = tors_matrix[state][i][4]
except:
pass
res_name = inout_list[i][0]
res_num = inout_list[i][1]
Ca_disc_value = disc_list[0][i]
Cb_disc_value = disc_list[1][i]
report_all.append('%5s%6s%9s%9s%11.4f%10.4f%10.4f%10.4f\n' % (res_name, res_num, color_list[0][i], color_list[1][i], phi, psi, chi1, chi2))
if Ca_disc_value == 0.00 and Cb_disc_value == 0.00:
new_color.append(0)
if inout_list[i][2] == 'low':
report_green.append('%5s%6s%9s%9s%11.4f%10.4f%10.4f%10.4f\n' % (res_name, res_num, color_list[0][i], color_list[1][i], phi, psi, chi1, chi2))
elif Ca_disc_value == -2.00 or Cb_disc_value == -2.00: # if something is missing check if everything is missing
new_color.append(0)
elif res_name in ['ALA', 'PRO']:
new_color.append(0)
else:
if inout_list[i][2] == 'low': # does not make sense to try to fix this
new_color.append(0)
else:
fix = 0
res_name_next = inout_list[i+1][0]
for chi1 in range(-180, 180, 30):
for chi2 in kai2[res_name]:
try:
values_Ca_New, values_Cb_New = near(phi, psi, chi1, chi2, res_name, Db)
except:
values_Ca_New = float('nan')
values_Cb_New = float('nan')
if res_name_next == 'PRO':
Theo_Ca = values_Ca_New - 1.95 + reference
Theo_Cb = values_Cb_New + reference
else:
Theo_Ca = values_Ca_New + reference
Theo_Cb = values_Cb_New + reference
if abs(Theo_Ca - exp_list[0][i]) <= 1.45 and abs(Theo_Cb - exp_list[1][i]) <= 1.77:
report_cyan.append('%5s%6s%9s%9s%11.4f%10.4f%10.4f%10.4f\n' % (res_name, res_num, color_list[0][i], color_list[1][i], phi, psi, chi1, chi2))
fix += 1
if fix == 0:
new_color.append(0)
else:
new_color.append(1)
new_color_matrix.append(new_color)
############# write the ASCII report file #####################################
fd = open('%s_%02d.sc' % (pose, state ), 'w')
fd.write('*'*79+'\n')
fd.write('HEADER CheShift validation report of the protein %s, model %02d\n' % (pose, state))
fd.write('*'*79+'\n')
    fd.write('REMARK The residues listed below are those that occupy highly-populated\nREMARK regions of the Ramachandran map and for which a change in the side-chain\nREMARK torsional angles will lead to a good agreement between the observed and\nREMARK predicted chemical shifts for both the 13Ca and 13Cb nuclei, i.e., they\nREMARK will become "green" rather than yellow or red.\n')
if len(report_cyan) != 0:
fd.write('ResName Res# CA_color CB_color phi psi chi1 chi2 \n')
for i in report_cyan:
fd.write('%s' % i)
else:
fd.write('\n\n +-------------------------------+\n')
fd.write(' + There are no residues to show +\n')
fd.write(' +-------------------------------+\n\n\n')
fd.write('*'*79+'\n')
    fd.write('REMARK The residues listed below belong to low-populated regions of the\nREMARK Ramachandran map and, hence, the good agreement in terms of chemical\nREMARK shift (green) should be considered with caution because there is no\nREMARK one-to-one correspondence between the value of the chemical shift and the\nREMARK conformation of the residue.\n')
if len(report_green) != 0:
fd.write('ResName Res# CA_color CB_color phi psi chi1 chi2 \n')
for i in report_green:
fd.write('%s' % i)
else:
fd.write('\n\n +-------------------------------+\n')
fd.write(' + There are no residues to show +\n')
fd.write(' +-------------------------------+\n\n\n')
    low_Ca, high_Ca, low_Cb, high_Cb = get_outliers(cs_exp_name)  # get residues with unusual experimental chemical shifts
fd.write('*'*79+'\n')
fd.write('REMARK The residues listed below have unusual observed chemical shifts (CS)\nREMARK values according to the statistics of the BMRB database.\nREMARK (http://www.bmrb.wisc.edu/ref_info/statsel.htm)\n')
if len(low_Ca) > 0 or len(high_Ca) > 0 or len(low_Cb) > 0 or len(high_Cb) > 0:
if len(low_Ca) > 0:
low_Ca = ", ".join(low_Ca)
            fd.write('\nResidues with 13Ca CS below 3 standard deviations from the expected value\n%s\n' % low_Ca)
if len(high_Ca) > 0:
high_Ca = ", ".join(high_Ca)
            fd.write('\nResidues with 13Ca CS above 3 standard deviations from the expected value\n%s\n' % high_Ca)
if len(low_Cb) > 0:
low_Cb = ", ".join(low_Cb)
            fd.write('\nResidues with 13Cb CS below 3 standard deviations from the expected value\n%s\n' % low_Cb)
if len(high_Cb) > 0:
high_Cb = ", ".join(high_Cb)
            fd.write('\nResidues with 13Cb CS above 3 standard deviations from the expected value\n%s\n' % high_Cb)
else:
fd.write('\n\n +-------------------------------+\n')
fd.write(' + There are no residues to show +\n')
fd.write(' +-------------------------------+\n\n\n')
fd.write('*'*79+'\n')
fd.write('REMARK All residues are listed below\n')
fd.write('ResName Res# CA_color CB_color phi psi chi1 chi2 \n')
for i in report_all:
fd.write('%s' % i)
fd.close()
disc_list = [[],[]]
color_list = [[],[]]
for nucleus in [0,1]:
if nucleus == 0:
nucleus_name = 'Ca'
cut = 1.45
else:
nucleus_name = 'Cb'
cut = 1.77
cs_theo_ave_list = []
for residue in range(0, len(exp_list[nucleus])):
i = 0
lenght = 0
for conformation in range(0, len(cs_theo_list)):
if cs_theo_list[conformation][residue][nucleus+1] < 100.:
i = i + cs_theo_list[conformation][residue][nucleus+1]
lenght += 1
else:
pass
if lenght == 0:
cs_theo_ave_list.append(-999.00)
else:
cs_theo_ave_list.append((i/lenght))
betalist = []
betalist_disc = []
sqr_error_list = []
for i in range(0, len(exp_list[nucleus])):
betavalue = abs(cs_theo_ave_list[i] - exp_list[nucleus][i])
betalist.append(betavalue)
if betavalue < 100:
sqr_error = (cs_theo_ave_list[i] - exp_list[nucleus][i])**2
sqr_error_list.append(sqr_error)
ca_rmsd = np.sqrt(np.average(sqr_error_list))
        print 'RMSD for %s = %.2f ppm' % (nucleus_name, ca_rmsd)
for value in betalist:
if value > 100:
value_disc = -2.0
color_list[nucleus].append('white')
elif value >= (cut*2):
value_disc = -1.00
color_list[nucleus].append('red')
elif value >= cut:
value_disc = -0.50
color_list[nucleus].append('yellow')
else:
value_disc = 0.00
color_list[nucleus].append('green')
betalist_disc.append(value_disc)
disc_list[nucleus] = betalist_disc
for index in range(0, total_residues):
cmd.alter('%s and resi %s' % (pose, index), 'b=%s' % betalist_disc[index])
cmd.save('%s_%s.pdb' % (pose, nucleus_name), state=0)
blue_list = []
for column in range(0, len(new_color_matrix[0])):
suma = 0
for row in range(0 ,len(new_color_matrix)):
suma += new_color_matrix[row][column]
if suma > 0:
blue_list.append(1)
else:
blue_list.append(0)
new2_color = []
for i in range(0, len(inout_list)):
Ca_disc_value = disc_list[0][i]
Cb_disc_value = disc_list[1][i]
if blue_list[i] == 1:
new2_color.append(2.00)
elif Ca_disc_value == 0.00 and Cb_disc_value == 0.00:
new2_color.append(0.00)
elif Ca_disc_value == -2.00 or Cb_disc_value == -2.00: # if something is missing check if everything is missing
if Ca_disc_value != -2.00:
new2_color.append(Ca_disc_value)
elif Cb_disc_value != -2.00:
new2_color.append(Cb_disc_value)
else:
new2_color.append(-2.00)
else:
if Ca_disc_value == -1.00 or Cb_disc_value == -1.00:
new2_color.append(-1.00)
elif Ca_disc_value == -0.50 or Cb_disc_value == -0.50:
new2_color.append(-0.50)
for index in range(0, total_residues):
cmd.alter('%s and resi %s' % (pose, index), 'b=%s' % new2_color[index])
cmd.save('%s_CaCb.pdb' % pose, state=0)
def get_chemical_shifts_raw(residues, total_residues, state, Db):
"""Call the near and near_pro function for all the residues. This function
is exclusive of the prediction routines"""
chemical_shifts = []
for res_num in range(0, total_residues):
res_name = residues[res_num]
try:
res_name_next = residues[res_num+1]
except:
res_name_next = 'GLY'
if res_name != 'PRO' and res_name != 'CYS':
try:
phi = get_phi(res_num, state)
psi = get_psi(res_num, state, total_residues)
chi1 = get_chi1(res_num, res_name, state)
chi2 = get_chi2(res_num, res_name, state)
values_Ca_New, values_Cb_New = near(phi, psi, chi1, chi2, res_name, Db)
except:
values_Ca_New = 999.00
values_Cb_New = 999.00
elif res_name == 'CYS':
values_Ca_New = 999.00
values_Cb_New = 999.00
else:
try:
omega = get_omega(res_num-1, state, total_residues)
values_Ca_New, values_Cb_New = near_pro(omega, psi, Db)
except:
values_Ca_New, values_Cb_New = 999.00, 999.00
if res_name_next == 'PRO':
a = [res_name, round((values_Ca_New -1.95), 2), round((values_Cb_New),2)]
chemical_shifts.append(a)
else:
a = [res_name, round((values_Ca_New), 2), round((values_Cb_New),2)]
chemical_shifts.append(a)
return chemical_shifts
def raw(pose, residues, total_residues, states, Db):
"""Calculates the theoretical chemical shifts. This function is exclusive
of the prediction routine"""
cs_theo_list = []
for state in range(1, states):
cs_list = get_chemical_shifts_raw(residues, total_residues, state, Db)
cs_theo_list.append(cs_list)
fd = open('%s.txt' % pose, 'w')
fd.write('Ca Chemical Shifts\n')
for residue in range(0, total_residues):
cs_theo_line = []
for conformation in range(0, len(cs_theo_list)):
res_name, Ca_shift, Cb_shift = cs_theo_list[conformation][residue]
if float(Ca_shift) > 100.:
Ca_shift = 999.00
cs_theo_line.append('%6.2f' % (Ca_shift))
res_line = "\t".join(cs_theo_line)
fd.write('%s\t %s\n' % (residues[residue], res_line))
fd.write('\nCb Chemical Shifts\n')
for residue in range(0, len(residues)):
cs_theo_line=[]
for conformation in range(0, len(cs_theo_list)):
res_name, Ca_shift, Cb_shift = cs_theo_list[conformation][residue]
if float(Cb_shift) > 100.:
Cb_shift = 999.00
cs_theo_line.append('%6.2f' % (Cb_shift))
res_line = "\t".join(cs_theo_line)
fd.write('%s\t %s\n' % (residues[residue], res_line))
fd.close()
def clean(pose):
"""Deletes everything and load the validated models"""
cmd.delete('all')
cmd.load('%s_Ca.pdb' % pose)
cmd.load('%s_Cb.pdb' % pose)
cmd.load('%s_CaCb.pdb' % pose)
cmd.intra_fit('%s_Ca' % pose, 0, quiet=1)
cmd.intra_fit('%s_Cb' % pose, 0, quiet=1)
cmd.intra_fit('%s_CaCb' % pose, 0, quiet=1)
cmd.dss('all')
cmd.disable('%s_Ca' % pose)
cmd.disable('%s_Cb' % pose)
|
aloctavodia/cheshift
|
cheshift/__init__.py
|
Python
|
gpl-3.0
| 45,088
|
[
"PyMOL"
] |
602aa1b2de3f25be06d7c0d3a10caece66ab530fba475d0a0048ba033080ca3e
|
""" This object is a wrapper for setting and getting jobs states
"""
import datetime
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.WorkloadManagementSystem.Client.JobState.JobManifest import JobManifest
from DIRAC.WorkloadManagementSystem.Client import JobStatus
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB, singleValueDefFields, multiValueDefFields
from DIRAC.WorkloadManagementSystem.Service.JobPolicy import RIGHT_GET_INFO, RIGHT_RESCHEDULE
from DIRAC.WorkloadManagementSystem.Service.JobPolicy import RIGHT_RESET, RIGHT_CHANGE_STATUS
class JobState:
class DBHold:
def __init__(self):
self.checked = False
self.reset()
def reset(self):
self.jobDB = None
self.logDB = None
self.tqDB = None
__db = DBHold()
@classmethod
def checkDBAccess(cls):
# Init DB if there
if not JobState.__db.checked:
JobState.__db.checked = True
JobState.__db.jobDB = JobDB()
JobState.__db.logDB = JobLoggingDB()
JobState.__db.tqDB = TaskQueueDB()
def __init__(self, jid, source="Unknown"):
self.__jid = jid
self.__source = str(source)
self.checkDBAccess()
@property
def jid(self):
return self.__jid
def setSource(self, source):
self.__source = source
def getManifest(self, rawData=False):
result = JobState.__db.jobDB.getJobJDL(self.__jid)
if not result["OK"] or rawData:
return result
if not result["Value"]:
return S_ERROR("No manifest for job %s" % self.__jid)
manifest = JobManifest()
result = manifest.loadJDL(result["Value"])
if not result["OK"]:
return result
return S_OK(manifest)
def setManifest(self, manifest):
if not isinstance(manifest, JobManifest):
manifestStr = manifest
manifest = JobManifest()
result = manifest.load(manifestStr)
if not result["OK"]:
return result
manifestJDL = manifest.dumpAsJDL()
return self.__retryFunction(5, JobState.__db.jobDB.setJobJDL, (self.__jid, manifestJDL))
# Execute traces
def __retryFunction(self, retries, functor, args=False, kwargs=False):
retries = max(1, retries)
if not args:
args = tuple()
if not kwargs:
kwargs = {}
while retries:
retries -= 1
result = functor(*args, **kwargs)
if result["OK"]:
return result
if retries == 0:
return result
return S_ERROR("No more retries")
right_commitCache = RIGHT_GET_INFO
def commitCache(self, initialState, cache, jobLog):
try:
self.__checkType(initialState, dict)
self.__checkType(cache, dict)
self.__checkType(jobLog, (list, tuple))
except TypeError as excp:
return S_ERROR(str(excp))
result = self.getAttributes(list(initialState))
if not result["OK"]:
return result
if not result["Value"] == initialState:
return S_OK(False)
gLogger.verbose("Job %s: About to execute trace. Current state %s" % (self.__jid, initialState))
data = {"att": [], "jobp": [], "optp": []}
for key in cache:
for dk in data:
if key.find("%s." % dk) == 0:
data[dk].append((key[len(dk) + 1 :], cache[key]))
if data["att"]:
attN = [t[0] for t in data["att"]]
attV = [t[1] for t in data["att"]]
result = self.__retryFunction(
5, JobState.__db.jobDB.setJobAttributes, (self.__jid, attN, attV), {"update": True}
)
if not result["OK"]:
return result
if data["jobp"]:
result = self.__retryFunction(5, JobState.__db.jobDB.setJobParameters, (self.__jid, data["jobp"]))
if not result["OK"]:
return result
for k, v in data["optp"]:
result = self.__retryFunction(5, JobState.__db.jobDB.setJobOptParameter, (self.__jid, k, v))
if not result["OK"]:
return result
if "inputData" in cache:
result = self.__retryFunction(5, JobState.__db.jobDB.setInputData, (self.__jid, cache["inputData"]))
if not result["OK"]:
return result
gLogger.verbose("Adding logging records", " for %s" % self.__jid)
for record, updateTime, source in jobLog:
gLogger.verbose("", "Logging records for %s: %s %s %s" % (self.__jid, record, updateTime, source))
record["date"] = updateTime
record["source"] = source
result = self.__retryFunction(5, JobState.__db.logDB.addLoggingRecord, (self.__jid,), record)
if not result["OK"]:
return result
gLogger.info("Job %s: Ended trace execution" % self.__jid)
# We return a new initial state
return self.getAttributes(list(initialState))
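    # A minimal sketch (hypothetical values) of the cache layout commitCache expects:
    # keys are prefixed with the destination they are routed to above.
    #
    #   cache = {"att.Status": "Checking",       # -> setJobAttributes
    #            "jobp.Priority": "1",           # -> setJobParameters
    #            "optp.MatcherInfo": "...",      # -> setJobOptParameter
    #            "inputData": {}}                # -> setInputData, handled separately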
#
# Status
#
def __checkType(self, value, tList, canBeNone=False):
"""Raise TypeError if the value does not have one of the expected types
:param value: the value to test
:param tList: type or tuple of types
:param canBeNone: boolean, since there is no type for None to be used with isinstance
"""
if canBeNone:
if value is None:
return
if not isinstance(value, tList):
raise TypeError("%s has wrong type. Has to be one of %s" % (value, tList))
right_setStatus = RIGHT_GET_INFO
def setStatus(self, majorStatus, minorStatus=None, appStatus=None, source=None, updateTime=None):
try:
self.__checkType(majorStatus, str)
self.__checkType(minorStatus, str, canBeNone=True)
self.__checkType(appStatus, str, canBeNone=True)
self.__checkType(source, str, canBeNone=True)
self.__checkType(updateTime, datetime.datetime, canBeNone=True)
except TypeError as excp:
return S_ERROR(str(excp))
result = JobState.__db.jobDB.setJobStatus(
self.__jid, status=majorStatus, minorStatus=minorStatus, applicationStatus=appStatus
)
if not result["OK"]:
return result
# HACK: Cause joblogging is crappy
if not minorStatus:
minorStatus = "idem"
if not appStatus:
appStatus = "idem"
if not source:
source = self.__source
return JobState.__db.logDB.addLoggingRecord(
self.__jid,
status=majorStatus,
minorStatus=minorStatus,
applicationStatus=appStatus,
date=updateTime,
source=source,
)
right_getMinorStatus = RIGHT_GET_INFO
def setMinorStatus(self, minorStatus, source=None, updateTime=None):
try:
self.__checkType(minorStatus, str)
self.__checkType(source, str, canBeNone=True)
except TypeError as excp:
return S_ERROR(str(excp))
result = JobState.__db.jobDB.setJobStatus(self.__jid, minorStatus=minorStatus)
if not result["OK"]:
return result
if not source:
source = self.__source
return JobState.__db.logDB.addLoggingRecord(self.__jid, minorStatus=minorStatus, date=updateTime, source=source)
def getStatus(self):
result = JobState.__db.jobDB.getJobAttributes(self.__jid, ["Status", "MinorStatus"])
if not result["OK"]:
return result
data = result["Value"]
if data:
return S_OK((data["Status"], data["MinorStatus"]))
return S_ERROR("Job %d not found in the JobDB" % int(self.__jid))
right_setAppStatus = RIGHT_GET_INFO
def setAppStatus(self, appStatus, source=None, updateTime=None):
try:
self.__checkType(appStatus, str)
self.__checkType(source, str, canBeNone=True)
except TypeError as excp:
return S_ERROR(str(excp))
result = JobState.__db.jobDB.setJobStatus(self.__jid, applicationStatus=appStatus)
if not result["OK"]:
return result
if not source:
source = self.__source
return JobState.__db.logDB.addLoggingRecord(
self.__jid, applicationStatus=appStatus, date=updateTime, source=source
)
right_getAppStatus = RIGHT_GET_INFO
def getAppStatus(self):
result = JobState.__db.jobDB.getJobAttributes(self.__jid, ["ApplicationStatus"])
if result["OK"]:
result["Value"] = result["Value"]["ApplicationStatus"]
return result
# Attributes
right_setAttribute = RIGHT_GET_INFO
def setAttribute(self, name, value):
try:
self.__checkType(name, str)
self.__checkType(value, str)
except TypeError as excp:
return S_ERROR(str(excp))
return JobState.__db.jobDB.setJobAttribute(self.__jid, name, value)
right_setAttributes = RIGHT_GET_INFO
def setAttributes(self, attDict):
try:
self.__checkType(attDict, dict)
except TypeError as excp:
return S_ERROR(str(excp))
keys = [key for key in attDict]
values = [attDict[key] for key in keys]
return JobState.__db.jobDB.setJobAttributes(self.__jid, keys, values)
right_getAttribute = RIGHT_GET_INFO
def getAttribute(self, name):
try:
self.__checkType(name, str)
except TypeError as excp:
return S_ERROR(str(excp))
return JobState.__db.jobDB.getJobAttribute(self.__jid, name)
right_getAttributes = RIGHT_GET_INFO
def getAttributes(self, nameList=None):
try:
self.__checkType(nameList, (list, tuple), canBeNone=True)
except TypeError as excp:
return S_ERROR(str(excp))
return JobState.__db.jobDB.getJobAttributes(self.__jid, nameList)
# OptimizerParameters
right_setOptParameter = RIGHT_GET_INFO
def setOptParameter(self, name, value):
try:
self.__checkType(name, str)
self.__checkType(value, str)
except TypeError as excp:
return S_ERROR(str(excp))
return JobState.__db.jobDB.setJobOptParameter(self.__jid, name, value)
right_setOptParameters = RIGHT_GET_INFO
def setOptParameters(self, pDict):
try:
self.__checkType(pDict, dict)
except TypeError as excp:
return S_ERROR(str(excp))
for name in pDict:
result = JobState.__db.jobDB.setJobOptParameter(self.__jid, name, pDict[name])
if not result["OK"]:
return result
return S_OK()
right_removeOptParameters = RIGHT_GET_INFO
def removeOptParameters(self, nameList):
if isinstance(nameList, str):
nameList = [nameList]
try:
self.__checkType(nameList, (list, tuple))
except TypeError as excp:
return S_ERROR(str(excp))
for name in nameList:
result = JobState.__db.jobDB.removeJobOptParameter(self.__jid, name)
if not result["OK"]:
return result
return S_OK()
right_getOptParameter = RIGHT_GET_INFO
def getOptParameter(self, name):
try:
self.__checkType(name, str)
except TypeError as excp:
return S_ERROR(str(excp))
return JobState.__db.jobDB.getJobOptParameter(self.__jid, name)
right_getOptParameters = RIGHT_GET_INFO
def getOptParameters(self, nameList=None):
try:
self.__checkType(nameList, (list, tuple), canBeNone=True)
except TypeError as excp:
return S_ERROR(str(excp))
return JobState.__db.jobDB.getJobOptParameters(self.__jid, nameList)
# Other
right_resetJob = RIGHT_RESCHEDULE
def rescheduleJob(self, source=""):
result = JobState.__db.tqDB.deleteJob(self.__jid)
if not result["OK"]:
return S_ERROR("Cannot delete from TQ job %s: %s" % (self.__jid, result["Message"]))
result = JobState.__db.jobDB.rescheduleJob(self.__jid)
if not result["OK"]:
return S_ERROR("Cannot reschedule in JobDB job %s: %s" % (self.__jid, result["Message"]))
JobState.__db.logDB.addLoggingRecord(
self.__jid, status=JobStatus.RECEIVED, minorStatus="", applicationStatus="", source=source
)
return S_OK()
right_resetJob = RIGHT_RESET
def resetJob(self, source=""):
result = JobState.__db.jobDB.setJobAttribute(self.__jid, "RescheduleCounter", -1)
if not result["OK"]:
return S_ERROR("Cannot set the RescheduleCounter for job %s: %s" % (self.__jid, result["Message"]))
result = JobState.__db.tqDB.deleteJob(self.__jid)
if not result["OK"]:
return S_ERROR("Cannot delete from TQ job %s: %s" % (self.__jid, result["Message"]))
result = JobState.__db.jobDB.rescheduleJob(self.__jid)
if not result["OK"]:
return S_ERROR("Cannot reschedule in JobDB job %s: %s" % (self.__jid, result["Message"]))
JobState.__db.logDB.addLoggingRecord(
self.__jid, status=JobStatus.RECEIVED, minorStatus="", applicationStatus="", source=source
)
return S_OK()
right_getInputData = RIGHT_GET_INFO
def getInputData(self):
return JobState.__db.jobDB.getInputData(self.__jid)
@classmethod
def checkInputDataStructure(cls, pDict):
if not isinstance(pDict, dict):
return S_ERROR("Input data has to be a dictionary")
for lfn in pDict:
if "Replicas" not in pDict[lfn]:
return S_ERROR("Missing replicas for lfn %s" % lfn)
replicas = pDict[lfn]["Replicas"]
for seName in replicas:
if "SURL" not in replicas or "Disk" not in replicas:
return S_ERROR("Missing SURL or Disk for %s:%s replica" % (seName, lfn))
return S_OK()
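    # A minimal sketch (hypothetical LFN and SE names) of the structure this
    # check accepts:
    #
    #   lfnData = {"/some/vo/file.dat": {"Replicas": {"SE-DISK": {"SURL": "srm://...",
    #                                                             "Disk": True}}}}
    #   JobState.checkInputDataStructure(lfnData)   # -> S_OK()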
right_setInputData = RIGHT_GET_INFO
def set_InputData(self, lfnData):
result = self.checkInputDataStructure(lfnData)
if not result["OK"]:
return result
return JobState.__db.jobDB.setInputData(self.__jid, lfnData)
right_insertIntoTQ = RIGHT_CHANGE_STATUS
def insertIntoTQ(self, manifest=None):
if not manifest:
result = self.getManifest()
if not result["OK"]:
return result
manifest = result["Value"]
reqSection = "JobRequirements"
result = manifest.getSection(reqSection)
if not result["OK"]:
return S_ERROR("No %s section in the job manifest" % reqSection)
reqCfg = result["Value"]
jobReqDict = {}
for name in singleValueDefFields:
if name in reqCfg:
if name == "CPUTime":
jobReqDict[name] = int(reqCfg[name])
else:
jobReqDict[name] = reqCfg[name]
for name in multiValueDefFields:
if name in reqCfg:
jobReqDict[name] = reqCfg.getOption(name, [])
jobPriority = reqCfg.getOption("UserPriority", 1)
result = self.__retryFunction(2, JobState.__db.tqDB.insertJob, (self.__jid, jobReqDict, jobPriority))
if not result["OK"]:
errMsg = result["Message"]
# Force removing the job from the TQ if it was actually inserted
result = JobState.__db.tqDB.deleteJob(self.__jid)
if result["OK"]:
if result["Value"]:
gLogger.info("Job %s removed from the TQ" % self.__jid)
return S_ERROR("Cannot insert in task queue: %s" % errMsg)
return S_OK()
|
DIRACGrid/DIRAC
|
src/DIRAC/WorkloadManagementSystem/Client/JobState/JobState.py
|
Python
|
gpl-3.0
| 16,139
|
[
"DIRAC"
] |
32ae72d51b4667a903408403d53f4ac0010c16f1b687fe33c5f181493d18ef7b
|
# This script processes MIMIC-III dataset and builds longitudinal diagnosis records for patients with at least two visits.
# The output data are cPickled, and suitable for training Doctor AI or RETAIN
# Written by Edward Choi (mp2893@gatech.edu)
# Usage: Put this script in the folder where the MIMIC-III CSV files are located. Then execute the below command.
# python process_mimic.py ADMISSIONS.csv DIAGNOSES_ICD.csv <output file>
# Output files
# <output file>.seqs: Dataset that follows the format described in the README.md.
# <output file>.types: Python dictionary that maps string diagnosis codes to integer diagnosis codes.
# <output file>.3digitICD9.seqs: Dataset that follows the format described in the README.md. This uses only the first 3 digits of the ICD9 diagnosis code.
# <output file>.3digitICD9.types: Python dictionary that maps 3-digit string diagnosis codes to integer diagnosis codes.
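# A minimal sketch (not part of this script) of how the cPickled outputs can be read
# back, assuming the same Python 2 environment used to create them:
#
#   import cPickle as pickle
#   seqs = pickle.load(open('<output file>.seqs', 'rb'))
#   types = pickle.load(open('<output file>.types', 'rb'))
#   # seqs is a flat list of visits (lists of integer codes), with [-1] separating patients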
import sys
import cPickle as pickle
from datetime import datetime
def convert_to_icd9(dxStr):
if dxStr.startswith('E'):
if len(dxStr) > 4: return dxStr[:4] + '.' + dxStr[4:]
else: return dxStr
else:
if len(dxStr) > 3: return dxStr[:3] + '.' + dxStr[3:]
else: return dxStr
def convert_to_3digit_icd9(dxStr):
if dxStr.startswith('E'):
if len(dxStr) > 4: return dxStr[:4]
else: return dxStr
else:
if len(dxStr) > 3: return dxStr[:3]
else: return dxStr
if __name__ == '__main__':
admissionFile = sys.argv[1]
diagnosisFile = sys.argv[2]
outFile = sys.argv[3]
print 'Building pid-admission mapping, admission-date mapping'
pidAdmMap = {}
admDateMap = {}
infd = open(admissionFile, 'r')
infd.readline()
for line in infd:
tokens = line.strip().split(',')
pid = int(tokens[1])
admId = int(tokens[2])
admTime = datetime.strptime(tokens[3], '%Y-%m-%d %H:%M:%S')
admDateMap[admId] = admTime
if pid in pidAdmMap: pidAdmMap[pid].append(admId)
else: pidAdmMap[pid] = [admId]
infd.close()
print 'Building admission-dxList mapping'
admDxMap = {}
admDxMap_3digit = {}
infd = open(diagnosisFile, 'r')
infd.readline()
for line in infd:
tokens = line.strip().split(',')
admId = int(tokens[2])
dxStr = 'D_' + convert_to_icd9(tokens[4][1:-1]) ############## Uncomment this line and comment the line below, if you want to use the entire ICD9 digits.
dxStr_3digit = 'D_' + convert_to_3digit_icd9(tokens[4][1:-1])
if admId in admDxMap:
admDxMap[admId].append(dxStr)
else:
admDxMap[admId] = [dxStr]
if admId in admDxMap_3digit:
admDxMap_3digit[admId].append(dxStr_3digit)
else:
admDxMap_3digit[admId] = [dxStr_3digit]
infd.close()
print 'Building pid-sortedVisits mapping'
pidSeqMap = {}
pidSeqMap_3digit = {}
for pid, admIdList in pidAdmMap.iteritems():
if len(admIdList) < 2: continue
sortedList = sorted([(admDateMap[admId], admDxMap[admId]) for admId in admIdList])
pidSeqMap[pid] = sortedList
sortedList_3digit = sorted([(admDateMap[admId], admDxMap_3digit[admId]) for admId in admIdList])
pidSeqMap_3digit[pid] = sortedList_3digit
print 'Building pids, dates, strSeqs'
pids = []
dates = []
seqs = []
for pid, visits in pidSeqMap.iteritems():
pids.append(pid)
seq = []
date = []
for visit in visits:
date.append(visit[0])
seq.append(visit[1])
dates.append(date)
seqs.append(seq)
print 'Building pids, dates, strSeqs for 3digit ICD9 code'
seqs_3digit = []
for pid, visits in pidSeqMap_3digit.iteritems():
seq = []
for visit in visits:
seq.append(visit[1])
seqs_3digit.append(seq)
print 'Converting strSeqs to intSeqs, and making types'
types = {}
newSeqs = []
for patient in seqs:
newPatient = []
for visit in patient:
newVisit = []
for code in visit:
if code in types:
newVisit.append(types[code])
else:
types[code] = len(types)
newVisit.append(types[code])
newPatient.append(newVisit)
newSeqs.append(newPatient)
print 'Converting strSeqs to intSeqs, and making types for 3digit ICD9 code'
types_3digit = {}
newSeqs_3digit = []
for patient in seqs_3digit:
newPatient = []
for visit in patient:
newVisit = []
for code in set(visit):
if code in types_3digit:
newVisit.append(types_3digit[code])
else:
types_3digit[code] = len(types_3digit)
newVisit.append(types_3digit[code])
newPatient.append(newVisit)
newSeqs_3digit.append(newPatient)
print 'Re-formatting to Med2Vec dataset'
seqs = []
for patient in newSeqs:
seqs.extend(patient)
seqs.append([-1])
seqs = seqs[:-1]
seqs_3digit = []
for patient in newSeqs_3digit:
seqs_3digit.extend(patient)
seqs_3digit.append([-1])
seqs_3digit = seqs_3digit[:-1]
pickle.dump(seqs, open(outFile+'.seqs', 'wb'), -1)
pickle.dump(types, open(outFile+'.types', 'wb'), -1)
pickle.dump(seqs_3digit, open(outFile+'.3digitICD9.seqs', 'wb'), -1)
pickle.dump(types_3digit, open(outFile+'.3digitICD9.types', 'wb'), -1)
|
mp2893/med2vec
|
process_mimic.py
|
Python
|
bsd-3-clause
| 4,885
|
[
"VisIt"
] |
3257e2efd3d325deb87f1bb57810ceda6edccb52115a6162ceb586733d64d47d
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RSnpstats(RPackage):
"""SnpMatrix and XSnpMatrix classes and methods.
Classes and statistical methods for large SNP association studies. This
extends the earlier snpMatrix package, allowing for uncertainty in
genotypes."""
homepage = "https://bioconductor.org/packages/snpStats"
git = "https://git.bioconductor.org/packages/snpStats.git"
version('1.34.0', commit='e31cdfb18a9e12d70d6a3e8e6fbf7cf8faa3ea5b')
version('1.32.0', commit='7c31158183b4e39da6dc30c7da275acc36b2e32f')
version('1.30.0', commit='0dc1e4246f015feaf2579d60268b10ab5149ce09')
version('1.28.0', commit='8df9f4188f720dfbb4f4f4ec255cd2e22f3f4426')
version('1.26.0', commit='7c9b3304073e0556d694a8531882b349822fdda8')
depends_on('r@2.10.0:', type=('build', 'run'))
depends_on('r-survival', type=('build', 'run'))
depends_on('r-matrix', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-zlibbioc', type=('build', 'run'))
|
iulian787/spack
|
var/spack/repos/builtin/packages/r-snpstats/package.py
|
Python
|
lgpl-2.1
| 1,232
|
[
"Bioconductor"
] |
e24b2c6ab07d9a5ec4d063a58b6e1cc731cb8d219a0a981a921d66303efbaf59
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import email.parser
import logging
import json
import math
import os
import sys
import traceback
import unittest
from contextlib import contextmanager
from shutil import rmtree, copyfile, move
import gc
import time
from textwrap import dedent
from hashlib import md5
import collections
from pyeclib.ec_iface import ECDriverError
from tempfile import mkdtemp, NamedTemporaryFile
import weakref
import operator
import functools
from swift.obj import diskfile
import re
import random
from collections import defaultdict
import uuid
import mock
from eventlet import sleep, spawn, wsgi, Timeout, debug
from eventlet.green import httplib
from six import BytesIO
from six import StringIO
from six.moves import range
from six.moves.urllib.parse import quote
from test import listen_zero
from test.unit import (
connect_tcp, readuntil2crlfs, FakeLogger, fake_http_connect, FakeRing,
FakeMemcache, debug_logger, patch_policies, write_fake_ring,
mocked_http_conn, DEFAULT_TEST_EC_TYPE, make_timestamp_iter)
from test.unit.helpers import setup_servers, teardown_servers
from swift.proxy import server as proxy_server
from swift.proxy.controllers.obj import ReplicatedObjectController
from swift.obj import server as object_server
from swift.common.middleware import proxy_logging, versioned_writes, \
copy
from swift.common.middleware.acl import parse_acl, format_acl
from swift.common.exceptions import ChunkReadTimeout, DiskFileNotExist, \
APIVersionError, ChunkWriteTimeout
from swift.common import utils, constraints
from swift.common.utils import hash_path, storage_directory, \
parse_content_type, parse_mime_headers, \
iter_multipart_mime_documents, public, mkdirs, NullLogger
from swift.common.wsgi import monkey_patch_mimetools, loadapp, ConfigString
from swift.proxy.controllers import base as proxy_base
from swift.proxy.controllers.base import get_cache_key, cors_validation, \
get_account_info, get_container_info
import swift.proxy.controllers
import swift.proxy.controllers.obj
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.swob import Request, Response, HTTPUnauthorized, \
HTTPException, HTTPBadRequest
from swift.common.storage_policy import StoragePolicy, POLICIES
import swift.common.request_helpers
from swift.common.request_helpers import get_sys_meta_prefix
# mocks
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
STATIC_TIME = time.time()
_test_context = _test_servers = _test_sockets = _testdir = \
_test_POLICIES = None
def do_setup(object_server):
# setup test context and break out some globals for convenience
global _test_context, _testdir, _test_servers, _test_sockets, \
_test_POLICIES
monkey_patch_mimetools()
_test_context = setup_servers(object_server)
_testdir = _test_context["testdir"]
_test_servers = _test_context["test_servers"]
_test_sockets = _test_context["test_sockets"]
_test_POLICIES = _test_context["test_POLICIES"]
def unpatch_policies(f):
"""
This will unset a TestCase level patch_policies to use the module level
policies setup for the _test_servers instead.
N.B. You should NEVER modify the _test_server policies or rings during a
test because they persist for the life of the entire module!
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
with patch_policies(_test_POLICIES):
return f(*args, **kwargs)
return wrapper
def setUpModule():
do_setup(object_server)
def tearDownModule():
teardown_servers(_test_context)
def sortHeaderNames(headerNames):
"""
Return the given string of header names sorted.
    headerNames: a comma-delimited list of header names
"""
headers = [a.strip() for a in headerNames.split(',') if a.strip()]
headers.sort()
return ', '.join(headers)
def parse_headers_string(headers_str):
headers_dict = HeaderKeyDict()
for line in headers_str.split('\r\n'):
if ': ' in line:
header, value = line.split(': ', 1)
headers_dict[header] = value
return headers_dict
def node_error_count(proxy_app, ring_node):
# Reach into the proxy's internals to get the error count for a
# particular node
node_key = proxy_app._error_limit_node_key(ring_node)
return proxy_app._error_limiting.get(node_key, {}).get('errors', 0)
def node_last_error(proxy_app, ring_node):
# Reach into the proxy's internals to get the last error for a
# particular node
node_key = proxy_app._error_limit_node_key(ring_node)
return proxy_app._error_limiting.get(node_key, {}).get('last_error')
def set_node_errors(proxy_app, ring_node, value, last_error):
# Set the node's error count to value
node_key = proxy_app._error_limit_node_key(ring_node)
stats = proxy_app._error_limiting.setdefault(node_key, {})
stats['errors'] = value
stats['last_error'] = last_error
class FakeMemcacheReturnsNone(FakeMemcache):
def get(self, key):
# Returns None as the timestamp of the container; assumes we're only
# using the FakeMemcache for container existence checks.
return None
@contextmanager
def save_globals():
orig_http_connect = getattr(swift.proxy.controllers.base, 'http_connect',
None)
orig_account_info = getattr(swift.proxy.controllers.Controller,
'account_info', None)
orig_container_info = getattr(swift.proxy.controllers.Controller,
'container_info', None)
try:
yield True
finally:
swift.proxy.controllers.Controller.account_info = orig_account_info
swift.proxy.controllers.base.http_connect = orig_http_connect
swift.proxy.controllers.obj.http_connect = orig_http_connect
swift.proxy.controllers.account.http_connect = orig_http_connect
swift.proxy.controllers.container.http_connect = orig_http_connect
swift.proxy.controllers.Controller.container_info = orig_container_info
def set_http_connect(*args, **kwargs):
new_connect = fake_http_connect(*args, **kwargs)
swift.proxy.controllers.base.http_connect = new_connect
swift.proxy.controllers.obj.http_connect = new_connect
swift.proxy.controllers.account.http_connect = new_connect
swift.proxy.controllers.container.http_connect = new_connect
return new_connect
def _make_callback_func(calls):
def callback(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
context = {}
context['method'] = method
context['path'] = path
context['headers'] = headers or {}
calls.append(context)
return callback
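# A minimal usage sketch (assuming fake_http_connect's give_connect hook, as used
# elsewhere in this module):
#
#   calls = []
#   set_http_connect(200, 200, 200, give_connect=_make_callback_func(calls))
#   # after driving a request through the proxy, each backend request is recorded
#   # in calls as a dict with 'method', 'path' and 'headers'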
def _limit_max_file_size(f):
"""
This will limit constraints.MAX_FILE_SIZE for the duration of the
wrapped function, based on whether MAX_FILE_SIZE exceeds the
sys.maxsize limit on the system running the tests.
This allows successful testing on 32 bit systems.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
test_max_file_size = constraints.MAX_FILE_SIZE
if constraints.MAX_FILE_SIZE >= sys.maxsize:
test_max_file_size = (2 ** 30 + 2)
with mock.patch.object(constraints, 'MAX_FILE_SIZE',
test_max_file_size):
return f(*args, **kwargs)
return wrapper
# tests
class TestController(unittest.TestCase):
def setUp(self):
self.account_ring = FakeRing()
self.container_ring = FakeRing()
self.memcache = FakeMemcache()
app = proxy_server.Application(None, self.memcache,
account_ring=self.account_ring,
container_ring=self.container_ring)
self.controller = swift.proxy.controllers.Controller(app)
class FakeReq(object):
def __init__(self):
self.url = "/foo/bar"
self.method = "METHOD"
def as_referer(self):
return self.method + ' ' + self.url
self.account = 'some_account'
self.container = 'some_container'
self.request = FakeReq()
self.read_acl = 'read_acl'
self.write_acl = 'write_acl'
def test_transfer_headers(self):
src_headers = {'x-remove-base-meta-owner': 'x',
'x-base-meta-size': '151M',
'new-owner': 'Kun'}
dst_headers = {'x-base-meta-owner': 'Gareth',
'x-base-meta-size': '150M'}
self.controller.transfer_headers(src_headers, dst_headers)
expected_headers = {'x-base-meta-owner': '',
'x-base-meta-size': '151M'}
self.assertEqual(dst_headers, expected_headers)
def check_account_info_return(self, partition, nodes, is_none=False):
if is_none:
p, n = None, None
else:
p, n = self.account_ring.get_nodes(self.account)
self.assertEqual(p, partition)
self.assertEqual(n, nodes)
def test_account_info_container_count(self):
with save_globals():
set_http_connect(200, count=123)
partition, nodes, count = \
self.controller.account_info(self.account)
self.assertEqual(count, 123)
with save_globals():
set_http_connect(200, count='123')
partition, nodes, count = \
self.controller.account_info(self.account)
self.assertEqual(count, 123)
with save_globals():
cache_key = get_cache_key(self.account)
account_info = {'status': 200, 'container_count': 1234}
self.memcache.set(cache_key, account_info)
partition, nodes, count = \
self.controller.account_info(self.account)
self.assertEqual(count, 1234)
with save_globals():
cache_key = get_cache_key(self.account)
account_info = {'status': 200, 'container_count': '1234'}
self.memcache.set(cache_key, account_info)
partition, nodes, count = \
self.controller.account_info(self.account)
self.assertEqual(count, 1234)
def test_make_requests(self):
with save_globals():
set_http_connect(200)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
set_http_connect(201, raise_timeout_exc=True)
self.controller._make_request(
nodes, partition, 'POST', '/', '', '',
self.controller.app.logger.thread_locals)
# tests if 200 is cached and used
def test_account_info_200(self):
with save_globals():
set_http_connect(200)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes)
self.assertEqual(count, 12345)
# Test the internal representation in memcache
# 'container_count' changed from int to str
cache_key = get_cache_key(self.account)
container_info = {'status': 200,
'account_really_exists': True,
'container_count': '12345',
'total_object_count': None,
'bytes': None,
'meta': {},
'sysmeta': {}}
self.assertEqual(container_info,
self.memcache.get(cache_key))
set_http_connect()
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes)
self.assertEqual(count, 12345)
# tests if 404 is cached and used
def test_account_info_404(self):
with save_globals():
set_http_connect(404, 404, 404)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes, True)
self.assertIsNone(count)
# Test the internal representation in memcache
# 'container_count' changed from 0 to None
cache_key = get_cache_key(self.account)
account_info = {'status': 404,
'container_count': None, # internally keep None
'total_object_count': None,
'bytes': None,
'meta': {},
'sysmeta': {}}
self.assertEqual(account_info,
self.memcache.get(cache_key))
set_http_connect()
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes, True)
self.assertIsNone(count)
# tests if some http status codes are not cached
def test_account_info_no_cache(self):
def test(*status_list):
set_http_connect(*status_list)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.assertEqual(len(self.memcache.keys()), 0)
self.check_account_info_return(partition, nodes, True)
self.assertIsNone(count)
with save_globals():
# We cache if we have two 404 responses - fail if only one
test(503, 503, 404)
test(504, 404, 503)
test(404, 507, 503)
test(503, 503, 503)
def test_account_info_no_account(self):
with save_globals():
self.memcache.store = {}
set_http_connect(404, 404, 404)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes, is_none=True)
self.assertIsNone(count)
def check_container_info_return(self, ret, is_none=False):
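        # Assert the container_info dict carries the expected partition,
        # nodes and ACLs, or all None when is_none is True.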
if is_none:
partition, nodes, read_acl, write_acl = None, None, None, None
else:
partition, nodes = self.container_ring.get_nodes(self.account,
self.container)
read_acl, write_acl = self.read_acl, self.write_acl
self.assertEqual(partition, ret['partition'])
self.assertEqual(nodes, ret['nodes'])
self.assertEqual(read_acl, ret['read_acl'])
self.assertEqual(write_acl, ret['write_acl'])
def test_container_info_invalid_account(self):
def account_info(self, account, request, autocreate=False):
return None, None
with save_globals():
swift.proxy.controllers.Controller.account_info = account_info
ret = self.controller.container_info(self.account,
self.container,
self.request)
self.check_container_info_return(ret, True)
# tests if 200 is cached and used
def test_container_info_200(self):
with save_globals():
headers = {'x-container-read': self.read_acl,
'x-container-write': self.write_acl}
set_http_connect(200, # account_info is found
200, headers=headers) # container_info is found
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret)
cache_key = get_cache_key(self.account, self.container)
cache_value = self.memcache.get(cache_key)
self.assertIsInstance(cache_value, dict)
self.assertEqual(200, cache_value.get('status'))
set_http_connect()
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret)
# tests if 404 is cached and used
def test_container_info_404(self):
def account_info(self, account, request):
return True, True, 0
with save_globals():
set_http_connect(503, 204, # account_info found
504, 404, 404) # container_info 'NotFound'
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
cache_key = get_cache_key(self.account, self.container)
cache_value = self.memcache.get(cache_key)
self.assertIsInstance(cache_value, dict)
self.assertEqual(404, cache_value.get('status'))
set_http_connect()
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
set_http_connect(503, 404, 404) # account_info 'NotFound'
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
cache_key = get_cache_key(self.account, self.container)
cache_value = self.memcache.get(cache_key)
self.assertIsInstance(cache_value, dict)
self.assertEqual(404, cache_value.get('status'))
set_http_connect()
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
# tests if some http status codes are not cached
def test_container_info_no_cache(self):
def test(*status_list):
set_http_connect(*status_list)
ret = self.controller.container_info(
self.account, self.container, self.request)
self.assertEqual(len(self.memcache.keys()), 0)
self.check_container_info_return(ret, True)
with save_globals():
# We cache if we have two 404 responses - fail if only one
test(503, 503, 404)
test(504, 404, 503)
test(404, 507, 503)
test(503, 503, 503)
def test_get_account_info_returns_values_as_strings(self):
app = mock.MagicMock()
app.memcache = mock.MagicMock()
app.memcache.get = mock.MagicMock()
app.memcache.get.return_value = {
u'foo': u'\u2603',
u'meta': {u'bar': u'\u2603'},
u'sysmeta': {u'baz': u'\u2603'}}
env = {'PATH_INFO': '/v1/a'}
ai = get_account_info(env, app)
# Test info is returned as strings
self.assertEqual(ai.get('foo'), '\xe2\x98\x83')
self.assertIsInstance(ai.get('foo'), str)
# Test info['meta'] is returned as strings
m = ai.get('meta', {})
self.assertEqual(m.get('bar'), '\xe2\x98\x83')
self.assertIsInstance(m.get('bar'), str)
# Test info['sysmeta'] is returned as strings
m = ai.get('sysmeta', {})
self.assertEqual(m.get('baz'), '\xe2\x98\x83')
self.assertIsInstance(m.get('baz'), str)
def test_get_container_info_returns_values_as_strings(self):
app = mock.MagicMock()
app.memcache = mock.MagicMock()
app.memcache.get = mock.MagicMock()
app.memcache.get.return_value = {
u'foo': u'\u2603',
u'meta': {u'bar': u'\u2603'},
u'sysmeta': {u'baz': u'\u2603'},
u'cors': {u'expose_headers': u'\u2603'}}
env = {'PATH_INFO': '/v1/a/c'}
ci = get_container_info(env, app)
# Test info is returned as strings
self.assertEqual(ci.get('foo'), '\xe2\x98\x83')
self.assertIsInstance(ci.get('foo'), str)
# Test info['meta'] is returned as strings
m = ci.get('meta', {})
self.assertEqual(m.get('bar'), '\xe2\x98\x83')
self.assertIsInstance(m.get('bar'), str)
# Test info['sysmeta'] is returned as strings
m = ci.get('sysmeta', {})
self.assertEqual(m.get('baz'), '\xe2\x98\x83')
self.assertIsInstance(m.get('baz'), str)
# Test info['cors'] is returned as strings
m = ci.get('cors', {})
self.assertEqual(m.get('expose_headers'), '\xe2\x98\x83')
self.assertIsInstance(m.get('expose_headers'), str)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestProxyServer(unittest.TestCase):
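    """
    Tests for the proxy Application itself: object ring selection, request
    dispatch, authorization callbacks, node sorting/affinity, error limiting
    and API version handling.
    """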
def test_creation(self):
# later config should be extended to assert more config options
app = proxy_server.Application({'node_timeout': '3.5',
'recoverable_node_timeout': '1.5'},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
self.assertEqual(app.node_timeout, 3.5)
self.assertEqual(app.recoverable_node_timeout, 1.5)
def test_get_object_ring(self):
baseapp = proxy_server.Application({},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
with patch_policies([
StoragePolicy(0, 'a', False, object_ring=123),
StoragePolicy(1, 'b', True, object_ring=456),
StoragePolicy(2, 'd', False, object_ring=789)
]):
# None means legacy so always use policy 0
ring = baseapp.get_object_ring(None)
self.assertEqual(ring, 123)
ring = baseapp.get_object_ring('')
self.assertEqual(ring, 123)
ring = baseapp.get_object_ring('0')
self.assertEqual(ring, 123)
ring = baseapp.get_object_ring('1')
self.assertEqual(ring, 456)
ring = baseapp.get_object_ring('2')
self.assertEqual(ring, 789)
# illegal values
self.assertRaises(ValueError, baseapp.get_object_ring, '99')
self.assertRaises(ValueError, baseapp.get_object_ring, 'asdf')
def test_unhandled_exception(self):
class MyApp(proxy_server.Application):
def get_controller(self, path):
raise Exception('this shouldn\'t be caught')
app = MyApp(None, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing())
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
app.update_request(req)
resp = app.handle_request(req)
self.assertEqual(resp.status_int, 500)
def test_internal_method_request(self):
baseapp = proxy_server.Application({},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a', environ={'REQUEST_METHOD': '__init__'}))
self.assertEqual(resp.status, '405 Method Not Allowed')
def test_inexistent_method_request(self):
baseapp = proxy_server.Application({},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a', environ={'REQUEST_METHOD': '!invalid'}))
self.assertEqual(resp.status, '405 Method Not Allowed')
def test_calls_authorize_allow(self):
called = [False]
def authorize(req):
called[0] = True
with save_globals():
set_http_connect(200)
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
req = Request.blank('/v1/a')
req.environ['swift.authorize'] = authorize
app.update_request(req)
app.handle_request(req)
self.assertTrue(called[0])
def test_calls_authorize_deny(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
req = Request.blank('/v1/a')
req.environ['swift.authorize'] = authorize
app.update_request(req)
app.handle_request(req)
self.assertTrue(called[0])
def test_negative_content_length(self):
swift_dir = mkdtemp()
try:
baseapp = proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), FakeLogger(),
FakeRing(), FakeRing())
resp = baseapp.handle_request(
Request.blank('/', environ={'CONTENT_LENGTH': '-1'}))
self.assertEqual(resp.status, '400 Bad Request')
self.assertEqual(resp.body, 'Invalid Content-Length')
resp = baseapp.handle_request(
Request.blank('/', environ={'CONTENT_LENGTH': '-123'}))
self.assertEqual(resp.status, '400 Bad Request')
self.assertEqual(resp.body, 'Invalid Content-Length')
finally:
rmtree(swift_dir, ignore_errors=True)
def test_adds_transaction_id(self):
swift_dir = mkdtemp()
try:
logger = FakeLogger()
baseapp = proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), logger,
                                               container_ring=FakeRing(),
account_ring=FakeRing())
baseapp.handle_request(
Request.blank('/info',
environ={'HTTP_X_TRANS_ID_EXTRA': 'sardine',
'REQUEST_METHOD': 'GET'}))
# This is kind of a hokey way to get the transaction ID; it'd be
# better to examine response headers, but the catch_errors
# middleware is what sets the X-Trans-Id header, and we don't have
# that available here.
self.assertTrue(logger.txn_id.endswith('-sardine'))
finally:
rmtree(swift_dir, ignore_errors=True)
def test_adds_transaction_id_length_limit(self):
swift_dir = mkdtemp()
try:
logger = FakeLogger()
baseapp = proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), logger,
                                               container_ring=FakeRing(),
account_ring=FakeRing())
baseapp.handle_request(
Request.blank('/info',
environ={'HTTP_X_TRANS_ID_EXTRA': 'a' * 1000,
'REQUEST_METHOD': 'GET'}))
self.assertTrue(logger.txn_id.endswith(
'-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))
finally:
rmtree(swift_dir, ignore_errors=True)
def test_denied_host_header(self):
swift_dir = mkdtemp()
try:
baseapp = proxy_server.Application({'swift_dir': swift_dir,
'deny_host_headers':
'invalid_host.com'},
FakeMemcache(),
                                               container_ring=FakeRing(),
account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a/c/o',
environ={'HTTP_HOST': 'invalid_host.com'}))
self.assertEqual(resp.status, '403 Forbidden')
finally:
rmtree(swift_dir, ignore_errors=True)
def test_node_timing(self):
baseapp = proxy_server.Application({'sorting_method': 'timing'},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
self.assertEqual(baseapp.node_timings, {})
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
baseapp.update_request(req)
resp = baseapp.handle_request(req)
self.assertEqual(resp.status_int, 503) # couldn't connect to anything
exp_timings = {}
self.assertEqual(baseapp.node_timings, exp_timings)
times = [time.time()]
exp_timings = {'127.0.0.1': (0.1, times[0] + baseapp.timing_expiry)}
with mock.patch('swift.proxy.server.time', lambda: times.pop(0)):
baseapp.set_node_timing({'ip': '127.0.0.1'}, 0.1)
self.assertEqual(baseapp.node_timings, exp_timings)
nodes = [{'ip': '127.0.0.1'}, {'ip': '127.0.0.2'}, {'ip': '127.0.0.3'}]
with mock.patch('swift.proxy.server.shuffle', lambda l: l):
res = baseapp.sort_nodes(nodes)
exp_sorting = [{'ip': '127.0.0.2'}, {'ip': '127.0.0.3'},
{'ip': '127.0.0.1'}]
self.assertEqual(res, exp_sorting)
def _do_sort_nodes(self, conf, policy_conf, nodes, policy,
node_timings=None):
# Note with shuffling mocked out, sort_nodes will by default return
# nodes in the order they are given
nodes = list(nodes)
conf = dict(conf, policy_config=policy_conf)
baseapp = proxy_server.Application(conf,
FakeMemcache(),
logger=FakeLogger(),
container_ring=FakeRing(),
account_ring=FakeRing())
if node_timings:
for i, n in enumerate(nodes):
baseapp.set_node_timing(n, node_timings[i])
with mock.patch('swift.proxy.server.shuffle', lambda x: x):
app_sorted = baseapp.sort_nodes(nodes, policy)
self.assertFalse(baseapp.logger.get_lines_for_level('warning'))
return baseapp, app_sorted
def test_sort_nodes_default(self):
nodes = [{'region': 0, 'zone': 1, 'ip': '127.0.0.3'},
{'region': 1, 'zone': 1, 'ip': '127.0.0.1'},
{'region': 2, 'zone': 2, 'ip': '127.0.0.2'}]
# sanity check - no affinity conf results in node order unchanged
app, actual = self._do_sort_nodes({}, {}, nodes, None)
self.assertEqual(nodes, actual)
def test_sort_nodes_by_affinity_proxy_server_config(self):
nodes = [{'region': 0, 'zone': 1, 'ip': '127.0.0.3'},
{'region': 1, 'zone': 1, 'ip': '127.0.0.1'},
{'region': 2, 'zone': 2, 'ip': '127.0.0.2'}]
# proxy-server affinity conf is to prefer r2
conf = {'sorting_method': 'affinity', 'read_affinity': 'r2=1'}
app, actual = self._do_sort_nodes(conf, {}, nodes, None)
self.assertEqual([nodes[2], nodes[0], nodes[1]], actual)
app, actual = self._do_sort_nodes(conf, {}, nodes, POLICIES[0])
self.assertEqual([nodes[2], nodes[0], nodes[1]], actual)
# check that node timings are not collected if sorting_method != timing
self.assertFalse(app.sorts_by_timing) # sanity check
self.assertFalse(app.node_timings) # sanity check
# proxy-server affinity conf is to prefer region 1
conf = {'sorting_method': 'affinity', 'read_affinity': 'r1=1'}
app, actual = self._do_sort_nodes(conf, {}, nodes, None)
self.assertEqual([nodes[1], nodes[0], nodes[2]], actual)
app, actual = self._do_sort_nodes(conf, {}, nodes, POLICIES[0])
self.assertEqual([nodes[1], nodes[0], nodes[2]], actual)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing()),
StoragePolicy(1, 'one', False, object_ring=FakeRing())])
def test_sort_nodes_by_affinity_per_policy(self):
nodes = [{'region': 0, 'zone': 1, 'ip': '127.0.0.4'},
{'region': 1, 'zone': 0, 'ip': '127.0.0.3'},
{'region': 2, 'zone': 1, 'ip': '127.0.0.1'},
{'region': 3, 'zone': 0, 'ip': '127.0.0.2'}]
conf = {'sorting_method': 'affinity', 'read_affinity': 'r3=1'}
per_policy = {'0': {'sorting_method': 'affinity',
'read_affinity': 'r1=1'},
'1': {'sorting_method': 'affinity',
'read_affinity': 'r2=1'}}
# policy 0 affinity prefers r1
app, actual = self._do_sort_nodes(conf, per_policy, nodes, POLICIES[0])
self.assertEqual([nodes[1], nodes[0], nodes[2], nodes[3]], actual)
# policy 1 affinity prefers r2
app, actual = self._do_sort_nodes(conf, per_policy, nodes, POLICIES[1])
self.assertEqual([nodes[2], nodes[0], nodes[1], nodes[3]], actual)
# default affinity prefers r3
app, actual = self._do_sort_nodes(conf, per_policy, nodes, None)
self.assertEqual([nodes[3], nodes[0], nodes[1], nodes[2]], actual)
def test_sort_nodes_by_affinity_per_policy_with_no_default(self):
# no proxy-server setting but policy 0 prefers r0
nodes = [{'region': 1, 'zone': 1, 'ip': '127.0.0.1'},
{'region': 0, 'zone': 2, 'ip': '127.0.0.2'}]
conf = {}
per_policy = {'0': {'sorting_method': 'affinity',
'read_affinity': 'r0=0'}}
# policy 0 uses affinity sorting
app, actual = self._do_sort_nodes(conf, per_policy, nodes, POLICIES[0])
self.assertEqual([nodes[1], nodes[0]], actual)
# any other policy will use default sorting
app, actual = self._do_sort_nodes(conf, per_policy, nodes, None)
self.assertEqual(nodes, actual)
def test_sort_nodes_by_affinity_per_policy_inherits(self):
# policy 0 has read_affinity but no sorting_method override,
nodes = [{'region': 1, 'zone': 1, 'ip': '127.0.0.1'},
{'region': 0, 'zone': 2, 'ip': '127.0.0.2'}]
conf = {}
per_policy = {'0': {'read_affinity': 'r0=0'}}
# policy 0 uses the default sorting method instead of affinity sorting
app, actual = self._do_sort_nodes(conf, per_policy, nodes, POLICIES[0])
self.assertEqual(nodes, actual)
# but if proxy-server sorting_method is affinity then policy 0 inherits
conf = {'sorting_method': 'affinity'}
app, actual = self._do_sort_nodes(conf, per_policy, nodes, POLICIES[0])
self.assertEqual([nodes[1], nodes[0]], actual)
def test_sort_nodes_by_affinity_per_policy_overrides(self):
# default setting is to sort by timing but policy 0 uses read affinity
nodes = [{'region': 0, 'zone': 1, 'ip': '127.0.0.3'},
{'region': 1, 'zone': 1, 'ip': '127.0.0.1'},
{'region': 2, 'zone': 2, 'ip': '127.0.0.2'}]
node_timings = [10, 1, 100]
conf = {'sorting_method': 'timing'}
per_policy = {'0': {'sorting_method': 'affinity',
'read_affinity': 'r1=1,r2=2'}}
app, actual = self._do_sort_nodes(conf, per_policy, nodes, POLICIES[0],
node_timings=node_timings)
self.assertEqual([nodes[1], nodes[2], nodes[0]], actual)
# check that timings are collected despite one policy using affinity
self.assertTrue(app.sorts_by_timing)
self.assertEqual(3, len(app.node_timings))
# check app defaults to sorting by timing when no policy specified
app, actual = self._do_sort_nodes(conf, per_policy, nodes, None,
node_timings=node_timings)
self.assertEqual([nodes[1], nodes[0], nodes[2]], actual)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing()),
StoragePolicy(1, 'one', False, object_ring=FakeRing())])
def test_sort_nodes_by_timing_per_policy(self):
# default setting is to sort by affinity but policy 0 uses timing
nodes = [{'region': 0, 'zone': 1, 'ip': '127.0.0.3'},
{'region': 1, 'zone': 1, 'ip': '127.0.0.1'},
{'region': 2, 'zone': 2, 'ip': '127.0.0.2'}]
node_timings = [10, 1, 100]
conf = {'sorting_method': 'affinity', 'read_affinity': 'r1=1,r2=2'}
per_policy = {'0': {'sorting_method': 'timing',
'read_affinity': 'r1=1,r2=2'}, # should be ignored
'1': {'read_affinity': 'r2=1'}}
# policy 0 uses timing
app, actual = self._do_sort_nodes(conf, per_policy, nodes, POLICIES[0],
node_timings=node_timings)
self.assertEqual([nodes[1], nodes[0], nodes[2]], actual)
self.assertTrue(app.sorts_by_timing)
self.assertEqual(3, len(app.node_timings))
# policy 1 uses policy specific read affinity
app, actual = self._do_sort_nodes(conf, per_policy, nodes, POLICIES[1],
node_timings=node_timings)
self.assertEqual([nodes[2], nodes[0], nodes[1]], actual)
# check that with no policy specified the default read affinity is used
app, actual = self._do_sort_nodes(conf, per_policy, nodes, None,
node_timings=node_timings)
self.assertEqual([nodes[1], nodes[2], nodes[0]], actual)
def test_node_concurrency(self):
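        # Simulate three backend nodes with different response latencies and
        # check that concurrent GETs return the fastest successful response.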
nodes = [{'region': 1, 'zone': 1, 'ip': '127.0.0.1', 'port': 6010,
'device': 'sda'},
{'region': 2, 'zone': 2, 'ip': '127.0.0.2', 'port': 6010,
'device': 'sda'},
{'region': 3, 'zone': 3, 'ip': '127.0.0.3', 'port': 6010,
'device': 'sda'}]
timings = {'127.0.0.1': 2, '127.0.0.2': 1, '127.0.0.3': 0}
statuses = {'127.0.0.1': 200, '127.0.0.2': 200, '127.0.0.3': 200}
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'GET'})
def fake_iter_nodes(*arg, **karg):
return iter(nodes)
class FakeConn(object):
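            # Minimal connection stub whose getresponse() sleeps for the
            # node's configured timing before returning its canned status.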
def __init__(self, ip, *args, **kargs):
self.ip = ip
self.args = args
self.kargs = kargs
def getresponse(self):
def mygetheader(header, *args, **kargs):
if header == "Content-Type":
return ""
else:
return 1
resp = mock.Mock()
resp.read.side_effect = ['Response from %s' % self.ip, '']
resp.getheader = mygetheader
resp.getheaders.return_value = {}
resp.reason = ''
resp.status = statuses[self.ip]
sleep(timings[self.ip])
return resp
def myfake_http_connect_raw(ip, *args, **kargs):
conn = FakeConn(ip, *args, **kargs)
return conn
with mock.patch('swift.proxy.server.Application.iter_nodes',
fake_iter_nodes):
with mock.patch('swift.common.bufferedhttp.http_connect_raw',
myfake_http_connect_raw):
app_conf = {'concurrent_gets': 'on',
'concurrency_timeout': 0}
baseapp = proxy_server.Application(app_conf,
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
self.assertTrue(baseapp.concurrent_gets)
self.assertEqual(baseapp.concurrency_timeout, 0)
baseapp.update_request(req)
resp = baseapp.handle_request(req)
# Should get 127.0.0.3 as this has a wait of 0 seconds.
self.assertEqual(resp.body, 'Response from 127.0.0.3')
# lets try again, with 127.0.0.1 with 0 timing but returns an
# error.
timings['127.0.0.1'] = 0
statuses['127.0.0.1'] = 500
# Should still get 127.0.0.3 as this has a wait of 0 seconds
# and a success
baseapp.update_request(req)
resp = baseapp.handle_request(req)
self.assertEqual(resp.body, 'Response from 127.0.0.3')
# Now lets set the concurrency_timeout
app_conf['concurrency_timeout'] = 2
baseapp = proxy_server.Application(app_conf,
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
self.assertEqual(baseapp.concurrency_timeout, 2)
baseapp.update_request(req)
resp = baseapp.handle_request(req)
# Should get 127.0.0.2 as this has a wait of 1 seconds.
self.assertEqual(resp.body, 'Response from 127.0.0.2')
def test_info_defaults(self):
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
self.assertTrue(app.expose_info)
self.assertIsInstance(app.disallowed_sections, list)
self.assertEqual(1, len(app.disallowed_sections))
self.assertEqual(['swift.valid_api_versions'],
app.disallowed_sections)
self.assertIsNone(app.admin_key)
def test_get_info_controller(self):
req = Request.blank('/info')
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
controller, path_parts = app.get_controller(req)
self.assertIn('version', path_parts)
self.assertIsNone(path_parts['version'])
self.assertIn('disallowed_sections', path_parts)
self.assertIn('expose_info', path_parts)
self.assertIn('admin_key', path_parts)
self.assertEqual(controller.__name__, 'InfoController')
def test_exception_occurred(self):
def do_test(additional_info):
logger = debug_logger('test')
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing(),
logger=logger)
node = app.container_ring.get_part_nodes(0)[0]
node_key = app._error_limit_node_key(node)
self.assertNotIn(node_key, app._error_limiting) # sanity
try:
raise Exception('kaboom1!')
except Exception as err:
app.exception_occurred(node, 'server-type', additional_info)
self.assertEqual(1, app._error_limiting[node_key]['errors'])
line = logger.get_lines_for_level('error')[-1]
self.assertIn('server-type server', line)
self.assertIn(additional_info.decode('utf8'), line)
self.assertIn(node['ip'], line)
self.assertIn(str(node['port']), line)
self.assertIn(node['device'], line)
log_args, log_kwargs = logger.log_dict['error'][-1]
self.assertTrue(log_kwargs['exc_info'])
self.assertEqual(err, log_kwargs['exc_info'][1])
do_test('success')
do_test('succès')
do_test(u'success')
def test_error_occurred(self):
def do_test(msg):
logger = debug_logger('test')
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing(),
logger=logger)
node = app.container_ring.get_part_nodes(0)[0]
node_key = app._error_limit_node_key(node)
self.assertNotIn(node_key, app._error_limiting) # sanity
app.error_occurred(node, msg)
self.assertEqual(1, app._error_limiting[node_key]['errors'])
line = logger.get_lines_for_level('error')[-1]
self.assertIn(msg.decode('utf8'), line)
self.assertIn(node['ip'], line)
self.assertIn(str(node['port']), line)
self.assertIn(node['device'], line)
do_test('success')
do_test('succès')
do_test(u'success')
def test_error_limit_methods(self):
logger = debug_logger('test')
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing(),
logger=logger)
node = app.container_ring.get_part_nodes(0)[0]
# error occurred
app.error_occurred(node, 'test msg')
self.assertTrue('test msg' in
logger.get_lines_for_level('error')[-1])
self.assertEqual(1, node_error_count(app, node))
# exception occurred
try:
raise Exception('kaboom1!')
except Exception as e1:
app.exception_occurred(node, 'test1', 'test1 msg')
line = logger.get_lines_for_level('error')[-1]
self.assertIn('test1 server', line)
self.assertIn('test1 msg', line)
log_args, log_kwargs = logger.log_dict['error'][-1]
self.assertTrue(log_kwargs['exc_info'])
self.assertEqual(log_kwargs['exc_info'][1], e1)
self.assertEqual(2, node_error_count(app, node))
# warning exception occurred
try:
raise Exception('kaboom2!')
except Exception as e2:
app.exception_occurred(node, 'test2', 'test2 msg',
level=logging.WARNING)
line = logger.get_lines_for_level('warning')[-1]
self.assertIn('test2 server', line)
self.assertIn('test2 msg', line)
log_args, log_kwargs = logger.log_dict['warning'][-1]
self.assertTrue(log_kwargs['exc_info'])
self.assertEqual(log_kwargs['exc_info'][1], e2)
self.assertEqual(3, node_error_count(app, node))
# custom exception occurred
try:
raise Exception('kaboom3!')
except Exception as e3:
e3_info = sys.exc_info()
try:
raise Exception('kaboom4!')
except Exception:
pass
app.exception_occurred(node, 'test3', 'test3 msg',
level=logging.WARNING, exc_info=e3_info)
line = logger.get_lines_for_level('warning')[-1]
self.assertIn('test3 server', line)
self.assertIn('test3 msg', line)
log_args, log_kwargs = logger.log_dict['warning'][-1]
self.assertTrue(log_kwargs['exc_info'])
self.assertEqual(log_kwargs['exc_info'][1], e3)
self.assertEqual(4, node_error_count(app, node))
def test_valid_api_version(self):
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
# The version string is only checked for account, container and object
# requests; the raised APIVersionError returns a 404 to the client
for path in [
'/v2/a',
'/v2/a/c',
'/v2/a/c/o']:
req = Request.blank(path)
self.assertRaises(APIVersionError, app.get_controller, req)
# Default valid API versions are ok
for path in [
'/v1/a',
'/v1/a/c',
'/v1/a/c/o',
'/v1.0/a',
'/v1.0/a/c',
'/v1.0/a/c/o']:
req = Request.blank(path)
controller, path_parts = app.get_controller(req)
self.assertIsNotNone(controller)
# Ensure settings valid API version constraint works
for version in ["42", 42]:
try:
with NamedTemporaryFile() as f:
f.write('[swift-constraints]\n')
f.write('valid_api_versions = %s\n' % version)
f.flush()
with mock.patch.object(utils, 'SWIFT_CONF_FILE', f.name):
constraints.reload_constraints()
req = Request.blank('/%s/a' % version)
controller, _ = app.get_controller(req)
self.assertIsNotNone(controller)
# In this case v1 is invalid
req = Request.blank('/v1/a')
self.assertRaises(APIVersionError, app.get_controller, req)
finally:
constraints.reload_constraints()
# Check that the valid_api_versions is not exposed by default
req = Request.blank('/info')
controller, path_parts = app.get_controller(req)
self.assertTrue('swift.valid_api_versions' in
path_parts.get('disallowed_sections'))
@patch_policies([
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one'),
])
class TestProxyServerLoading(unittest.TestCase):
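    """
    Tests that loading the proxy app from a paste config requires, and then
    uses, the account, container and per-policy object rings on disk.
    """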
def setUp(self):
self._orig_hash_suffix = utils.HASH_PATH_SUFFIX
utils.HASH_PATH_SUFFIX = 'endcap'
self.tempdir = mkdtemp()
def tearDown(self):
rmtree(self.tempdir)
utils.HASH_PATH_SUFFIX = self._orig_hash_suffix
for policy in POLICIES:
policy.object_ring = None
def test_load_policy_rings(self):
for policy in POLICIES:
self.assertFalse(policy.object_ring)
conf_path = os.path.join(self.tempdir, 'proxy-server.conf')
conf_body = """
[DEFAULT]
swift_dir = %s
[pipeline:main]
pipeline = catch_errors cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
[filter:catch_errors]
use = egg:swift#catch_errors
""" % self.tempdir
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
account_ring_path = os.path.join(self.tempdir, 'account.ring.gz')
write_fake_ring(account_ring_path)
container_ring_path = os.path.join(self.tempdir, 'container.ring.gz')
write_fake_ring(container_ring_path)
for policy in POLICIES:
object_ring_path = os.path.join(self.tempdir,
policy.ring_name + '.ring.gz')
write_fake_ring(object_ring_path)
app = loadapp(conf_path)
# find the end of the pipeline
while hasattr(app, 'app'):
app = app.app
# validate loaded rings
self.assertEqual(app.account_ring.serialized_path,
account_ring_path)
self.assertEqual(app.container_ring.serialized_path,
container_ring_path)
for policy in POLICIES:
self.assertEqual(policy.object_ring,
app.get_object_ring(int(policy)))
def test_missing_rings(self):
conf_path = os.path.join(self.tempdir, 'proxy-server.conf')
conf_body = """
[DEFAULT]
swift_dir = %s
[pipeline:main]
pipeline = catch_errors cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
[filter:catch_errors]
use = egg:swift#catch_errors
""" % self.tempdir
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
ring_paths = [
os.path.join(self.tempdir, 'account.ring.gz'),
os.path.join(self.tempdir, 'container.ring.gz'),
]
for policy in POLICIES:
self.assertFalse(policy.object_ring)
object_ring_path = os.path.join(self.tempdir,
policy.ring_name + '.ring.gz')
ring_paths.append(object_ring_path)
for policy in POLICIES:
self.assertFalse(policy.object_ring)
for ring_path in ring_paths:
self.assertFalse(os.path.exists(ring_path))
self.assertRaises(IOError, loadapp, conf_path)
write_fake_ring(ring_path)
# all rings exist, app should load
loadapp(conf_path)
for policy in POLICIES:
self.assertTrue(policy.object_ring)
@patch_policies()
class TestProxyServerConfigLoading(unittest.TestCase):
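    """
    Tests for per-policy [proxy-server:policy:<index>] config sections that
    override the proxy-server defaults for sorting and affinity options.
    """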
def setUp(self):
self.tempdir = mkdtemp()
account_ring_path = os.path.join(self.tempdir, 'account.ring.gz')
write_fake_ring(account_ring_path)
container_ring_path = os.path.join(self.tempdir, 'container.ring.gz')
write_fake_ring(container_ring_path)
def tearDown(self):
rmtree(self.tempdir)
def _write_conf(self, conf_body):
# this is broken out to a method so that subclasses can override
conf_path = os.path.join(self.tempdir, 'proxy-server.conf')
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
return conf_path
def _write_conf_and_load_app(self, conf_sections, app_name='proxy-server'):
# write proxy-server.conf file, load app
conf_body = dedent("""
[DEFAULT]
swift_dir = %s
[pipeline:main]
pipeline = %s
%s
""") % (self.tempdir, app_name, dedent(conf_sections))
conf_path = self._write_conf(conf_body)
with mock.patch('swift.proxy.server.get_logger',
return_value=FakeLogger()):
app = loadapp(conf_path, allow_modify_pipeline=False)
return app
def _check_policy_options(self, app, exp_options, exp_is_local):
# verify expected config
for policy, options in exp_options.items():
for k, v in options.items():
actual = getattr(app.get_policy_options(policy), k)
if k == "write_affinity_node_count_fn":
if policy: # this check only applies when using a policy
actual = actual(policy.object_ring.replica_count)
self.assertEqual(v, actual)
continue
self.assertEqual(v, actual,
"Expected %s=%s but got %s=%s for policy %s" %
(k, v, k, actual, policy))
for policy, nodes in exp_is_local.items():
fn = app.get_policy_options(policy).write_affinity_is_local_fn
if nodes is None:
self.assertIsNone(fn)
continue
for node, expected_result in nodes:
actual = fn(node)
self.assertIs(expected_result, actual,
"Expected %s but got %s for %s, policy %s" %
(expected_result, actual, node, policy))
def test_per_policy_conf_none_configured(self):
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
"""
expected_default = {"read_affinity": "",
"sorting_method": "shuffle",
"write_affinity": "",
"write_affinity_node_count_fn": 6}
exp_options = {None: expected_default,
POLICIES[0]: expected_default,
POLICIES[1]: expected_default}
exp_is_local = {POLICIES[0]: None,
POLICIES[1]: None}
app = self._write_conf_and_load_app(conf_sections)
self._check_policy_options(app, exp_options, exp_is_local)
def test_per_policy_conf_one_configured(self):
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
[proxy-server:policy:0]
sorting_method = affinity
read_affinity = r1=100
write_affinity = r1
write_affinity_node_count = 1 * replicas
write_affinity_handoff_delete_count = 4
"""
expected_default = {"read_affinity": "",
"sorting_method": "shuffle",
"write_affinity": "",
"write_affinity_node_count_fn": 6,
"write_affinity_handoff_delete_count": None}
exp_options = {None: expected_default,
POLICIES[0]: {"read_affinity": "r1=100",
"sorting_method": "affinity",
"write_affinity": "r1",
"write_affinity_node_count_fn": 3,
"write_affinity_handoff_delete_count": 4},
POLICIES[1]: expected_default}
exp_is_local = {POLICIES[0]: [({'region': 1, 'zone': 2}, True),
({'region': 2, 'zone': 1}, False)],
POLICIES[1]: None}
app = self._write_conf_and_load_app(conf_sections)
self._check_policy_options(app, exp_options, exp_is_local)
default_options = app.get_policy_options(None)
self.assertEqual(
"ProxyOverrideOptions({}, {'sorting_method': 'shuffle', "
"'read_affinity': '', 'write_affinity': '', "
"'write_affinity_node_count': '2 * replicas', "
"'write_affinity_handoff_delete_count': None})",
repr(default_options))
self.assertEqual(default_options, eval(repr(default_options), {
'ProxyOverrideOptions': default_options.__class__}))
policy_0_options = app.get_policy_options(POLICIES[0])
self.assertEqual(
"ProxyOverrideOptions({}, {'sorting_method': 'affinity', "
"'read_affinity': 'r1=100', 'write_affinity': 'r1', "
"'write_affinity_node_count': '1 * replicas', "
"'write_affinity_handoff_delete_count': 4})",
repr(policy_0_options))
self.assertEqual(policy_0_options, eval(repr(policy_0_options), {
'ProxyOverrideOptions': policy_0_options.__class__}))
self.assertNotEqual(default_options, policy_0_options)
policy_1_options = app.get_policy_options(POLICIES[1])
self.assertIs(default_options, policy_1_options)
def test_per_policy_conf_inherits_defaults(self):
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
sorting_method = affinity
write_affinity_node_count = 1 * replicas
write_affinity_handoff_delete_count = 3
[proxy-server:policy:0]
read_affinity = r1=100
write_affinity = r1
"""
expected_default = {"read_affinity": "",
"sorting_method": "affinity",
"write_affinity": "",
"write_affinity_node_count_fn": 3,
"write_affinity_handoff_delete_count": 3}
exp_options = {None: expected_default,
POLICIES[0]: {"read_affinity": "r1=100",
"sorting_method": "affinity",
"write_affinity": "r1",
"write_affinity_node_count_fn": 3,
"write_affinity_handoff_delete_count": 3},
POLICIES[1]: expected_default}
exp_is_local = {POLICIES[0]: [({'region': 1, 'zone': 2}, True),
({'region': 2, 'zone': 1}, False)],
POLICIES[1]: None}
app = self._write_conf_and_load_app(conf_sections)
self._check_policy_options(app, exp_options, exp_is_local)
def test_per_policy_conf_overrides_default_affinity(self):
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
sorting_method = affinity
read_affinity = r2=10
write_affinity_node_count = 1 * replicas
write_affinity = r2
write_affinity_handoff_delete_count = 2
[proxy-server:policy:0]
read_affinity = r1=100
write_affinity = r1
write_affinity_node_count = 5
write_affinity_handoff_delete_count = 3
[proxy-server:policy:1]
read_affinity = r1=1
write_affinity = r3
write_affinity_node_count = 4
write_affinity_handoff_delete_count = 4
"""
exp_options = {None: {"read_affinity": "r2=10",
"sorting_method": "affinity",
"write_affinity": "r2",
"write_affinity_node_count_fn": 3,
"write_affinity_handoff_delete_count": 2},
POLICIES[0]: {"read_affinity": "r1=100",
"sorting_method": "affinity",
"write_affinity": "r1",
"write_affinity_node_count_fn": 5,
"write_affinity_handoff_delete_count": 3},
POLICIES[1]: {"read_affinity": "r1=1",
"sorting_method": "affinity",
"write_affinity": "r3",
"write_affinity_node_count_fn": 4,
"write_affinity_handoff_delete_count": 4}}
exp_is_local = {POLICIES[0]: [({'region': 1, 'zone': 2}, True),
({'region': 2, 'zone': 1}, False)],
POLICIES[1]: [({'region': 3, 'zone': 2}, True),
({'region': 1, 'zone': 1}, False),
({'region': 2, 'zone': 1}, False)]}
app = self._write_conf_and_load_app(conf_sections)
self._check_policy_options(app, exp_options, exp_is_local)
def test_per_policy_conf_overrides_default_sorting_method(self):
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
sorting_method = timing
[proxy-server:policy:0]
sorting_method = affinity
read_affinity = r1=100
[proxy-server:policy:1]
sorting_method = affinity
read_affinity = r1=1
"""
exp_options = {None: {"read_affinity": "",
"sorting_method": "timing"},
POLICIES[0]: {"read_affinity": "r1=100",
"sorting_method": "affinity"},
POLICIES[1]: {"read_affinity": "r1=1",
"sorting_method": "affinity"}}
app = self._write_conf_and_load_app(conf_sections)
self._check_policy_options(app, exp_options, {})
def test_per_policy_conf_with_DEFAULT_options(self):
conf_body = """
[DEFAULT]
write_affinity = r0
read_affinity = r0=100
swift_dir = %s
[pipeline:main]
pipeline = proxy-server
[app:proxy-server]
use = egg:swift#proxy
# in a paste-deploy section, DEFAULT section value overrides
write_affinity = r2
# ...but the use of 'set' overrides the DEFAULT section value
set read_affinity = r1=100
[proxy-server:policy:0]
# not a paste-deploy section so any value here overrides DEFAULT
sorting_method = affinity
write_affinity = r2
read_affinity = r2=100
[proxy-server:policy:1]
sorting_method = affinity
""" % self.tempdir
# Don't just use _write_conf_and_load_app, as we don't want to have
# duplicate DEFAULT sections
conf_path = self._write_conf(conf_body)
with mock.patch('swift.proxy.server.get_logger',
return_value=FakeLogger()):
app = loadapp(conf_path, allow_modify_pipeline=False)
exp_options = {
# default read_affinity is r1, set in proxy-server section
None: {"read_affinity": "r1=100",
"sorting_method": "shuffle",
"write_affinity": "r0",
"write_affinity_node_count_fn": 6,
"write_affinity_handoff_delete_count": None},
# policy 0 read affinity is r2, dictated by policy 0 section
POLICIES[0]: {"read_affinity": "r2=100",
"sorting_method": "affinity",
"write_affinity": "r2",
"write_affinity_node_count_fn": 6,
"write_affinity_handoff_delete_count": None},
# policy 1 read_affinity is r0, dictated by DEFAULT section,
# overrides proxy server section
POLICIES[1]: {"read_affinity": "r0=100",
"sorting_method": "affinity",
"write_affinity": "r0",
"write_affinity_node_count_fn": 6,
"write_affinity_handoff_delete_count": None}}
exp_is_local = {
# default write_affinity is r0, dictated by DEFAULT section
None: [({'region': 0, 'zone': 2}, True),
({'region': 1, 'zone': 1}, False)],
# policy 0 write_affinity is r2, dictated by policy 0 section
POLICIES[0]: [({'region': 0, 'zone': 2}, False),
({'region': 2, 'zone': 1}, True)],
# policy 1 write_affinity is r0, inherited from default
POLICIES[1]: [({'region': 0, 'zone': 2}, True),
({'region': 1, 'zone': 1}, False)]}
self._check_policy_options(app, exp_options, exp_is_local)
def test_per_policy_conf_warns_about_sorting_method_mismatch(self):
# verify that policy specific warnings are emitted when read_affinity
# is set but sorting_method is not affinity
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
read_affinity = r2=10
sorting_method = timing
[proxy-server:policy:0]
read_affinity = r1=100
[proxy-server:policy:1]
sorting_method = affinity
read_affinity = r1=1
"""
exp_options = {None: {"read_affinity": "r2=10",
"sorting_method": "timing"},
POLICIES[0]: {"read_affinity": "r1=100",
"sorting_method": "timing"},
POLICIES[1]: {"read_affinity": "r1=1",
"sorting_method": "affinity"}}
app = self._write_conf_and_load_app(conf_sections)
self._check_policy_options(app, exp_options, {})
lines = app.logger.get_lines_for_level('warning')
labels = {'default', 'policy 0 (nulo)'}
for line in lines[:2]:
self.assertIn(
"sorting_method is set to 'timing', not 'affinity'", line)
for label in labels:
if label in line:
labels.remove(label)
break
else:
self.fail("None of %s found in warning: %r" % (labels, line))
self.assertFalse(labels)
def test_per_policy_conf_warns_override_sorting_method_mismatch(self):
# verify that policy specific warnings are emitted when read_affinity
# is set but sorting_method is not affinity in a policy config
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
sorting_method = affinity
read_affinity = r2=10
[proxy-server:policy:0]
sorting_method = timing
"""
exp_options = {None: {"read_affinity": "r2=10",
"write_affinity": "",
"sorting_method": "affinity"},
POLICIES[0]: {"read_affinity": "r2=10",
"write_affinity": "",
"sorting_method": "timing"}}
app = self._write_conf_and_load_app(conf_sections)
self._check_policy_options(app, exp_options, {})
lines = app.logger.get_lines_for_level('warning')
for line in lines:
# proxy-server gets instantiated twice during loadapp so expect two
# warnings; check that both warnings refer to policy 0 and not the
# default config
self.assertIn(
"sorting_method is set to 'timing', not 'affinity'", line)
self.assertIn('policy 0 (nulo)', line)
self.assertFalse(lines[2:])
def test_per_policy_conf_section_name_inherits_from_app_section_name(self):
conf_sections = """
[app:proxy-srv]
use = egg:swift#proxy
sorting_method = affinity
[proxy-server:policy:0]
sorting_method = timing
# ignored!
[proxy-srv:policy:1]
sorting_method = shuffle
"""
exp_options = {None: {'sorting_method': 'affinity'},
POLICIES[0]: {'sorting_method': 'affinity'},
POLICIES[1]: {'sorting_method': 'shuffle'}}
app = self._write_conf_and_load_app(conf_sections, 'proxy-srv')
self._check_policy_options(app, exp_options, {})
def test_per_policy_conf_with_unknown_policy(self):
# verify that unknown policy section raises an error
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
read_affinity = r2=10
sorting_method = affinity
[proxy-server:policy:999]
read_affinity = r2z1=1
"""
with self.assertRaises(ValueError) as cm:
self._write_conf_and_load_app(conf_sections)
self.assertIn('No policy found for override config, index: 999',
cm.exception.message)
def test_per_policy_conf_sets_timing_sorting_method(self):
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
sorting_method = affinity
[proxy-server:policy:0]
sorting_method = timing
[proxy-server:policy:1]
read_affinity = r1=1
"""
exp_options = {None: {"read_affinity": "",
"sorting_method": "affinity"},
POLICIES[0]: {"read_affinity": "",
"sorting_method": "timing"},
POLICIES[1]: {"read_affinity": "r1=1",
"sorting_method": "affinity"}}
app = self._write_conf_and_load_app(conf_sections)
self._check_policy_options(app, exp_options, {})
def test_per_policy_conf_invalid_read_affinity_value(self):
def do_test(conf_sections, label):
with self.assertRaises(ValueError) as cm:
self._write_conf_and_load_app(conf_sections)
self.assertIn('broken', cm.exception.message)
self.assertIn(
'Invalid read_affinity value:', cm.exception.message)
self.assertIn(label, cm.exception.message)
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
sorting_method = affinity
read_affinity = r1=1
[proxy-server:policy:0]
sorting_method = affinity
read_affinity = broken
"""
do_test(conf_sections, 'policy 0 (nulo)')
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
sorting_method = affinity
read_affinity = broken
[proxy-server:policy:0]
sorting_method = affinity
read_affinity = r1=1
"""
do_test(conf_sections, '(default)')
def test_per_policy_conf_invalid_write_affinity_value(self):
def do_test(conf_sections, label):
with self.assertRaises(ValueError) as cm:
self._write_conf_and_load_app(conf_sections)
self.assertIn('broken', cm.exception.message)
self.assertIn(
'Invalid write_affinity value:', cm.exception.message)
self.assertIn(label, cm.exception.message)
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
write_affinity = r1
[proxy-server:policy:0]
sorting_method = affinity
write_affinity = broken
"""
do_test(conf_sections, 'policy 0 (nulo)')
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
write_affinity = broken
[proxy-server:policy:0]
write_affinity = r1
"""
do_test(conf_sections, '(default)')
def test_per_policy_conf_invalid_write_affinity_node_count_value(self):
def do_test(conf_sections, label):
with self.assertRaises(ValueError) as cm:
self._write_conf_and_load_app(conf_sections)
self.assertIn('2* replicas', cm.exception.message)
self.assertIn('Invalid write_affinity_node_count value:',
cm.exception.message)
self.assertIn(label, cm.exception.message)
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
write_affinity_node_count = 2 * replicas
[proxy-server:policy:0]
sorting_method = affinity
write_affinity_node_count = 2* replicas
"""
do_test(conf_sections, 'policy 0 (nulo)')
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
write_affinity_node_count = 2* replicas
[proxy-server:policy:0]
write_affinity_node_count = 2 * replicas
"""
do_test(conf_sections, '(default)')
def test_per_policy_conf_bad_section_name(self):
def do_test(policy):
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
[proxy-server:policy:%s]
""" % policy
with self.assertRaises(ValueError) as cm:
self._write_conf_and_load_app(conf_sections)
self.assertEqual(
"Override config must refer to policy index: %r" % policy,
cm.exception.message)
do_test('')
do_test('uno')
do_test('0.0')
class TestProxyServerConfigStringLoading(TestProxyServerConfigLoading):
# The proxy may be loaded from a conf string rather than a conf file, for
# example when ContainerSync creates an InternalClient from a default
# config string. So repeat super-class tests using a string loader.
def _write_conf(self, conf_body):
# this is broken out to a method so that subclasses can override
return ConfigString(conf_body)
class BaseTestObjectController(object):
"""
A root of TestObjController that implements helper methods for child
TestObjControllers.
"""
def setUp(self):
# clear proxy logger result for each test
_test_servers[0].logger._clear()
def assert_status_map(self, method, statuses, expected, raise_exc=False):
with save_globals():
kwargs = {}
if raise_exc:
kwargs['raise_exc'] = raise_exc
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
try:
res = method(req)
except HTTPException as res:
pass
self.assertEqual(res.status_int, expected)
# repeat test
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
try:
res = method(req)
except HTTPException as res:
pass
self.assertEqual(res.status_int, expected)
def _sleep_enough(self, condition):
for sleeptime in (0.1, 1.0):
sleep(sleeptime)
if condition():
break
def put_container(self, policy_name, container_name):
# Note: only works if called with unpatched policies
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: 0\r\n'
'X-Storage-Token: t\r\n'
'X-Storage-Policy: %s\r\n'
'\r\n' % (container_name, policy_name))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2'
self.assertEqual(headers[:len(exp)], exp)
def _test_conditional_GET(self, policy):
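        # PUT an object and then exercise If-Match / If-None-Match handling
        # for both GET and HEAD using its etag, a non-matching etag and '*'.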
container_name = uuid.uuid4().hex
object_path = '/v1/a/%s/conditionals' % container_name
self.put_container(policy.name, container_name)
obj = 'this object has an etag and is otherwise unimportant'
etag = md5(obj).hexdigest()
not_etag = md5(obj + "blahblah").hexdigest()
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (object_path, len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
for verb, body in (('GET', obj), ('HEAD', '')):
# If-Match
req = Request.blank(
object_path,
environ={'REQUEST_METHOD': verb},
headers={'If-Match': etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, body)
self.assertEqual(etag, resp.headers.get('etag'))
self.assertEqual('bytes', resp.headers.get('accept-ranges'))
req = Request.blank(
object_path,
environ={'REQUEST_METHOD': verb},
headers={'If-Match': not_etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 412)
self.assertEqual(etag, resp.headers.get('etag'))
req = Request.blank(
object_path,
environ={'REQUEST_METHOD': verb},
headers={'If-Match': "*"})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, body)
self.assertEqual(etag, resp.headers.get('etag'))
self.assertEqual('bytes', resp.headers.get('accept-ranges'))
# If-None-Match
req = Request.blank(
object_path,
environ={'REQUEST_METHOD': verb},
headers={'If-None-Match': etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 304)
self.assertEqual(etag, resp.headers.get('etag'))
self.assertEqual('bytes', resp.headers.get('accept-ranges'))
req = Request.blank(
object_path,
environ={'REQUEST_METHOD': verb},
headers={'If-None-Match': not_etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, body)
self.assertEqual(etag, resp.headers.get('etag'))
self.assertEqual('bytes', resp.headers.get('accept-ranges'))
req = Request.blank(
object_path,
environ={'REQUEST_METHOD': verb},
headers={'If-None-Match': "*"})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 304)
self.assertEqual(etag, resp.headers.get('etag'))
self.assertEqual('bytes', resp.headers.get('accept-ranges'))
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
self.assertEqual(len(error_lines), 0) # sanity
self.assertEqual(len(warn_lines), 0) # sanity
@patch_policies([StoragePolicy(0, 'zero', True,
object_ring=FakeRing(base_port=3000))])
class TestReplicatedObjectController(
BaseTestObjectController, unittest.TestCase):
"""
Test suite for replication policy
"""
def setUp(self):
self.app = proxy_server.Application(
None, FakeMemcache(),
logger=debug_logger('proxy-ut'),
account_ring=FakeRing(),
container_ring=FakeRing())
super(TestReplicatedObjectController, self).setUp()
def tearDown(self):
self.app.account_ring.set_replicas(3)
self.app.container_ring.set_replicas(3)
for policy in POLICIES:
policy.object_ring = FakeRing(base_port=3000)
@unpatch_policies
def test_policy_IO(self):
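        # PUT an object into a container for each policy and verify that it
        # only appears on devices belonging to that policy's ring.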
def check_file(policy, cont, devs, check_val):
partition, nodes = policy.object_ring.get_nodes('a', cont, 'o')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
for dev in devs:
file = df_mgr.get_diskfile(dev, partition, 'a',
cont, 'o',
policy=policy)
                if check_val is True:
                    file.open()
                else:
                    # the object should not exist on these devices
                    self.assertRaises(DiskFileNotExist, file.open)
prolis = _test_sockets[0]
prosrv = _test_servers[0]
# check policy 0: put file on c, read it back, check loc on disk
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = 'test_object0'
path = '/v1/a/c/o'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: text/plain\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
check_file(POLICIES[0], 'c', ['sda1', 'sdb1'], True)
check_file(POLICIES[0], 'c', ['sdc1', 'sdd1', 'sde1', 'sdf1'], False)
# check policy 1: put file on c1, read it back, check loc on disk
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
path = '/v1/a/c1/o'
obj = 'test_object1'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: text/plain\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
check_file(POLICIES[1], 'c1', ['sdc1', 'sdd1'], True)
check_file(POLICIES[1], 'c1', ['sda1', 'sdb1', 'sde1', 'sdf1'], False)
# check policy 2: put file on c2, read it back, check loc on disk
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
path = '/v1/a/c2/o'
obj = 'test_object2'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: text/plain\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
check_file(POLICIES[2], 'c2', ['sde1', 'sdf1'], True)
check_file(POLICIES[2], 'c2', ['sda1', 'sdb1', 'sdc1', 'sdd1'], False)
@unpatch_policies
def test_policy_IO_override(self):
if hasattr(_test_servers[-1], '_filesystem'):
# ironically, the _filesystem attribute on the object server means
# the in-memory diskfile is in use, so this test does not apply
return
prosrv = _test_servers[0]
# validate container policy is 1
req = Request.blank('/v1/a/c1', method='HEAD')
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 204) # sanity check
self.assertEqual(POLICIES[1].name, res.headers['x-storage-policy'])
# check overrides: put it in policy 2 (not where the container says)
req = Request.blank(
'/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': BytesIO(b"hello")},
headers={'Content-Type': 'text/plain',
'Content-Length': '5',
'X-Backend-Storage-Policy-Index': '2'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 201) # sanity check
# go to disk to make sure it's there
partition, nodes = prosrv.get_object_ring(2).get_nodes(
'a', 'c1', 'wrong-o')
node = nodes[0]
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
df = df_mgr.get_diskfile(node['device'], partition, 'a',
'c1', 'wrong-o', policy=POLICIES[2])
with df.open():
contents = ''.join(df.reader())
self.assertEqual(contents, "hello")
# can't get it from the normal place
req = Request.blank('/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 404) # sanity check
# but we can get it from policy 2
req = Request.blank('/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'text/plain',
'X-Backend-Storage-Policy-Index': '2'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, 'hello')
# and we can delete it the same way
req = Request.blank('/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Content-Type': 'text/plain',
'X-Backend-Storage-Policy-Index': '2'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 204)
df = df_mgr.get_diskfile(node['device'], partition, 'a',
'c1', 'wrong-o', policy=POLICIES[2])
try:
df.open()
except DiskFileNotExist as e:
self.assertGreater(float(e.timestamp), 0)
else:
self.fail('did not raise DiskFileNotExist')
@unpatch_policies
def test_GET_newest_large_file(self):
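        # PUT a 1 MiB object over a raw socket, then GET it back with
        # X-Newest: true and verify the proxy returns the full body intact.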
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = 'a' * (1024 * 1024)
path = '/v1/a/c/o.large'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'application/octet-stream',
'X-Newest': 'true'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
@unpatch_policies
def test_GET_ranges(self):
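        # PUT a 5800-byte object, then exercise a single byte-range GET
        # (expect 206 with the sliced body) and a multi-range GET (expect a
        # multipart/byteranges response with one MIME part per range).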
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = (''.join(
('beans lots of beans lots of beans lots of beans yeah %04d ' % i)
for i in range(100)))
path = '/v1/a/c/o.beans'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# one byte range
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=10-200'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 206)
self.assertEqual(res.body, obj[10:201])
# multiple byte ranges
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=10-200,1000-1099,4123-4523'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 206)
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges')
boundary = dict(params).get('boundary')
self.assertIsNotNone(boundary)
got_mime_docs = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(res.body),
boundary):
headers = parse_mime_headers(mime_doc_fh)
body = mime_doc_fh.read()
got_mime_docs.append((headers, body))
self.assertEqual(len(got_mime_docs), 3)
first_range_headers = got_mime_docs[0][0]
first_range_body = got_mime_docs[0][1]
self.assertEqual(first_range_headers['Content-Range'],
'bytes 10-200/5800')
self.assertEqual(first_range_body, obj[10:201])
second_range_headers = got_mime_docs[1][0]
second_range_body = got_mime_docs[1][1]
self.assertEqual(second_range_headers['Content-Range'],
'bytes 1000-1099/5800')
self.assertEqual(second_range_body, obj[1000:1100])
        third_range_headers = got_mime_docs[2][0]
        third_range_body = got_mime_docs[2][1]
        self.assertEqual(third_range_headers['Content-Range'],
                         'bytes 4123-4523/5800')
        self.assertEqual(third_range_body, obj[4123:4524])
@unpatch_policies
def test_GET_bad_range_zero_byte(self):
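        # A syntactically invalid Range header on a zero-byte object is
        # ignored: the GET returns 200 with an empty body.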
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
path = '/v1/a/c/o.zerobyte'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: 0\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (path,))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# bad byte-range
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=spaghetti-carbonara'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, '')
# not a byte-range
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'Kotta'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, '')
@unpatch_policies
def test_GET_ranges_resuming(self):
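        # End-to-end check of GET resumption: http_response_to_document_iters
        # is patched so the first backend read blows up with ChunkReadTimeout
        # after a set number of bytes, and the proxy must resume the single-
        # and multi-range GETs from another node without the client noticing.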
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = (''.join(
('Smurf! The smurfing smurf is completely smurfed. %03d ' % i)
for i in range(1000)))
path = '/v1/a/c/o.smurfs'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/smurftet-stream\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
kaboomed = [0]
bytes_before_timeout = [None]
class FileLikeKaboom(object):
def __init__(self, inner_file_like):
self.inner_file_like = inner_file_like
# close(), etc.
def __getattr__(self, attr):
return getattr(self.inner_file_like, attr)
def readline(self, *a, **kw):
if bytes_before_timeout[0] <= 0:
kaboomed[0] += 1
raise ChunkReadTimeout(None)
result = self.inner_file_like.readline(*a, **kw)
if len(result) > bytes_before_timeout[0]:
result = result[:bytes_before_timeout[0]]
bytes_before_timeout[0] -= len(result)
return result
def read(self, length=None):
result = self.inner_file_like.read(length)
if bytes_before_timeout[0] <= 0:
kaboomed[0] += 1
raise ChunkReadTimeout(None)
if len(result) > bytes_before_timeout[0]:
result = result[:bytes_before_timeout[0]]
bytes_before_timeout[0] -= len(result)
return result
orig_hrtdi = swift.common.request_helpers. \
http_response_to_document_iters
# Use this to mock out http_response_to_document_iters. On the first
# call, the result will be sabotaged to blow up with
# ChunkReadTimeout after some number of bytes are read. On
# subsequent calls, no sabotage will be added.
def sabotaged_hrtdi(*a, **kw):
resp_parts = orig_hrtdi(*a, **kw)
for sb, eb, l, h, range_file in resp_parts:
if bytes_before_timeout[0] <= 0:
# simulate being unable to read MIME part of
# multipart/byteranges response
kaboomed[0] += 1
raise ChunkReadTimeout(None)
boomer = FileLikeKaboom(range_file)
yield sb, eb, l, h, boomer
sabotaged = [False]
def single_sabotage_hrtdi(*a, **kw):
if not sabotaged[0]:
sabotaged[0] = True
return sabotaged_hrtdi(*a, **kw)
else:
return orig_hrtdi(*a, **kw)
# We want sort of an end-to-end test of object resuming, so what we
# do is mock out stuff so the proxy thinks it only read a certain
# number of bytes before it got a timeout.
bytes_before_timeout[0] = 300
with mock.patch.object(proxy_base,
'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=0-500'})
res = req.get_response(prosrv)
body = res.body # read the whole thing
self.assertEqual(kaboomed[0], 1) # sanity check
self.assertEqual(res.status_int, 206)
self.assertEqual(len(body), 501)
self.assertEqual(body, obj[:501])
# Sanity-check for multi-range resume: make sure we actually break
# in the middle of the second byterange. This test is partially
# about what happens when all the object servers break at once, and
# partially about validating all these mocks we do. After all, the
# point of resuming is that the client can't tell anything went
# wrong, so we need a test where we can't resume and something
# *does* go wrong so we can observe it.
bytes_before_timeout[0] = 700
kaboomed[0] = 0
sabotaged[0] = False
prosrv._error_limiting = {} # clear out errors
with mock.patch.object(proxy_base,
'http_response_to_document_iters',
sabotaged_hrtdi): # perma-broken
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = ''
try:
for chunk in res.app_iter:
body += chunk
except ChunkReadTimeout:
pass
self.assertEqual(res.status_int, 206)
self.assertGreater(kaboomed[0], 0) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
self.assertIsNotNone(boundary) # sanity check
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(body),
boundary):
parse_mime_headers(mime_doc_fh)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(len(got_byteranges[1]), 199) # partial
# Multi-range resume, resuming in the middle of the first byterange
bytes_before_timeout[0] = 300
kaboomed[0] = 0
sabotaged[0] = False
prosrv._error_limiting = {} # clear out errors
with mock.patch.object(proxy_base,
'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = ''.join(res.app_iter)
self.assertEqual(res.status_int, 206)
self.assertEqual(kaboomed[0], 1) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
        self.assertIsNotNone(boundary)  # sanity check
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(body),
boundary):
parse_mime_headers(mime_doc_fh)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 3)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(got_byteranges[0], obj[:501])
self.assertEqual(len(got_byteranges[1]), 501)
self.assertEqual(got_byteranges[1], obj[1000:1501])
self.assertEqual(len(got_byteranges[2]), 501)
self.assertEqual(got_byteranges[2], obj[2000:2501])
# Multi-range resume, first GET dies in the middle of the second set
# of MIME headers
bytes_before_timeout[0] = 501
kaboomed[0] = 0
sabotaged[0] = False
prosrv._error_limiting = {} # clear out errors
with mock.patch.object(proxy_base,
'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = ''.join(res.app_iter)
self.assertEqual(res.status_int, 206)
self.assertGreaterEqual(kaboomed[0], 1) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
self.assertIsNotNone(boundary) # sanity check
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(body),
boundary):
parse_mime_headers(mime_doc_fh)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 3)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(got_byteranges[0], obj[:501])
self.assertEqual(len(got_byteranges[1]), 501)
self.assertEqual(got_byteranges[1], obj[1000:1501])
self.assertEqual(len(got_byteranges[2]), 501)
self.assertEqual(got_byteranges[2], obj[2000:2501])
# Multi-range resume, first GET dies in the middle of the second
# byterange
bytes_before_timeout[0] = 750
kaboomed[0] = 0
sabotaged[0] = False
prosrv._error_limiting = {} # clear out errors
with mock.patch.object(proxy_base,
'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = ''.join(res.app_iter)
self.assertEqual(res.status_int, 206)
self.assertGreaterEqual(kaboomed[0], 1) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
self.assertIsNotNone(boundary) # sanity check
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(body),
boundary):
parse_mime_headers(mime_doc_fh)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 3)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(got_byteranges[0], obj[:501])
self.assertEqual(len(got_byteranges[1]), 501)
self.assertEqual(got_byteranges[1], obj[1000:1501])
self.assertEqual(len(got_byteranges[2]), 501)
self.assertEqual(got_byteranges[2], obj[2000:2501])
@unpatch_policies
def test_conditional_GET_replication(self):
policy = POLICIES[0]
self.assertEqual('replication', policy.policy_type) # sanity
self._test_conditional_GET(policy)
def test_PUT_expect_header_zero_content_length(self):
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
if 'expect' in headers or 'Expect' in headers:
test_errors.append('Expect was in headers for object '
'server!')
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
# The (201, Exception('test')) tuples in there have the effect of
# changing the status of the initial expect response. The default
# expect response from FakeConn for 201 is 100.
# But the object server won't send a 100 continue line if the
            # client doesn't send an expect 100 header (as is the case with
            # zero byte PUTs, as validated by this test); nevertheless the
            # object controller calls getexpect without prejudice. In this
# case the status from the response shows up early in getexpect
# instead of having to wait until getresponse. The Exception is
# in there to ensure that the object controller also *uses* the
# result of getexpect instead of calling getresponse in which case
# our FakeConn will blow up.
success_codes = [(201, Exception('test'))] * 3
set_http_connect(200, 200, *success_codes,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
self.app.memcache.store = {}
res = controller.PUT(req)
self.assertEqual(test_errors, [])
self.assertTrue(res.status.startswith('201 '), res.status)
def test_PUT_expect_header_nonzero_content_length(self):
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
if 'Expect' not in headers:
test_errors.append('Expect was not in headers for '
'non-zero byte PUT!')
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o.jpg')
            # the (100, 201) tuples in there are just being extra explicit
            # about FakeConn returning the 100 Continue status when the
            # object controller calls getexpect, which is FakeConn's
            # default for 201 when no expect_status is specified.
success_codes = [(100, 201)] * 3
set_http_connect(200, 200, *success_codes,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 1
req.body = 'a'
self.app.update_request(req)
self.app.memcache.store = {}
res = controller.PUT(req)
self.assertEqual(test_errors, [])
self.assertTrue(res.status.startswith('201 '))
def _check_PUT_respects_write_affinity(self, conf, policy,
expected_region):
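        # Helper: build an app from ``conf``, PUT an object under ``policy``
        # and assert all three object writes landed in ``expected_region``
        # (FakeRing encodes the region in the parity of the node port).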
written_to = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
written_to.append((ipaddr, port, device))
# mock shuffle to be a no-op to ensure that the only way nodes would
# not be used in ring order is if affinity is respected.
with mock.patch('swift.proxy.server.shuffle', lambda x: x):
app = proxy_server.Application(
conf, FakeMemcache(),
logger=debug_logger('proxy-ut'),
account_ring=FakeRing(),
container_ring=FakeRing())
with save_globals():
object_ring = app.get_object_ring(policy)
object_ring.max_more_nodes = 100
controller = \
ReplicatedObjectController(
app, 'a', 'c', 'o.jpg')
# requests go to acc, con, obj, obj, obj
set_http_connect(200, 200, 201, 201, 201,
give_connect=test_connect)
req = Request.blank(
'/v1/a/c/o.jpg', method='PUT', body='a',
headers={'X-Backend-Storage-Policy-Index': str(policy)})
app.memcache.store = {}
res = controller.PUT(req)
self.assertTrue(res.status.startswith('201 '))
self.assertEqual(3, len(written_to))
for ip, port, device in written_to:
            # this is kind of a hokey test, but in FakeRing, the port is even
            # when the region is 0 and odd when the region is 1, so this
            # asserts that we only wrote to nodes in the expected region.
self.assertEqual(expected_region, port % 2)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing()),
StoragePolicy(1, 'one', False, object_ring=FakeRing())])
def test_PUT_respects_write_affinity(self):
# nodes in fake ring order have r0z0, r1z1, r0z2
# Check default conf via proxy server conf
conf = {'write_affinity': 'r0'}
self._check_PUT_respects_write_affinity(conf, 0, 0)
# policy 0 and policy 1 have conf via per policy conf section
conf = {
'write_affinity': '',
'policy_config': {
'0': {'write_affinity': 'r0'},
'1': {'write_affinity': 'r1'}
}
}
self._check_PUT_respects_write_affinity(conf, 0, 0)
self._check_PUT_respects_write_affinity(conf, 1, 1)
        # policy 0 conf via per policy conf section overrides proxy server
        # conf; policy 1 uses the default
conf = {
'write_affinity': 'r0',
'policy_config': {
'0': {'write_affinity': 'r1'}
}
}
self._check_PUT_respects_write_affinity(conf, 0, 1)
self._check_PUT_respects_write_affinity(conf, 1, 0)
def test_PUT_respects_write_affinity_with_507s(self):
written_to = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
written_to.append((ipaddr, port, device))
with save_globals():
def is_r0(node):
return node['region'] == 0
object_ring = self.app.get_object_ring(0)
object_ring.max_more_nodes = 100
policy_options = self.app.get_policy_options(POLICIES[0])
policy_options.write_affinity_is_local_fn = is_r0
policy_options.write_affinity_node_count_fn = lambda r: 3
controller = \
ReplicatedObjectController(
self.app, 'a', 'c', 'o.jpg')
self.app.error_limit(
object_ring.get_part_nodes(1)[0], 'test')
set_http_connect(200, 200, # account, container
201, 201, 201, # 3 working backends
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 1
req.body = 'a'
self.app.memcache.store = {}
res = controller.PUT(req)
self.assertTrue(res.status.startswith('201 '))
# this is kind of a hokey test, but in FakeRing, the port is even when
# the region is 0, and odd when the region is 1, so this test asserts
# that we wrote to 2 nodes in region 0, then went to 1 non-r0 node.
def get_region(x):
return x[1] % 2 # it's (ip, port, device)
self.assertEqual([0, 0, 1], [get_region(x) for x in written_to])
@unpatch_policies
def test_PUT_no_etag_fallocate(self):
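        # PUT an object without an ETag header and verify the object servers
        # still fallocate() space for the body (once per object server).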
with mock.patch('swift.obj.diskfile.fallocate') as mock_fallocate:
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = 'hemoleucocytic-surfactant'
fd.write('PUT /v1/a/c/o HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# one for each obj server; this test has 2
self.assertEqual(len(mock_fallocate.mock_calls), 2)
@unpatch_policies
def test_PUT_message_length_using_content_length(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = 'j' * 20
fd.write('PUT /v1/a/c/o.content-length HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_message_length_using_transfer_encoding(self):
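        # A chunked PUT with no Content-Length is accepted (201).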
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'2\r\n'
'oh\r\n'
'4\r\n'
' say\r\n'
'4\r\n'
' can\r\n'
'4\r\n'
' you\r\n'
'4\r\n'
' see\r\n'
'3\r\n'
' by\r\n'
'4\r\n'
' the\r\n'
'8\r\n'
' dawns\'\n\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_message_length_using_both(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Content-Length: 33\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'2\r\n'
'oh\r\n'
'4\r\n'
' say\r\n'
'4\r\n'
' can\r\n'
'4\r\n'
' you\r\n'
'4\r\n'
' see\r\n'
'3\r\n'
' by\r\n'
'4\r\n'
' the\r\n'
'8\r\n'
' dawns\'\n\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_bad_message_length(self):
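        # Content-Length plus a bare, unsupported Transfer-Encoding (gzip)
        # is rejected with 400 Bad Request.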
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Content-Length: 33\r\n'
'Transfer-Encoding: gzip\r\n\r\n'
'2\r\n'
'oh\r\n'
'4\r\n'
' say\r\n'
'4\r\n'
' can\r\n'
'4\r\n'
' you\r\n'
'4\r\n'
' see\r\n'
'3\r\n'
' by\r\n'
'4\r\n'
' the\r\n'
'8\r\n'
' dawns\'\n\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 400'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_message_length_unsup_xfr_encoding(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Content-Length: 33\r\n'
'Transfer-Encoding: gzip,chunked\r\n\r\n'
'2\r\n'
'oh\r\n'
'4\r\n'
' say\r\n'
'4\r\n'
' can\r\n'
'4\r\n'
' you\r\n'
'4\r\n'
' see\r\n'
'3\r\n'
' by\r\n'
'4\r\n'
' the\r\n'
'8\r\n'
' dawns\'\n\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 501'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_message_length_too_large(self):
with mock.patch('swift.common.constraints.MAX_FILE_SIZE', 10):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Content-Length: 33\r\n\r\n'
'oh say can you see by the dawns\'\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 413'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_POST_last_modified(self):
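        # PUT an object and check that the Last-Modified returned by the PUT
        # matches a subsequent HEAD and drives If-(Un)Modified-Since GETs;
        # then POST the object and verify Last-Modified changes.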
prolis = _test_sockets[0]
def _do_HEAD():
# do a HEAD to get reported last modified time
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a/c/o.last_modified HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
last_modified_head = [line for line in headers.split('\r\n')
if lm_hdr in line][0][len(lm_hdr):]
return last_modified_head
def _do_conditional_GET_checks(last_modified_time):
# check If-(Un)Modified-Since GETs
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/c/o.last_modified HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'If-Modified-Since: %s\r\n'
'X-Storage-Token: t\r\n\r\n' % last_modified_time)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 304'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/c/o.last_modified HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'If-Unmodified-Since: %s\r\n'
'X-Storage-Token: t\r\n\r\n' % last_modified_time)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
# PUT the object
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.last_modified HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
lm_hdr = 'Last-Modified: '
self.assertEqual(headers[:len(exp)], exp)
last_modified_put = [line for line in headers.split('\r\n')
if lm_hdr in line][0][len(lm_hdr):]
last_modified_head = _do_HEAD()
self.assertEqual(last_modified_put, last_modified_head)
_do_conditional_GET_checks(last_modified_put)
# now POST to the object
# last-modified rounded in sec so sleep a sec to increment
sleep(1)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('POST /v1/a/c/o.last_modified HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 202'
self.assertEqual(headers[:len(exp)], exp)
for line in headers.split('\r\n'):
self.assertFalse(line.startswith(lm_hdr))
# last modified time will have changed due to POST
last_modified_head = _do_HEAD()
self.assertNotEqual(last_modified_put, last_modified_head)
_do_conditional_GET_checks(last_modified_head)
def test_PUT_auto_content_type(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_content_type(filename, expected):
                # The five responses here are for account_info() (HEAD to
                # account server), container_info() (HEAD to container server)
                # and three calls to _connect_put_node() (PUT to three object
                # servers)
set_http_connect(201, 201, 201, 201, 201,
give_content_type=lambda content_type:
self.assertEqual(content_type,
next(expected)))
                # We need to include a transfer-encoding to get past
                # constraints.check_object_creation()
req = Request.blank('/v1/a/c/%s' % filename, {},
headers={'transfer-encoding': 'chunked'})
self.app.update_request(req)
self.app.memcache.store = {}
res = controller.PUT(req)
# If we don't check the response here we could miss problems
# in PUT()
self.assertEqual(res.status_int, 201)
test_content_type('test.jpg', iter(['', '', 'image/jpeg',
'image/jpeg', 'image/jpeg']))
test_content_type('test.html', iter(['', '', 'text/html',
'text/html', 'text/html']))
test_content_type('test.css', iter(['', '', 'text/css',
'text/css', 'text/css']))
def test_custom_mime_types_files(self):
swift_dir = mkdtemp()
try:
with open(os.path.join(swift_dir, 'mime.types'), 'w') as fp:
fp.write('foo/bar foo\n')
proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), FakeLogger(),
FakeRing(), FakeRing())
self.assertEqual(proxy_server.mimetypes.guess_type('blah.foo')[0],
'foo/bar')
self.assertEqual(proxy_server.mimetypes.guess_type('blah.jpg')[0],
'image/jpeg')
finally:
rmtree(swift_dir, ignore_errors=True)
def test_PUT(self):
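        # The status tuples passed to test_status_map are the backend
        # responses in order (account, container, obj, obj, obj); the second
        # argument is the overall status the proxy PUT should return.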
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
set_http_connect(*statuses)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
self.app.memcache.store = {}
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, 201, 201), 201)
test_status_map((200, 200, 201, 201, 500), 201)
test_status_map((200, 200, 204, 404, 404), 404)
test_status_map((200, 200, 204, 500, 404), 503)
test_status_map((200, 200, 202, 202, 204), 204)
def test_PUT_connect_exceptions(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
set_http_connect(*statuses)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
try:
res = controller.PUT(req)
except HTTPException as res:
pass
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, 201, -1), 201) # connect exc
# connect errors
test_status_map((200, 200, Timeout(), 201, 201, ), 201)
test_status_map((200, 200, 201, 201, Exception()), 201)
# expect errors
test_status_map((200, 200, (Timeout(), None), 201, 201), 201)
test_status_map((200, 200, (Exception(), None), 201, 201), 201)
# response errors
test_status_map((200, 200, (100, Timeout()), 201, 201), 201)
test_status_map((200, 200, (100, Exception()), 201, 201), 201)
test_status_map((200, 200, 507, 201, 201), 201) # error limited
test_status_map((200, 200, -1, 201, -1), 503)
test_status_map((200, 200, 503, -1, 503), 503)
def test_PUT_send_exceptions(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
self.app.memcache.store = {}
set_http_connect(*statuses)
req = Request.blank('/v1/a/c/o.jpg',
environ={'REQUEST_METHOD': 'PUT'},
body='some data')
self.app.update_request(req)
try:
res = controller.PUT(req)
except HTTPException as res:
pass
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, -1, 201), 201)
test_status_map((200, 200, 201, -1, -1), 503)
test_status_map((200, 200, 503, 503, -1), 503)
def test_PUT_max_size(self):
with save_globals():
set_http_connect(201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {}, headers={
'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
'Content-Type': 'foo/bar'})
self.app.update_request(req)
res = controller.PUT(req)
self.assertEqual(res.status_int, 413)
def test_PUT_bad_content_type(self):
with save_globals():
set_http_connect(201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {}, headers={
'Content-Length': 0, 'Content-Type': 'foo/bar;swift_hey=45'})
self.app.update_request(req)
res = controller.PUT(req)
self.assertEqual(res.status_int, 400)
def test_PUT_getresponse_exceptions(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
self.app.memcache.store = {}
set_http_connect(*statuses)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
try:
res = controller.PUT(req)
except HTTPException as res:
pass
expected = str(expected)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
test_status_map((200, 200, 201, 201, -1), 201)
test_status_map((200, 200, 201, -1, -1), 503)
test_status_map((200, 200, 503, 503, -1), 503)
def test_POST(self):
with save_globals():
self.app.object_post_as_copy = False
def test_status_map(statuses, expected):
set_http_connect(*statuses)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {}, method='POST',
headers={'Content-Type': 'foo/bar'})
self.app.update_request(req)
res = req.get_response(self.app)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 202, 202, 202), 202)
test_status_map((200, 200, 202, 202, 500), 202)
test_status_map((200, 200, 202, 500, 500), 503)
test_status_map((200, 200, 202, 404, 500), 503)
test_status_map((200, 200, 202, 404, 404), 404)
test_status_map((200, 200, 404, 500, 500), 503)
test_status_map((200, 200, 404, 404, 404), 404)
@patch_policies([
StoragePolicy(0, 'zero', is_default=True, object_ring=FakeRing()),
StoragePolicy(1, 'one', object_ring=FakeRing()),
])
def test_POST_backend_headers(self):
# reset the router post patch_policies
self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
self.app.object_post_as_copy = False
self.app.sort_nodes = lambda nodes, *args, **kwargs: nodes
backend_requests = []
def capture_requests(ip, port, method, path, headers, *args,
**kwargs):
backend_requests.append((method, path, headers))
req = Request.blank('/v1/a/c/o', {}, method='POST',
headers={'X-Object-Meta-Color': 'Blue',
'Content-Type': 'text/plain'})
        # we want the container_info response to say a policy index of 1
resp_headers = {'X-Backend-Storage-Policy-Index': 1}
with mocked_http_conn(
200, 200, 202, 202, 202,
headers=resp_headers, give_connect=capture_requests
) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 202)
self.assertEqual(len(backend_requests), 5)
def check_request(req, method, path, headers=None):
req_method, req_path, req_headers = req
self.assertEqual(method, req_method)
# caller can ignore leading path parts
self.assertTrue(req_path.endswith(path),
'expected path to end with %s, it was %s' % (
path, req_path))
headers = headers or {}
# caller can ignore some headers
for k, v in headers.items():
self.assertEqual(req_headers[k], v)
account_request = backend_requests.pop(0)
check_request(account_request, method='HEAD', path='/sda/0/a')
container_request = backend_requests.pop(0)
check_request(container_request, method='HEAD', path='/sda/0/a/c')
# make sure backend requests included expected container headers
container_headers = {}
for request in backend_requests:
req_headers = request[2]
device = req_headers['x-container-device']
host = req_headers['x-container-host']
container_headers[device] = host
expectations = {
'method': 'POST',
'path': '/0/a/c/o',
'headers': {
'X-Container-Partition': '0',
'Connection': 'close',
'User-Agent': 'proxy-server %s' % os.getpid(),
'Host': 'localhost:80',
'Referer': 'POST http://localhost/v1/a/c/o',
'X-Object-Meta-Color': 'Blue',
'X-Backend-Storage-Policy-Index': '1'
},
}
check_request(request, **expectations)
expected = {}
for i, device in enumerate(['sda', 'sdb', 'sdc']):
expected[device] = '10.0.0.%d:100%d' % (i, i)
self.assertEqual(container_headers, expected)
# and again with policy override
self.app.memcache.store = {}
backend_requests = []
req = Request.blank('/v1/a/c/o', {}, method='POST',
headers={'X-Object-Meta-Color': 'Blue',
'Content-Type': 'text/plain',
'X-Backend-Storage-Policy-Index': 0})
with mocked_http_conn(
200, 200, 202, 202, 202,
headers=resp_headers, give_connect=capture_requests
) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 202)
self.assertEqual(len(backend_requests), 5)
for request in backend_requests[2:]:
expectations = {
'method': 'POST',
'path': '/0/a/c/o', # ignore device bit
'headers': {
'X-Object-Meta-Color': 'Blue',
'X-Backend-Storage-Policy-Index': '0',
}
}
check_request(request, **expectations)
def test_DELETE(self):
with save_globals():
def test_status_map(statuses, expected):
set_http_connect(*statuses)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'DELETE'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
test_status_map((200, 200, 204, 204, 204), 204)
test_status_map((200, 200, 204, 204, 500), 204)
test_status_map((200, 200, 204, 404, 404), 404)
test_status_map((200, 204, 500, 500, 404), 503)
test_status_map((200, 200, 404, 404, 404), 404)
test_status_map((200, 200, 400, 400, 400), 400)
def test_HEAD(self):
with save_globals():
def test_status_map(statuses, expected):
set_http_connect(*statuses)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
if expected < 400:
self.assertIn('x-works', res.headers)
self.assertEqual(res.headers['x-works'], 'yes')
self.assertIn('accept-ranges', res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
test_status_map((200, 200, 200, 404, 404), 200)
test_status_map((200, 200, 200, 500, 404), 200)
test_status_map((200, 200, 304, 500, 404), 304)
test_status_map((200, 200, 404, 404, 404), 404)
test_status_map((200, 200, 404, 404, 500), 404)
test_status_map((200, 200, 500, 500, 500), 503)
def test_HEAD_newest(self):
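        # With X-Newest: true, the HEAD response should carry the
        # last-modified value from whichever object server reported the
        # newest timestamp.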
with save_globals():
def test_status_map(statuses, expected, timestamps,
expected_timestamp):
set_http_connect(*statuses, timestamps=timestamps)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'},
headers={'x-newest': 'true'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
self.assertEqual(res.headers.get('last-modified'),
expected_timestamp)
# acct cont obj obj obj
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'2', '3'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '2'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '3',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, None), None)
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, '1'), '1')
test_status_map((200, 200, 404, 404, 200), 200, ('0', '0', None,
None, '1'), '1')
def test_GET_newest(self):
with save_globals():
def test_status_map(statuses, expected, timestamps,
expected_timestamp):
set_http_connect(*statuses, timestamps=timestamps)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'GET'},
headers={'x-newest': 'true'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
self.assertEqual(res.headers.get('last-modified'),
expected_timestamp)
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'2', '3'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '2'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '3',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, None), None)
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, '1'), '1')
with save_globals():
def test_status_map(statuses, expected, timestamps,
expected_timestamp):
set_http_connect(*statuses, timestamps=timestamps)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
self.assertEqual(res.headers.get('last-modified'),
expected_timestamp)
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'2', '3'), '1')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '2'), '1')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '1'), '1')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '3',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
'1', '2'), None)
def test_POST_meta_val_len(self):
with save_globals():
limit = constraints.MAX_META_VALUE_LENGTH
self.app.object_post_as_copy = False
ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 202, 202, 202)
# acct cont obj obj obj
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
'X-Object-Meta-Foo': 'x' * limit})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
set_http_connect(202, 202, 202)
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
'X-Object-Meta-Foo': 'x' * (limit + 1)})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_meta_authorize(self):
def authorize(req):
req.headers['X-Object-Meta-Foo'] = 'x' * (limit + 1)
return
with save_globals():
limit = constraints.MAX_META_VALUE_LENGTH
self.app.object_post_as_copy = False
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 202, 202, 202)
# acct cont obj obj obj
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
'X-Object-Meta-Foo': 'x'})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
res = controller.POST(req)
self.assertEqual(res.status_int, 400)
def test_POST_meta_key_len(self):
with save_globals():
limit = constraints.MAX_META_NAME_LENGTH
self.app.object_post_as_copy = False
set_http_connect(200, 200, 202, 202, 202)
# acct cont obj obj obj
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
('X-Object-Meta-' + 'x' * limit): 'x'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
set_http_connect(202, 202, 202)
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
('X-Object-Meta-' + 'x' * (limit + 1)): 'x'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_meta_count(self):
with save_globals():
limit = constraints.MAX_META_COUNT
headers = dict(
(('X-Object-Meta-' + str(i), 'a') for i in range(limit + 1)))
headers.update({'Content-Type': 'foo/bar'})
set_http_connect(202, 202, 202)
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers=headers)
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_meta_size(self):
with save_globals():
limit = constraints.MAX_META_OVERALL_SIZE
count = limit / 256 # enough to cause the limit to be reached
headers = dict(
(('X-Object-Meta-' + str(i), 'a' * 256)
for i in range(count + 1)))
headers.update({'Content-Type': 'foo/bar'})
set_http_connect(202, 202, 202)
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers=headers)
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_PUT_not_autodetect_content_type(self):
with save_globals():
headers = {'Content-Type': 'something/right', 'Content-Length': 0}
it_worked = []
def verify_content_type(ipaddr, port, device, partition,
method, path, headers=None,
query_string=None):
if path == '/a/c/o.html':
it_worked.append(
headers['Content-Type'].startswith('something/right'))
set_http_connect(204, 204, 201, 201, 201,
give_connect=verify_content_type)
req = Request.blank('/v1/a/c/o.html', {'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
req.get_response(self.app)
self.assertNotEqual(it_worked, [])
self.assertTrue(all(it_worked))
def test_PUT_autodetect_content_type(self):
with save_globals():
headers = {'Content-Type': 'something/wrong', 'Content-Length': 0,
'X-Detect-Content-Type': 'True'}
it_worked = []
def verify_content_type(ipaddr, port, device, partition,
method, path, headers=None,
query_string=None):
if path == '/a/c/o.html':
it_worked.append(
headers['Content-Type'].startswith('text/html'))
set_http_connect(204, 204, 201, 201, 201,
give_connect=verify_content_type)
req = Request.blank('/v1/a/c/o.html', {'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
req.get_response(self.app)
self.assertNotEqual(it_worked, [])
self.assertTrue(all(it_worked))
def test_client_timeout(self):
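        # A slow client body still within client_timeout succeeds (201);
        # after dropping client_timeout to 0.05s the same slow body is
        # rejected with 408 Request Timeout.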
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
class SlowBody(object):
def __init__(self):
self.sent = 0
def read(self, size=-1):
if self.sent < 4:
sleep(0.1)
self.sent += 1
return ' '
return ''
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': SlowBody()},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'})
self.app.update_request(req)
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
self.app.client_timeout = 0.05
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': SlowBody()},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'})
self.app.update_request(req)
set_http_connect(201, 201, 201)
# obj obj obj
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 408)
def test_client_disconnect(self):
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
class DisconnectedBody(object):
def __init__(self):
self.sent = 0
def read(self, size=-1):
return ''
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': DisconnectedBody()},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'})
self.app.update_request(req)
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
def test_node_read_timeout(self):
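        # A backend that responds slower than recoverable_node_timeout causes
        # a ChunkReadTimeout while reading the GET body; a fast-enough
        # backend does not.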
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
set_http_connect(200, 200, 200, slow=0.1)
req.sent_size = 0
resp = req.get_response(self.app)
got_exc = False
try:
resp.body
except ChunkReadTimeout:
got_exc = True
self.assertFalse(got_exc)
self.app.recoverable_node_timeout = 0.1
set_http_connect(200, 200, 200, slow=1.0)
resp = req.get_response(self.app)
got_exc = False
try:
resp.body
except ChunkReadTimeout:
got_exc = True
self.assertTrue(got_exc)
def test_node_read_timeout_retry(self):
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
self.app.recoverable_node_timeout = 0.1
set_http_connect(200, 200, 200, slow=[1.0, 1.0, 1.0])
resp = req.get_response(self.app)
got_exc = False
try:
self.assertEqual('', resp.body)
except ChunkReadTimeout:
got_exc = True
self.assertTrue(got_exc)
set_http_connect(200, 200, 200, body='lalala',
slow=[1.0, 1.0])
resp = req.get_response(self.app)
got_exc = False
try:
self.assertEqual(resp.body, 'lalala')
except ChunkReadTimeout:
got_exc = True
self.assertFalse(got_exc)
set_http_connect(200, 200, 200, body='lalala',
slow=[1.0, 1.0], etags=['a', 'a', 'a'])
resp = req.get_response(self.app)
got_exc = False
try:
self.assertEqual(resp.body, 'lalala')
except ChunkReadTimeout:
got_exc = True
self.assertFalse(got_exc)
set_http_connect(200, 200, 200, body='lalala',
slow=[1.0, 1.0], etags=['a', 'b', 'a'])
resp = req.get_response(self.app)
got_exc = False
try:
self.assertEqual(resp.body, 'lalala')
except ChunkReadTimeout:
got_exc = True
self.assertFalse(got_exc)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
set_http_connect(200, 200, 200, body='lalala',
slow=[1.0, 1.0], etags=['a', 'b', 'b'])
resp = req.get_response(self.app)
got_exc = False
try:
resp.body
except ChunkReadTimeout:
got_exc = True
self.assertTrue(got_exc)
def test_node_write_timeout(self):
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'},
body=' ')
self.app.update_request(req)
set_http_connect(200, 200, 201, 201, 201, slow=0.1)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
self.app.node_timeout = 0.1
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'},
body=' ')
self.app.update_request(req)
set_http_connect(201, 201, 201, slow=1.0)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_node_request_setting(self):
baseapp = proxy_server.Application({'request_node_count': '3'},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
self.assertEqual(baseapp.request_node_count(3), 3)
def test_iter_nodes(self):
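        # iter_nodes should yield primaries plus handoffs up to
        # request_node_count, logging a handoff warning and bumping the
        # handoff counters for each error-limited primary it has to skip.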
with save_globals():
try:
object_ring = self.app.get_object_ring(None)
object_ring.max_more_nodes = 2
partition, nodes = object_ring.get_nodes('account',
'container',
'object')
collected_nodes = []
for node in self.app.iter_nodes(object_ring,
partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 5)
object_ring.max_more_nodes = 6
self.app.request_node_count = lambda r: 20
partition, nodes = object_ring.get_nodes('account',
'container',
'object')
collected_nodes = []
for node in self.app.iter_nodes(object_ring,
partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 9)
# zero error-limited primary nodes -> no handoff warnings
self.app.log_handoffs = True
self.app.logger = FakeLogger()
self.app.request_node_count = lambda r: 7
object_ring.max_more_nodes = 20
partition, nodes = object_ring.get_nodes('account',
'container',
'object')
collected_nodes = []
for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 7)
self.assertEqual(self.app.logger.log_dict['warning'], [])
self.assertEqual(self.app.logger.get_increments(), [])
# one error-limited primary node -> one handoff warning
self.app.log_handoffs = True
self.app.logger = FakeLogger()
self.app.request_node_count = lambda r: 7
self.app._error_limiting = {} # clear out errors
set_node_errors(self.app, object_ring._devs[0], 999,
last_error=(2 ** 63 - 1))
collected_nodes = []
for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 7)
self.assertEqual(self.app.logger.log_dict['warning'], [
(('Handoff requested (5)',), {})])
self.assertEqual(self.app.logger.get_increments(),
['handoff_count'])
# two error-limited primary nodes -> two handoff warnings
self.app.log_handoffs = True
self.app.logger = FakeLogger()
self.app.request_node_count = lambda r: 7
self.app._error_limiting = {} # clear out errors
for i in range(2):
set_node_errors(self.app, object_ring._devs[i], 999,
last_error=(2 ** 63 - 1))
collected_nodes = []
for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 7)
self.assertEqual(self.app.logger.log_dict['warning'], [
(('Handoff requested (5)',), {}),
(('Handoff requested (6)',), {})])
self.assertEqual(self.app.logger.get_increments(),
['handoff_count',
'handoff_count'])
# all error-limited primary nodes -> four handoff warnings,
# plus a handoff-all metric
self.app.log_handoffs = True
self.app.logger = FakeLogger()
self.app.request_node_count = lambda r: 10
object_ring.set_replicas(4) # otherwise we run out of handoffs
self.app._error_limiting = {} # clear out errors
for i in range(4):
set_node_errors(self.app, object_ring._devs[i], 999,
last_error=(2 ** 63 - 1))
collected_nodes = []
for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 10)
self.assertEqual(self.app.logger.log_dict['warning'], [
(('Handoff requested (7)',), {}),
(('Handoff requested (8)',), {}),
(('Handoff requested (9)',), {}),
(('Handoff requested (10)',), {})])
self.assertEqual(self.app.logger.get_increments(),
['handoff_count',
'handoff_count',
'handoff_count',
'handoff_count',
'handoff_all_count'])
finally:
object_ring.max_more_nodes = 0
def test_iter_nodes_calls_sort_nodes(self):
with mock.patch.object(self.app, 'sort_nodes') as sort_nodes:
object_ring = self.app.get_object_ring(None)
for node in self.app.iter_nodes(object_ring, 0):
pass
sort_nodes.assert_called_once_with(
object_ring.get_part_nodes(0), policy=None)
def test_iter_nodes_skips_error_limited(self):
with mock.patch.object(self.app, 'sort_nodes',
lambda n, *args, **kwargs: n):
object_ring = self.app.get_object_ring(None)
first_nodes = list(self.app.iter_nodes(object_ring, 0))
second_nodes = list(self.app.iter_nodes(object_ring, 0))
self.assertIn(first_nodes[0], second_nodes)
self.app.error_limit(first_nodes[0], 'test')
second_nodes = list(self.app.iter_nodes(object_ring, 0))
self.assertNotIn(first_nodes[0], second_nodes)
def test_iter_nodes_gives_extra_if_error_limited_inline(self):
object_ring = self.app.get_object_ring(None)
with mock.patch.object(self.app, 'sort_nodes',
lambda n, *args, **kwargs: n), \
mock.patch.object(self.app, 'request_node_count',
lambda r: 6), \
mock.patch.object(object_ring, 'max_more_nodes', 99):
first_nodes = list(self.app.iter_nodes(object_ring, 0))
second_nodes = []
for node in self.app.iter_nodes(object_ring, 0):
if not second_nodes:
self.app.error_limit(node, 'test')
second_nodes.append(node)
self.assertEqual(len(first_nodes), 6)
self.assertEqual(len(second_nodes), 7)
def test_iter_nodes_with_custom_node_iter(self):
object_ring = self.app.get_object_ring(None)
node_list = [dict(id=n, ip='1.2.3.4', port=n, device='D')
for n in range(10)]
with mock.patch.object(self.app, 'sort_nodes', lambda n, *args, **kwargs: n), \
mock.patch.object(self.app, 'request_node_count',
lambda r: 3):
got_nodes = list(self.app.iter_nodes(object_ring, 0,
node_iter=iter(node_list)))
self.assertEqual(node_list[:3], got_nodes)
with mock.patch.object(self.app, 'sort_nodes', lambda n, *args, **kwargs: n), \
mock.patch.object(self.app, 'request_node_count',
lambda r: 1000000):
got_nodes = list(self.app.iter_nodes(object_ring, 0,
node_iter=iter(node_list)))
self.assertEqual(node_list, got_nodes)
def test_best_response_sets_headers(self):
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3,
'Object', headers=[{'X-Test': '1'},
{'X-Test': '2'},
{'X-Test': '3'}])
self.assertEqual(resp.headers['X-Test'], '1')
def test_best_response_sets_etag(self):
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3,
'Object')
self.assertIsNone(resp.etag)
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3,
'Object',
etag='68b329da9893e34099c7d8ad5cb9c940'
)
self.assertEqual(resp.etag, '68b329da9893e34099c7d8ad5cb9c940')
def test_proxy_passes_content_type(self):
with save_globals():
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
set_http_connect(200, 200, 200)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_type, 'x-application/test')
set_http_connect(200, 200, 200)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 0)
set_http_connect(200, 200, 200, slow=True)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 4)
def test_proxy_passes_content_length_on_head(self):
with save_globals():
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 0)
set_http_connect(200, 200, 200, slow=True)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 4)
def test_error_limiting(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l, *args, **kwargs: l
object_ring = controller.app.get_object_ring(None)
self.assert_status_map(controller.HEAD, (200, 200, 503, 200, 200),
200)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]), 2)
self.assertTrue(
node_last_error(controller.app, object_ring.devs[0])
is not None)
for _junk in range(self.app.error_suppression_limit):
self.assert_status_map(controller.HEAD, (200, 200, 503, 503,
503), 503)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]),
self.app.error_suppression_limit + 1)
self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200),
503)
self.assertTrue(
node_last_error(controller.app, object_ring.devs[0])
is not None)
self.assert_status_map(controller.PUT, (200, 200, 200, 201, 201,
201), 503)
self.assert_status_map(controller.POST,
(200, 200, 200, 200, 200, 200, 202, 202,
202), 503)
self.assert_status_map(controller.DELETE,
(200, 200, 200, 204, 204, 204), 503)
self.app.error_suppression_interval = -300
self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200),
200)
self.assertRaises(BaseException,
self.assert_status_map, controller.DELETE,
(200, 200, 200, 204, 204, 204), 503,
raise_exc=True)
def test_error_limiting_survives_ring_reload(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l, *args, **kwargs: l
object_ring = controller.app.get_object_ring(None)
self.assert_status_map(controller.HEAD, (200, 200, 503, 200, 200),
200)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]), 2)
self.assertTrue(
node_last_error(controller.app, object_ring.devs[0])
is not None)
for _junk in range(self.app.error_suppression_limit):
self.assert_status_map(controller.HEAD, (200, 200, 503, 503,
503), 503)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]),
self.app.error_suppression_limit + 1)
# wipe out any state in the ring
for policy in POLICIES:
policy.object_ring = FakeRing(base_port=3000)
# and we still get an error, which proves that the
# error-limiting info survived a ring reload
self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200),
503)
def test_PUT_error_limiting(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l, *args, **kwargs: l
object_ring = controller.app.get_object_ring(None)
# acc con obj obj obj
self.assert_status_map(controller.PUT, (200, 200, 503, 200, 200),
200)
# 2, not 1, because assert_status_map() calls the method twice
odevs = object_ring.devs
self.assertEqual(node_error_count(controller.app, odevs[0]), 2)
self.assertEqual(node_error_count(controller.app, odevs[1]), 0)
self.assertEqual(node_error_count(controller.app, odevs[2]), 0)
self.assertTrue(
node_last_error(controller.app, odevs[0]) is not None)
self.assertTrue(node_last_error(controller.app, odevs[1]) is None)
self.assertTrue(node_last_error(controller.app, odevs[2]) is None)
def test_PUT_error_limiting_last_node(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l, *args, **kwargs: l
object_ring = controller.app.get_object_ring(None)
# acc con obj obj obj
self.assert_status_map(controller.PUT, (200, 200, 200, 200, 503),
200)
# 2, not 1, because assert_status_map() calls the method twice
odevs = object_ring.devs
self.assertEqual(node_error_count(controller.app, odevs[0]), 0)
self.assertEqual(node_error_count(controller.app, odevs[1]), 0)
self.assertEqual(node_error_count(controller.app, odevs[2]), 2)
self.assertTrue(node_last_error(controller.app, odevs[0]) is None)
self.assertTrue(node_last_error(controller.app, odevs[1]) is None)
self.assertTrue(
node_last_error(controller.app, odevs[2]) is not None)
def test_acc_or_con_missing_returns_404(self):
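        # Whenever the account or container can't be confirmed (404s, 503s,
        # or error-limited nodes), the object DELETE should come back to the
        # client as a 404.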
with save_globals():
self.app.memcache = FakeMemcacheReturnsNone()
self.app._error_limiting = {}
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200, 200, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
self.app.update_request(req)
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 200)
set_http_connect(404, 404, 404)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 404, 404)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 503, 404)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 503, 503)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(200, 200, 204, 204, 204)
# acct cont obj obj obj
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 204)
set_http_connect(200, 404, 404, 404)
# acct cont cont cont
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(200, 503, 503, 503)
# acct cont cont cont
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
for dev in self.app.account_ring.devs:
set_node_errors(
self.app, dev, self.app.error_suppression_limit + 1,
time.time())
set_http_connect(200)
# acct [isn't actually called since everything
# is error limited]
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
for dev in self.app.account_ring.devs:
set_node_errors(self.app, dev, 0, last_error=None)
for dev in self.app.container_ring.devs:
set_node_errors(self.app, dev,
self.app.error_suppression_limit + 1,
time.time())
set_http_connect(200, 200)
# acct cont [isn't actually called since
# everything is error limited]
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
def test_PUT_POST_requires_container_exist(self):
with save_globals():
self.app.object_post_as_copy = False
self.app.memcache = FakeMemcacheReturnsNone()
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 404, 404, 404, 200, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(200, 404, 404, 404, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'text/plain'})
self.app.update_request(req)
resp = controller.POST(req)
self.assertEqual(resp.status_int, 404)
def test_bad_metadata(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Object-Meta-' + (
'a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'Content-Length': '0',
'X-Object-Meta-' + (
'a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Object-Meta-Too-Long': 'a' *
constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Object-Meta-Too-Long': 'a' *
(constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
for x in range(constraints.MAX_META_COUNT):
headers['X-Object-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
for x in range(constraints.MAX_META_COUNT + 1):
headers['X-Object-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
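            # Fill in X-Object-Meta-* headers until their combined size
            # reaches MAX_META_OVERALL_SIZE, which should still be accepted
            # (201); the follow-up request below grows the last value by one
            # byte to exceed the limit and expects a 400.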
while size < constraints.MAX_META_OVERALL_SIZE - 4 - \
constraints.MAX_META_VALUE_LENGTH:
size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Object-Meta-%04d' % x] = header_value
x += 1
if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Object-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Object-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
@contextmanager
def controller_context(self, req, *args, **kwargs):
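        """
        Create a ReplicatedObjectController for the account/container/object
        in ``req.path``, wire up ``set_http_connect`` with the given mocked
        backend responses, and fail the test if any of those mocked status
        codes go unused.
        """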
_v, account, container, obj = utils.split_path(req.path, 4, 4, True)
controller = ReplicatedObjectController(
self.app, account, container, obj)
self.app.update_request(req)
self.app.memcache.store = {}
with save_globals():
new_connect = set_http_connect(*args, **kwargs)
yield controller
unused_status_list = []
while True:
try:
unused_status_list.append(next(new_connect.code_iter))
except StopIteration:
break
if unused_status_list:
                self.fail('UN-USED STATUS CODES: %r' %
                          unused_status_list)
@unpatch_policies
def test_chunked_put_bad_version(self):
# Check bad version
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v0 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_path(self):
# Check bad path
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET invalid HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_utf8(self):
# Check invalid utf-8
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a%80 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_path_no_controller(self):
# Check bad path, no controller
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_method(self):
# Check bad method
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('LICK /v1/a HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 405'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_unhandled_exception(self):
# Check unhandled exception
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
obj2srv, obj3srv, obj4srv, obj5srv, obj6srv) = _test_servers
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
orig_update_request = prosrv.update_request
def broken_update_request(*args, **kwargs):
raise Exception('fake: this should be printed')
prosrv.update_request = broken_update_request
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 500'
self.assertEqual(headers[:len(exp)], exp)
prosrv.update_request = orig_update_request
@unpatch_policies
def test_chunked_put_head_account(self):
        # HEAD account: just a double check, and really here to test the
        # part of Application.log_request that 'enforces' a content_length
        # on the response.
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 204'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn('\r\nContent-Length: 0\r\n', headers)
@unpatch_policies
def test_chunked_put_utf8_all_the_way_down(self):
# Test UTF-8 Unicode all the way through the system
ustr = '\xe1\xbc\xb8\xce\xbf\xe1\xbd\xba \xe1\xbc\xb0\xce' \
'\xbf\xe1\xbd\xbb\xce\x87 \xcf\x84\xe1\xbd\xb0 \xcf' \
'\x80\xe1\xbd\xb1\xce\xbd\xcf\x84\xca\xbc \xe1\xbc' \
'\x82\xce\xbd \xe1\xbc\x90\xce\xbe\xe1\xbd\xb5\xce' \
'\xba\xce\xbf\xce\xb9 \xcf\x83\xce\xb1\xcf\x86\xe1' \
'\xbf\x86.Test'
ustr_short = '\xe1\xbc\xb8\xce\xbf\xe1\xbd\xbatest'
# Create ustr container
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n' % quote(ustr))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# List account with ustr container (test plain)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
containers = fd.read().split('\n')
self.assertIn(ustr, containers)
# List account with ustr container (test json)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a?format=json HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
listing = json.loads(fd.read())
self.assertIn(ustr.decode('utf8'), [l['name'] for l in listing])
# List account with ustr container (test xml)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a?format=xml HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn('<name>%s</name>' % ustr, fd.read())
# Create ustr object with ustr metadata in ustr container
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'X-Object-Meta-%s: %s\r\nContent-Length: 0\r\n\r\n' %
(quote(ustr), quote(ustr), quote(ustr_short),
quote(ustr)))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# List ustr container with ustr object (test plain)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n' % quote(ustr))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
objects = fd.read().split('\n')
self.assertIn(ustr, objects)
# List ustr container with ustr object (test json)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?format=json HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n' %
quote(ustr))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
listing = json.loads(fd.read())
self.assertEqual(listing[0]['name'], ustr.decode('utf8'))
# List ustr container with ustr object (test xml)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?format=xml HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n' %
quote(ustr))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn('<name>%s</name>' % ustr, fd.read())
# Retrieve ustr object with ustr metadata
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n' %
(quote(ustr), quote(ustr)))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn('\r\nX-Object-Meta-%s: %s\r\n' %
(quote(ustr_short).lower(), quote(ustr)), headers)
@unpatch_policies
def test_chunked_put_chunked_put(self):
# Do chunked object put
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
# Also happens to assert that x-storage-token is taken as a
# replacement for x-auth-token.
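        # The chunked body below is three chunks -- 'oh' (0x2 bytes), ' hai'
        # (0x4 bytes) and '123456789abcdef' (0xf bytes) -- followed by the
        # zero-length terminating chunk, so the stored object body should
        # decode to 'oh hai123456789abcdef'.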
fd.write('PUT /v1/a/c/o/chunky HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'2\r\noh\r\n4\r\n hai\r\nf\r\n123456789abcdef\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Ensure we get what we put
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/c/o/chunky HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
body = fd.read()
self.assertEqual(body, 'oh hai123456789abcdef')
@unpatch_policies
def test_conditional_range_get(self):
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis,
obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
# make a container
fd = sock.makefile()
fd.write('PUT /v1/a/con HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
exp = 'HTTP/1.1 201'
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
# put an object in it
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/con/o HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: 10\r\n'
'Content-Type: text/plain\r\n'
'\r\n'
'abcdefghij\r\n')
fd.flush()
exp = 'HTTP/1.1 201'
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
# request with both If-None-Match and Range
etag = md5("abcdefghij").hexdigest()
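        # When If-None-Match matches the object's ETag it takes precedence
        # over Range, so we expect 304 Not Modified rather than a 206
        # partial response.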
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/con/o HTTP/1.1\r\n' +
'Host: localhost\r\n' +
'Connection: close\r\n' +
'X-Storage-Token: t\r\n' +
'If-None-Match: "' + etag + '"\r\n' +
'Range: bytes=3-8\r\n' +
'\r\n')
fd.flush()
exp = 'HTTP/1.1 304'
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
def test_mismatched_etags(self):
with save_globals():
# no etag supplied, object servers return success w/ diff values
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0'})
self.app.update_request(req)
set_http_connect(200, 201, 201, 201,
etags=[None,
'68b329da9893e34099c7d8ad5cb9c940',
'68b329da9893e34099c7d8ad5cb9c940',
'68b329da9893e34099c7d8ad5cb9c941'])
resp = controller.PUT(req)
self.assertEqual(resp.status_int // 100, 5) # server error
# req supplies etag, object servers return 422 - mismatch
headers = {'Content-Length': '0',
'ETag': '68b329da9893e34099c7d8ad5cb9c940'}
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
set_http_connect(200, 422, 422, 503,
etags=['68b329da9893e34099c7d8ad5cb9c940',
'68b329da9893e34099c7d8ad5cb9c941',
None,
None])
resp = controller.PUT(req)
self.assertEqual(resp.status_int // 100, 4) # client error
def test_response_get_accept_ranges_header(self):
with save_globals():
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.GET(req)
self.assertIn('accept-ranges', resp.headers)
self.assertEqual(resp.headers['accept-ranges'], 'bytes')
def test_response_head_accept_ranges_header(self):
with save_globals():
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.HEAD(req)
self.assertIn('accept-ranges', resp.headers)
self.assertEqual(resp.headers['accept-ranges'], 'bytes')
def test_GET_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.GET(req)
self.assertTrue(called[0])
def _check_GET_respects_read_affinity(self, conf, policy, expected_nodes):
actual_nodes = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
actual_nodes.append(ipaddr)
# mock shuffle to be a no-op to ensure that the only way nodes would
# not be used in ring order is if affinity is respected.
with mock.patch('swift.proxy.server.shuffle', lambda x: x):
app = proxy_server.Application(
conf, FakeMemcache(),
logger=debug_logger('proxy-ut'),
account_ring=FakeRing(),
container_ring=FakeRing())
with save_globals():
object_ring = app.get_object_ring(policy)
object_ring.max_more_nodes = 100
controller = \
ReplicatedObjectController(
app, 'a', 'c', 'o.jpg')
# requests go to acc, con, obj, obj, obj
set_http_connect(200, 200, 404, 404, 200,
give_connect=test_connect)
req = Request.blank(
'/v1/a/c/o.jpg',
headers={'X-Backend-Storage-Policy-Index': str(policy)})
app.memcache.store = {}
res = controller.GET(req)
self.assertTrue(res.status.startswith('200 '))
self.assertEqual(3, len(actual_nodes))
self.assertEqual(expected_nodes, actual_nodes)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing()),
StoragePolicy(1, 'one', False, object_ring=FakeRing())])
def test_GET_respects_read_affinity(self):
# nodes in fake ring order have r0z0, r1z1, r0z2
# Check default conf via proxy server conf
conf = {'read_affinity': 'r0z2=1, r1=2',
'sorting_method': 'affinity'}
expected_nodes = ['10.0.0.2', '10.0.0.1', '10.0.0.0']
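        # With affinity 'r0z2=1, r1=2' the r0z2 node (10.0.0.2) sorts first,
        # the r1z1 node (10.0.0.1) second, and the unmatched r0z0 node
        # (10.0.0.0) last.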
self._check_GET_respects_read_affinity(conf, 0, expected_nodes)
# policy 0 and policy 1 have conf via per policy conf section
conf = {
'read_affinity': '',
'sorting_method': 'shuffle',
'policy_config': {
'0': {'read_affinity': 'r1z1=1, r0z2=2',
'sorting_method': 'affinity'},
'1': {'read_affinity': 'r0z2=1, r0z0=2',
'sorting_method': 'affinity'}
}
}
expected_nodes = ['10.0.0.1', '10.0.0.2', '10.0.0.0']
self._check_GET_respects_read_affinity(conf, 0, expected_nodes)
expected_nodes = ['10.0.0.2', '10.0.0.0', '10.0.0.1']
self._check_GET_respects_read_affinity(conf, 1, expected_nodes)
# policy 0 conf via per policy conf section overrides proxy server conf
conf = {
'read_affinity': 'r1z1=1, r0z2=2',
'sorting_method': 'affinity',
'policy_config': {
'0': {'read_affinity': 'r0z2=1, r0=2',
'sorting_method': 'affinity'}
}
}
expected_nodes = ['10.0.0.2', '10.0.0.0', '10.0.0.1']
self._check_GET_respects_read_affinity(conf, 0, expected_nodes)
def test_HEAD_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.HEAD(req)
self.assertTrue(called[0])
def test_POST_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
self.app.object_post_as_copy = False
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Length': '5'}, body='12345')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.POST(req)
self.assertTrue(called[0])
def test_PUT_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.PUT(req)
self.assertTrue(called[0])
def test_POST_converts_delete_after_to_delete_at(self):
with save_globals():
self.app.object_post_as_copy = False
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 202, 202, 202)
self.app.memcache.store = {}
orig_time = time.time
try:
t = time.time()
time.time = lambda: t
req = Request.blank('/v1/a/c/o', {},
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '60'})
self.app.update_request(req)
res = controller.POST(req)
self.assertEqual(res.status, '202 Fake')
self.assertEqual(req.headers.get('x-delete-at'),
str(int(t + 60)))
finally:
time.time = orig_time
@unpatch_policies
def test_leak_1(self):
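        # Track live Request instances in a WeakKeyDictionary and make sure
        # that a GET whose client disconnects mid-transfer does not leave
        # any extra Request objects alive once the garbage collector has run.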
_request_instances = weakref.WeakKeyDictionary()
_orig_init = Request.__init__
def request_init(self, *args, **kwargs):
_orig_init(self, *args, **kwargs)
_request_instances[self] = None
with mock.patch.object(Request, "__init__", request_init):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
obj_len = prosrv.client_chunk_size * 2
# PUT test file
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/test_leak_1 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Auth-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (obj_len, 'a' * obj_len))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Remember Request instance count, make sure the GC is run for
# pythons without reference counting.
for i in range(4):
sleep(0) # let eventlet do its thing
gc.collect()
else:
sleep(0)
before_request_instances = len(_request_instances)
# GET test file, but disconnect early
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/c/test_leak_1 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Auth-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
fd.read(1)
sock.fd._sock.close()
# Make sure the GC is run again for pythons without reference
# counting
for i in range(4):
sleep(0) # let eventlet do its thing
gc.collect()
else:
sleep(0)
self.assertEqual(
before_request_instances, len(_request_instances))
def test_OPTIONS(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o.jpg')
def my_empty_container_info(*args):
return {}
controller.container_info = my_empty_container_info
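            # With no CORS configuration on the container, the preflight
            # request is rejected with a 401 and no CORS headers.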
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
self.assertNotIn('Access-Control-Allow-Origin', resp.headers)
self.assertNotIn('Vary', resp.headers)
def my_empty_origin_container_info(*args):
return {'cors': {'allow_origin': None}}
controller.container_info = my_empty_origin_container_info
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
self.assertNotIn('Access-Control-Allow-Origin', resp.headers)
self.assertNotIn('Vary', resp.headers)
def my_container_info(*args):
return {
'cors': {
'allow_origin': 'http://foo.bar:8080 https://foo.bar',
'max_age': '999',
}
}
controller.container_info = my_container_info
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual(
'https://foo.bar',
resp.headers['access-control-allow-origin'])
self.assertEqual('Origin', resp.headers.get('vary'))
self.assertEqual(
sorted(resp.headers['access-control-allow-methods']
.split(', ')),
sorted('OPTIONS GET POST PUT DELETE HEAD'.split()))
self.assertEqual('999', resp.headers['access-control-max-age'])
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
self.assertNotIn('Access-Control-Allow-Origin', resp.headers)
self.assertNotIn('Vary', resp.headers)
req = Request.blank('/v1/a/c/o.jpg', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual(
sorted(resp.headers['Allow'].split(', ')),
sorted('OPTIONS GET POST PUT DELETE HEAD'.split()))
self.assertNotIn('Access-Control-Allow-Origin', resp.headers)
self.assertNotIn('Vary', resp.headers)
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
self.assertNotIn('Access-Control-Allow-Origin', resp.headers)
self.assertNotIn('Vary', resp.headers)
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.bar',
'Access-Control-Request-Method': 'GET'})
controller.app.cors_allow_origin = ['http://foo.bar', ]
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual('Origin', resp.headers.get('vary'))
def my_container_info_wildcard(*args):
return {
'cors': {
'allow_origin': '*',
'max_age': '999',
}
}
controller.container_info = my_container_info_wildcard
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://bar.baz',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual('*', resp.headers['access-control-allow-origin'])
self.assertNotIn('Vary', resp.headers)
self.assertEqual(
sorted(resp.headers['access-control-allow-methods']
.split(', ')),
sorted('OPTIONS GET POST PUT DELETE HEAD'.split()))
self.assertEqual('999', resp.headers['access-control-max-age'])
def _get_CORS_response(self, container_cors, strict_mode, object_get=None):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
def stubContainerInfo(*args):
return {
'cors': container_cors
}
controller.container_info = stubContainerInfo
controller.app.strict_cors_mode = strict_mode
def objectGET(controller, req):
return Response(headers={
'X-Object-Meta-Color': 'red',
'X-Super-Secret': 'hush',
})
mock_object_get = object_get or objectGET
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'GET'},
headers={'Origin': 'http://foo.bar'})
resp = cors_validation(mock_object_get)(controller, req)
return resp
def test_CORS_valid_non_strict(self):
# test expose_headers to non-allowed origins
container_cors = {'allow_origin': 'http://not.foo.bar',
'expose_headers': 'X-Object-Meta-Color '
'X-Object-Meta-Color-Ex'}
resp = self._get_CORS_response(
container_cors=container_cors, strict_mode=False)
self.assertEqual(200, resp.status_int)
self.assertEqual('http://foo.bar',
resp.headers['access-control-allow-origin'])
self.assertEqual('red', resp.headers['x-object-meta-color'])
# X-Super-Secret is in the response, but not "exposed"
self.assertEqual('hush', resp.headers['x-super-secret'])
self.assertIn('access-control-expose-headers', resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
expected_exposed = set([
'cache-control', 'content-language', 'content-type', 'expires',
'last-modified', 'pragma', 'etag', 'x-timestamp', 'x-trans-id',
'x-openstack-request-id', 'x-object-meta-color',
'x-object-meta-color-ex'])
self.assertEqual(expected_exposed, exposed)
# test allow_origin *
container_cors = {'allow_origin': '*'}
resp = self._get_CORS_response(
container_cors=container_cors, strict_mode=False)
self.assertEqual(200, resp.status_int)
self.assertEqual('*',
resp.headers['access-control-allow-origin'])
# test allow_origin empty
container_cors = {'allow_origin': ''}
resp = self._get_CORS_response(
container_cors=container_cors, strict_mode=False)
self.assertEqual(200, resp.status_int)
self.assertEqual('http://foo.bar',
resp.headers['access-control-allow-origin'])
def test_CORS_valid_strict(self):
# test expose_headers to non-allowed origins
container_cors = {'allow_origin': 'http://not.foo.bar',
'expose_headers': 'X-Object-Meta-Color '
'X-Object-Meta-Color-Ex'}
resp = self._get_CORS_response(
container_cors=container_cors, strict_mode=True)
self.assertEqual(200, resp.status_int)
self.assertNotIn('access-control-expose-headers', resp.headers)
self.assertNotIn('access-control-allow-origin', resp.headers)
# test allow_origin *
container_cors = {'allow_origin': '*'}
resp = self._get_CORS_response(
container_cors=container_cors, strict_mode=True)
self.assertEqual(200, resp.status_int)
self.assertEqual('*',
resp.headers['access-control-allow-origin'])
self.assertEqual('red', resp.headers['x-object-meta-color'])
# X-Super-Secret is in the response, but not "exposed"
self.assertEqual('hush', resp.headers['x-super-secret'])
self.assertIn('access-control-expose-headers', resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
expected_exposed = set([
'cache-control', 'content-language', 'content-type', 'expires',
'last-modified', 'pragma', 'etag', 'x-timestamp', 'x-trans-id',
'x-openstack-request-id', 'x-object-meta-color'])
self.assertEqual(expected_exposed, exposed)
# test allow_origin empty
container_cors = {'allow_origin': ''}
resp = self._get_CORS_response(
container_cors=container_cors, strict_mode=True)
self.assertNotIn('access-control-expose-headers', resp.headers)
self.assertNotIn('access-control-allow-origin', resp.headers)
def test_CORS_valid_with_obj_headers(self):
container_cors = {'allow_origin': 'http://foo.bar'}
def objectGET(controller, req):
return Response(headers={
'X-Object-Meta-Color': 'red',
'X-Super-Secret': 'hush',
'Access-Control-Allow-Origin': 'http://obj.origin',
'Access-Control-Expose-Headers': 'x-trans-id'
})
resp = self._get_CORS_response(
container_cors=container_cors, strict_mode=True,
object_get=objectGET)
self.assertEqual(200, resp.status_int)
self.assertEqual('http://obj.origin',
resp.headers['access-control-allow-origin'])
self.assertEqual('x-trans-id',
resp.headers['access-control-expose-headers'])
def test_CORS_expose_headers(self):
default_expected_exposed = set([
'cache-control', 'content-language', 'content-type', 'expires',
'last-modified', 'pragma', 'etag', 'x-timestamp', 'x-trans-id',
'x-openstack-request-id'])
def objectGET(controller, req):
return Response(headers={
'X-Custom-Operator': 'hush',
'X-Custom-User': 'hush',
})
# test default expose_headers
self.app.cors_expose_headers = []
container_cors = {'allow_origin': 'http://foo.bar'}
resp = self._get_CORS_response(container_cors=container_cors,
strict_mode=False, object_get=objectGET)
self.assertEqual(200, resp.status_int)
self.assertIn('access-control-expose-headers', resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
self.assertEqual(default_expected_exposed, exposed)
# test operator expose_headers
self.app.cors_expose_headers = ['x-custom-operator', ]
container_cors = {'allow_origin': 'http://foo.bar'}
resp = self._get_CORS_response(container_cors=container_cors,
strict_mode=False, object_get=objectGET)
self.assertEqual(200, resp.status_int)
self.assertIn('access-control-expose-headers', resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
self.assertEqual(default_expected_exposed | set(['x-custom-operator']),
exposed)
# test user expose_headers
self.app.cors_expose_headers = []
container_cors = {'allow_origin': 'http://foo.bar',
'expose_headers': 'x-custom-user'}
resp = self._get_CORS_response(container_cors=container_cors,
strict_mode=False, object_get=objectGET)
self.assertEqual(200, resp.status_int)
self.assertIn('access-control-expose-headers', resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
self.assertEqual(default_expected_exposed | set(['x-custom-user']),
exposed)
# test user and operator expose_headers
self.app.cors_expose_headers = ['x-custom-operator', ]
container_cors = {'allow_origin': 'http://foo.bar',
'expose_headers': 'x-custom-user'}
resp = self._get_CORS_response(container_cors=container_cors,
strict_mode=False, object_get=objectGET)
self.assertEqual(200, resp.status_int)
self.assertIn('access-control-expose-headers', resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
self.assertEqual(default_expected_exposed | set(['x-custom-user',
'x-custom-operator']),
exposed)
def _gather_x_container_headers(self, controller_call, req, *connect_args,
**kwargs):
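        """
        Invoke ``controller_call`` with the given mocked backend statuses and
        return the headers named in ``header_list`` (by default the
        X-Container-* backend-update headers) that were sent to each object
        server, sorted by the first header in ``header_list``. The initial
        account and container HEAD requests are ignored.
        """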
header_list = kwargs.pop('header_list', ['X-Container-Device',
'X-Container-Host',
'X-Container-Partition'])
seen_headers = []
def capture_headers(ipaddr, port, device, partition, method,
path, headers=None, query_string=None):
captured = {}
for header in header_list:
captured[header] = headers.get(header)
seen_headers.append(captured)
with save_globals():
self.app.allow_account_management = True
set_http_connect(*connect_args, give_connect=capture_headers,
**kwargs)
resp = controller_call(req)
self.assertEqual(2, resp.status_int // 100) # sanity check
# don't care about the account/container HEADs, so chuck
# the first two requests
return sorted(seen_headers[2:],
key=lambda d: d.get(header_list[0]) or 'z')
def test_PUT_x_container_headers_with_equal_replicas(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000',
'X-Container-Partition': '0',
'X-Container-Device': 'sda'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_PUT_x_container_headers_with_fewer_container_replicas(self):
self.app.container_ring.set_replicas(2)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000',
'X-Container-Partition': '0',
'X-Container-Device': 'sda'},
{'X-Container-Host': '10.0.0.0:1000',
'X-Container-Partition': '0',
'X-Container-Device': 'sda'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'}])
def test_PUT_x_container_headers_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_POST_x_container_headers_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
self.app.object_post_as_copy = False
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'application/stuff'})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.POST, req,
200, 200, 200, 200, 200) # HEAD HEAD POST POST POST
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_DELETE_x_container_headers_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Content-Type': 'application/stuff'})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.DELETE, req,
200, 200, 200, 200, 200) # HEAD HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}
])
@mock.patch('time.time', new=lambda: STATIC_TIME)
def test_PUT_x_delete_at_with_fewer_container_replicas(self):
self.app.container_ring.set_replicas(2)
delete_at_timestamp = int(time.time()) + 100000
delete_at_container = utils.get_expirer_container(
delete_at_timestamp, self.app.expiring_objects_container_divisor,
'a', 'c', 'o')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Type': 'application/stuff',
'Content-Length': '0',
'X-Delete-At': str(delete_at_timestamp)})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201, # HEAD HEAD PUT PUT PUT
header_list=('X-Delete-At-Host', 'X-Delete-At-Device',
'X-Delete-At-Partition', 'X-Delete-At-Container'))
self.assertEqual(seen_headers, [
{'X-Delete-At-Host': '10.0.0.0:1000',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sda'},
{'X-Delete-At-Host': '10.0.0.1:1001',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdb'},
{'X-Delete-At-Host': None,
'X-Delete-At-Container': None,
'X-Delete-At-Partition': None,
'X-Delete-At-Device': None}
])
@mock.patch('time.time', new=lambda: STATIC_TIME)
def test_PUT_x_delete_at_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
self.app.expiring_objects_account = 'expires'
self.app.expiring_objects_container_divisor = 60
delete_at_timestamp = int(time.time()) + 100000
delete_at_container = utils.get_expirer_container(
delete_at_timestamp, self.app.expiring_objects_container_divisor,
'a', 'c', 'o')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Type': 'application/stuff',
'Content-Length': 0,
'X-Delete-At': str(delete_at_timestamp)})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201, # HEAD HEAD PUT PUT PUT
header_list=('X-Delete-At-Host', 'X-Delete-At-Device',
'X-Delete-At-Partition', 'X-Delete-At-Container'))
self.assertEqual(seen_headers, [
{'X-Delete-At-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sda,sdd'},
{'X-Delete-At-Host': '10.0.0.1:1001',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdb'},
{'X-Delete-At-Host': '10.0.0.2:1002',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdc'}
])
class BaseTestECObjectController(BaseTestObjectController):
def test_PUT_ec(self):
self.put_container(self.ec_policy.name, self.ec_policy.name)
obj = 'abCD' * 10 # small, so we don't get multiple EC stripes
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/o1 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: "%s"\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (self.ec_policy.name, md5(obj).hexdigest(),
len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
ecd = self.ec_policy.pyeclib_driver
expected_pieces = set(ecd.encode(obj))
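        # pyeclib's encode() gives us the exact set of fragment archives we
        # expect to find spread across the object servers' disks below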
# go to disk to make sure it's there and all erasure-coded
partition, nodes = self.ec_policy.object_ring.get_nodes(
'a', self.ec_policy.name, 'o1')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[self.ec_policy]
got_pieces = set()
got_indices = set()
got_durable = []
for node_index, node in enumerate(nodes):
df = df_mgr.get_diskfile(node['device'], partition,
'a', self.ec_policy.name, 'o1',
policy=self.ec_policy)
with df.open():
meta = df.get_metadata()
contents = ''.join(df.reader())
got_pieces.add(contents)
lmeta = dict((k.lower(), v) for k, v in meta.items())
got_indices.add(
lmeta['x-object-sysmeta-ec-frag-index'])
self.assertEqual(
lmeta['x-object-sysmeta-ec-etag'],
md5(obj).hexdigest())
self.assertEqual(
lmeta['x-object-sysmeta-ec-content-length'],
str(len(obj)))
self.assertEqual(
lmeta['x-object-sysmeta-ec-segment-size'],
'4096')
self.assertEqual(
lmeta['x-object-sysmeta-ec-scheme'],
'%s 2+1' % DEFAULT_TEST_EC_TYPE)
self.assertEqual(
lmeta['etag'],
md5(contents).hexdigest())
# check presence for a durable data file for the timestamp
durable_file = (
utils.Timestamp(df.timestamp).internal +
'#%s' % lmeta['x-object-sysmeta-ec-frag-index'] +
'#d.data')
durable_file = os.path.join(
_testdir, node['device'], storage_directory(
diskfile.get_data_dir(self.ec_policy),
partition, hash_path('a', self.ec_policy.name, 'o1')),
durable_file)
if os.path.isfile(durable_file):
got_durable.append(True)
self.assertEqual(expected_pieces, got_pieces)
self.assertEqual(set(('0', '1', '2')), got_indices)
# verify at least 2 puts made it all the way to the end of 2nd
        # phase, i.e. at least 2 durable statuses were written
num_durable_puts = sum(d is True for d in got_durable)
self.assertGreaterEqual(num_durable_puts, 2)
def test_PUT_ec_multiple_segments(self):
self.put_container(self.ec_policy.name, self.ec_policy.name)
pyeclib_header_size = len(self.ec_policy.pyeclib_driver.encode("")[0])
segment_size = self.ec_policy.ec_segment_size
# Big enough to have multiple segments. Also a multiple of the
# segment size to get coverage of that path too.
obj = 'ABC' * segment_size
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/o2 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (self.ec_policy.name, len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# it's a 2+1 erasure code, so each fragment archive should be half
# the length of the object, plus three inline pyeclib metadata
# things (one per segment)
expected_length = (len(obj) / 2 + pyeclib_header_size * 3)
partition, nodes = self.ec_policy.object_ring.get_nodes(
'a', self.ec_policy.name, 'o2')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[self.ec_policy]
got_durable = []
fragment_archives = []
for node in nodes:
df = df_mgr.get_diskfile(
node['device'], partition, 'a',
self.ec_policy.name, 'o2', policy=self.ec_policy)
with df.open():
meta = df.get_metadata()
contents = ''.join(df.reader())
fragment_archives.append(contents)
self.assertEqual(len(contents), expected_length)
durable_file = (
utils.Timestamp(df.timestamp).internal +
'#%s' % meta['X-Object-Sysmeta-Ec-Frag-Index'] +
'#d.data')
durable_file = os.path.join(
_testdir, node['device'], storage_directory(
diskfile.get_data_dir(self.ec_policy),
partition, hash_path('a', self.ec_policy.name, 'o2')),
durable_file)
if os.path.isfile(durable_file):
got_durable.append(True)
# Verify that we can decode each individual fragment and that they
# are all the correct size
fragment_size = self.ec_policy.fragment_size
nfragments = int(
math.ceil(float(len(fragment_archives[0])) / fragment_size))
for fragment_index in range(nfragments):
fragment_start = fragment_index * fragment_size
fragment_end = (fragment_index + 1) * fragment_size
try:
frags = [fa[fragment_start:fragment_end]
for fa in fragment_archives]
seg = self.ec_policy.pyeclib_driver.decode(frags)
except ECDriverError:
self.fail("Failed to decode fragments %d; this probably "
"means the fragments are not the sizes they "
"should be" % fragment_index)
segment_start = fragment_index * segment_size
segment_end = (fragment_index + 1) * segment_size
self.assertEqual(seg, obj[segment_start:segment_end])
# verify at least 2 puts made it all the way to the end of 2nd
        # phase, i.e. at least 2 .durable statuses were written
num_durable_puts = sum(d is True for d in got_durable)
self.assertGreaterEqual(num_durable_puts, 2)
def test_PUT_ec_object_etag_mismatch(self):
self.put_container(self.ec_policy.name, self.ec_policy.name)
obj = '90:6A:02:60:B1:08-96da3e706025537fc42464916427727e'
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/o3 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: %s\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (self.ec_policy.name,
md5('something else').hexdigest(),
len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 422'
self.assertEqual(headers[:len(exp)], exp)
# nothing should have made it to disk on the object servers
partition, nodes = prosrv.get_object_ring(
int(self.ec_policy)).get_nodes('a', self.ec_policy.name, 'o3')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[self.ec_policy]
for node in nodes:
df = df_mgr.get_diskfile(node['device'], partition,
'a', self.ec_policy.name, 'o3',
policy=self.ec_policy)
self.assertRaises(DiskFileNotExist, df.open)
def test_PUT_ec_fragment_archive_etag_mismatch(self):
self.put_container(self.ec_policy.name, self.ec_policy.name)
        # Cause a hash mismatch by feeding one particular MD5 hasher some
        # extra data. The goal is to corrupt the hash on just enough of the
        # object servers that the PUT cannot reach quorum.
count = (
self.ec_policy.object_ring.replica_count - self.ec_policy.ec_ndata)
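        # With count hashers corrupted, only ec_ndata fragments are stored
        # intact, which is below the EC write quorum -- hence the 503 and the
        # found == ec_ndata check at the end of the test.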
countdown = [count]
def busted_md5_constructor(initial_str=""):
hasher = md5(initial_str)
if countdown[0] > 0:
hasher.update('wrong')
countdown[0] -= 1
return hasher
obj = 'uvarovite-esurience-cerated-symphysic'
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
with mock.patch('swift.obj.server.md5', busted_md5_constructor):
fd = sock.makefile()
fd.write('PUT /v1/a/%s/pimento HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: %s\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (self.ec_policy.name, md5(obj).hexdigest(),
len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 503' # no quorum
self.assertEqual(headers[:len(exp)], exp)
# replica count - 1 of the fragment archives should have
# landed on disk
partition, nodes = prosrv.get_object_ring(
int(self.ec_policy)).get_nodes('a', self.ec_policy.name, 'pimento')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[self.ec_policy]
found = 0
for node in nodes:
df = df_mgr.get_diskfile(node['device'], partition,
'a', self.ec_policy.name, 'pimento',
policy=self.ec_policy)
try:
# diskfile open won't succeed because no durable was written,
# so look under the hood for data files.
files = os.listdir(df._datadir)
if len(files) > 0:
                    # Although the third fragment archive hasn't landed on
                    # disk, the directory df._datadir is prematurely created
                    # and is empty when we use O_TMPFILE + linkat()
num_data_files = \
len([f for f in files if f.endswith('.data')])
self.assertEqual(1, num_data_files)
found += 1
except OSError:
pass
self.assertEqual(found, self.ec_policy.ec_ndata)
def test_PUT_ec_fragment_quorum_archive_etag_mismatch(self):
self.put_container("ec", "ec-con")
def busted_md5_constructor(initial_str=""):
hasher = md5(initial_str)
hasher.update('wrong')
return hasher
obj = 'uvarovite-esurience-cerated-symphysic'
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
call_count = [0]
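        # Stand in for MIMEPutter.send_commit_confirmation and just count
        # calls, so we can assert the proxy never tried to commit.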
def mock_committer(self):
call_count[0] += 1
commit_confirmation = \
'swift.proxy.controllers.obj.MIMEPutter.send_commit_confirmation'
with mock.patch('swift.obj.server.md5', busted_md5_constructor), \
mock.patch(commit_confirmation, mock_committer):
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/quorum HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: %s\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 503' # no quorum
self.assertEqual(headers[:len(exp)], exp)
        # no commit should be sent to the object servers when the quorum
        # of responses is 4xx
self.assertEqual(0, call_count[0])
# no fragment archives should have landed on disk
partition, nodes = prosrv.get_object_ring(3).get_nodes(
'a', 'ec-con', 'quorum')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[self.ec_policy]
for node in nodes:
df = df_mgr.get_diskfile(node['device'], partition,
'a', 'ec-con', 'quorum',
policy=self.ec_policy)
if os.path.exists(df._datadir):
self.assertFalse(os.listdir(df._datadir)) # should be empty
def test_PUT_ec_fragment_quorum_bad_request(self):
self.put_container("ec", "ec-con")
obj = 'uvarovite-esurience-cerated-symphysic'
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
call_count = [0]
def mock_committer(self):
call_count[0] += 1
read_footer = \
'swift.obj.server.ObjectController._read_metadata_footer'
commit_confirmation = \
'swift.proxy.controllers.obj.MIMEPutter.send_commit_confirmation'
with mock.patch(read_footer) as read_footer_call, \
mock.patch(commit_confirmation, mock_committer):
# Emulate missing footer MIME doc in all object-servers
read_footer_call.side_effect = HTTPBadRequest(
body="couldn't find footer MIME doc")
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/quorum HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: %s\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
            # the client should only see a generic 503, not the details of
            # the failed conversation between proxy-server and object-server
exp = 'HTTP/1.1 503'
self.assertEqual(headers[:len(exp)], exp)
        # no commit should be sent to the object servers when the quorum
        # of responses is 4xx
self.assertEqual(0, call_count[0])
# no fragment archives should have landed on disk
partition, nodes = prosrv.get_object_ring(3).get_nodes(
'a', 'ec-con', 'quorum')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[self.ec_policy]
for node in nodes:
df = df_mgr.get_diskfile(node['device'], partition,
'a', 'ec-con', 'quorum',
policy=self.ec_policy)
if os.path.exists(df._datadir):
self.assertFalse(os.listdir(df._datadir)) # should be empty
def test_PUT_ec_if_none_match(self):
self.put_container(self.ec_policy.name, self.ec_policy.name)
obj = 'ananepionic-lepidophyllous-ropewalker-neglectful'
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/inm HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: "%s"\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (self.ec_policy.name, md5(obj).hexdigest(),
len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/inm HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'If-None-Match: *\r\n'
'Etag: "%s"\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (self.ec_policy.name, md5(obj).hexdigest(),
len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
def test_GET_ec(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
self.put_container(self.ec_policy.name, self.ec_policy.name)
obj = '0123456' * 11 * 17
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/go-get-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'X-Object-Meta-Color: chartreuse\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (self.ec_policy.name, len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/go-get-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.ec_policy.name)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
self.assertEqual('chartreuse', headers['X-Object-Meta-Color'])
gotten_obj = ''
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
self.assertEqual(gotten_obj, obj)
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
self.assertEqual(len(error_lines), 0) # sanity
self.assertEqual(len(warn_lines), 0) # sanity
def test_conditional_GET_ec(self):
# sanity
self.assertEqual('erasure_coding', self.ec_policy.policy_type)
self._test_conditional_GET(self.ec_policy)
def test_GET_ec_big(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
self.put_container(self.ec_policy.name, self.ec_policy.name)
# our EC segment size is 4 KiB, so this is multiple (3) segments;
# we'll verify that with a sanity check
obj = 'a moose once bit my sister' * 400
self.assertGreater(
len(obj), self.ec_policy.ec_segment_size * 2,
"object is too small for proper testing")
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/big-obj-get HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (self.ec_policy.name, len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/big-obj-get HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.ec_policy.name)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
gotten_obj = ''
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
# This may look like a redundant test, but when things fail, this
# has a useful failure message while the subsequent one spews piles
# of garbage and demolishes your terminal's scrollback buffer.
self.assertEqual(len(gotten_obj), len(obj))
self.assertEqual(gotten_obj, obj)
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
self.assertEqual(len(error_lines), 0) # sanity
self.assertEqual(len(warn_lines), 0) # sanity
def test_GET_ec_failure_handling(self):
self.put_container(self.ec_policy.name, self.ec_policy.name)
obj = 'look at this object; it is simply amazing ' * 500
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/crash-test-dummy HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (self.ec_policy.name, len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
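        # The helpers below wrap ECAppIter so that each document part
        # yields one chunk and then raises, simulating a backend failure
        # after the response has started streaming.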
def explodey_iter(inner_iter):
yield next(inner_iter)
raise Exception("doom ba doom")
def explodey_doc_parts_iter(inner_iter_iter):
try:
for item in inner_iter_iter:
item = item.copy() # paranoia about mutable data
item['part_iter'] = explodey_iter(item['part_iter'])
yield item
except GeneratorExit:
inner_iter_iter.close()
raise
real_ec_app_iter = swift.proxy.controllers.obj.ECAppIter
def explodey_ec_app_iter(path, policy, iterators, *a, **kw):
# Each thing in `iterators` here is a document-parts iterator,
# and we want to fail after getting a little into each part.
#
# That way, we ensure we've started streaming the response to
# the client when things go wrong.
return real_ec_app_iter(
path, policy,
[explodey_doc_parts_iter(i) for i in iterators],
*a, **kw)
with mock.patch("swift.proxy.controllers.obj.ECAppIter",
explodey_ec_app_iter):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/crash-test-dummy HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.ec_policy.name)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
gotten_obj = ''
try:
# don't hang the test run when this fails
with Timeout(300):
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
except Timeout:
self.fail("GET hung when connection failed")
# Ensure we failed partway through, otherwise the mocks could
# get out of date without anyone noticing
self.assertTrue(0 < len(gotten_obj) < len(obj))
def test_HEAD_ec(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
self.put_container(self.ec_policy.name, self.ec_policy.name)
obj = '0123456' * 11 * 17
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/go-head-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'X-Object-Meta-Color: chartreuse\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (self.ec_policy.name, len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a/%s/go-head-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.ec_policy.name)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
self.assertEqual('chartreuse', headers['X-Object-Meta-Color'])
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
self.assertEqual(len(error_lines), 0) # sanity
self.assertEqual(len(warn_lines), 0) # sanity
def test_GET_ec_404(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
self.put_container(self.ec_policy.name, self.ec_policy.name)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/yes-we-have-no-bananas HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.ec_policy.name)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
self.assertEqual(len(error_lines), 0) # sanity
self.assertEqual(len(warn_lines), 0) # sanity
def test_HEAD_ec_404(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
self.put_container(self.ec_policy.name, self.ec_policy.name)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a/%s/yes-we-have-no-bananas HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.ec_policy.name)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
self.assertEqual(len(error_lines), 0) # sanity
self.assertEqual(len(warn_lines), 0) # sanity
def test_reload_ring_ec(self):
self.put_container("ec", "ec-con")
orig_rtime = self.ec_policy.object_ring._rtime
orig_replica_count = self.ec_policy.object_ring.replica_count
# save original file as back up
copyfile(self.ec_policy.object_ring.serialized_path,
self.ec_policy.object_ring.serialized_path + '.bak')
try:
# overwrite with 2 replica, 2 devices ring
obj_devs = []
obj_devs.append(
{'port': _test_sockets[-3].getsockname()[1],
'device': 'sdg1'})
obj_devs.append(
{'port': _test_sockets[-2].getsockname()[1],
'device': 'sdh1'})
write_fake_ring(self.ec_policy.object_ring.serialized_path,
*obj_devs)
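            # Helper: force a ring reload check and issue a request of the
            # given method, returning the raw response headers.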
def get_ring_reloaded_response(method):
# force to reload at the request
self.ec_policy.object_ring._rtime = 0
trans_data = ['%s /v1/a/ec-con/o2 HTTP/1.1\r\n' % method,
'Host: localhost\r\n',
'Connection: close\r\n',
'X-Storage-Token: t\r\n']
if method == 'PUT':
# small, so we don't get multiple EC stripes
obj = 'abCD' * 10
extra_trans_data = [
'Etag: "%s"\r\n' % md5(obj).hexdigest(),
'Content-Length: %d\r\n' % len(obj),
'Content-Type: application/octet-stream\r\n',
'\r\n%s' % obj
]
trans_data.extend(extra_trans_data)
else:
trans_data.append('\r\n')
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write(''.join(trans_data))
fd.flush()
headers = readuntil2crlfs(fd)
                # the proxy should keep using the older ring (the reload of
                # the bad two-replica ring is effectively rolled back)
return headers
for method in ('PUT', 'HEAD', 'GET', 'POST', 'DELETE'):
headers = get_ring_reloaded_response(method)
exp = 'HTTP/1.1 20'
self.assertEqual(headers[:len(exp)], exp)
# proxy didn't load newest ring, use older one
self.assertEqual(orig_replica_count,
self.ec_policy.object_ring.replica_count)
if method == 'POST':
                    # take care to exercise fast-POST here by turning
                    # object_post_as_copy off
orig_post_as_copy = getattr(
_test_servers[0], 'object_post_as_copy', None)
try:
_test_servers[0].object_post_as_copy = False
with mock.patch.object(
_test_servers[0],
'object_post_as_copy', False):
headers = get_ring_reloaded_response(method)
finally:
if orig_post_as_copy is None:
del _test_servers[0].object_post_as_copy
else:
_test_servers[0].object_post_as_copy = \
orig_post_as_copy
exp = 'HTTP/1.1 20'
self.assertEqual(headers[:len(exp)], exp)
# sanity
self.assertEqual(orig_replica_count,
self.ec_policy.object_ring.replica_count)
finally:
self.ec_policy.object_ring._rtime = orig_rtime
os.rename(self.ec_policy.object_ring.serialized_path + '.bak',
self.ec_policy.object_ring.serialized_path)
def test_ec_client_disconnect(self):
prolis = _test_sockets[0]
# create connection
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
# create container
fd.write('PUT /v1/a/%s-discon HTTP/1.1\r\n'
'Host: localhost\r\n'
'Content-Length: 0\r\n'
'X-Storage-Token: t\r\n'
'X-Storage-Policy: %s\r\n'
'\r\n' % (self.ec_policy.name, self.ec_policy.name))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2'
self.assertEqual(headers[:len(exp)], exp)
# create object
obj = 'a' * 4 * 64 * 2 ** 10
fd.write('PUT /v1/a/%s-discon/test HTTP/1.1\r\n'
'Host: localhost\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n%s' % (self.ec_policy.name, len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
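        # Record where each ChunkWriteTimeout was entered and clear the
        # record on exit, so we can detect any timeout context that was
        # left open after the client disconnects.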
class WrappedTimeout(ChunkWriteTimeout):
def __enter__(self):
timeouts[self] = traceback.extract_stack()
return super(WrappedTimeout, self).__enter__()
def __exit__(self, typ, value, tb):
timeouts[self] = None
return super(WrappedTimeout, self).__exit__(typ, value, tb)
timeouts = {}
with mock.patch('swift.proxy.controllers.base.ChunkWriteTimeout',
WrappedTimeout):
with mock.patch.object(_test_servers[0], 'client_timeout', new=5):
# get object
fd.write('GET /v1/a/%s-discon/test HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.ec_policy.name)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
                # read just the start of the object, then disconnect
fd.read(10)
sock.fd._sock.close()
self._sleep_enough(
lambda:
_test_servers[0].logger.get_lines_for_level('warning'))
# check for disconnect message!
expected = ['Client disconnected on read'] * 2
self.assertEqual(
_test_servers[0].logger.get_lines_for_level('warning'),
expected)
# check that no coro was left waiting to write
self.assertTrue(timeouts) # sanity - WrappedTimeout did get called
missing_exits = [tb for tb in timeouts.values() if tb is not None]
self.assertFalse(
missing_exits, 'Failed to exit all ChunkWriteTimeouts.\n' +
''.join(['No exit from ChunkWriteTimeout entered at:\n' +
''.join(traceback.format_list(tb)[:-1])
for tb in missing_exits]))
# and check that the ChunkWriteTimeouts did not raise Exceptions
self.assertFalse(_test_servers[0].logger.get_lines_for_level('error'))
def test_ec_client_put_disconnect(self):
prolis = _test_sockets[0]
# create connection
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
# create container
fd.write('PUT /v1/a/%s-discon HTTP/1.1\r\n'
'Host: localhost\r\n'
'Content-Length: 0\r\n'
'X-Storage-Token: t\r\n'
'X-Storage-Policy: %s\r\n'
'\r\n' % (self.ec_policy.name, self.ec_policy.name))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2'
self.assertEqual(headers[:len(exp)], exp)
# create object
obj = 'a' * 4 * 64 * 2 ** 10
fd.write('PUT /v1/a/%s-discon/test HTTP/1.1\r\n'
'Host: localhost\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n%s' % (self.ec_policy.name, len(obj), obj[:-10]))
fd.flush()
fd.close()
sock.close()
        # sleep so eventlet can trampoline enough for the warning to be
        # logged
condition = \
lambda: _test_servers[0].logger.get_lines_for_level('warning')
self._sleep_enough(condition)
expected = ['Client disconnected without sending enough data']
warns = _test_servers[0].logger.get_lines_for_level('warning')
self.assertEqual(expected, warns)
errors = _test_servers[0].logger.get_lines_for_level('error')
self.assertEqual([], errors)
class TestECObjectController(BaseTestECObjectController, unittest.TestCase):
def setUp(self):
self.ec_policy = POLICIES[3]
super(TestECObjectController, self).setUp()
class TestECDuplicationObjectController(
BaseTestECObjectController, unittest.TestCase):
def setUp(self):
self.ec_policy = POLICIES[4]
super(TestECDuplicationObjectController, self).setUp()
class TestECMismatchedFA(unittest.TestCase):
def tearDown(self):
prosrv = _test_servers[0]
# don't leak error limits and poison other tests
prosrv._error_limiting = {}
def test_mixing_different_objects_fragment_archives(self):
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
obj2srv, obj3srv, obj4srv, obj5srv, obj6srv) = _test_servers
ec_policy = POLICIES[3]
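        # stand-in object-server method that always fails as if the disk
        # were unmounted (507)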
@public
def bad_disk(req):
return Response(status=507, body="borken")
ensure_container = Request.blank(
"/v1/a/ec-crazytown",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Storage-Policy": "ec", "X-Auth-Token": "t"})
resp = ensure_container.get_response(prosrv)
self.assertIn(resp.status_int, (201, 202))
obj1 = "first version..."
put_req1 = Request.blank(
"/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Auth-Token": "t"})
put_req1.body = obj1
obj2 = u"versión segundo".encode("utf-8")
put_req2 = Request.blank(
"/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Auth-Token": "t"})
put_req2.body = obj2
# pyeclib has checks for unequal-length; we don't want to trip those
self.assertEqual(len(obj1), len(obj2))
# Server obj1 will have the first version of the object (obj2 also
# gets it, but that gets stepped on later)
prosrv._error_limiting = {}
with mock.patch.object(obj3srv, 'PUT', bad_disk), \
mock.patch(
'swift.common.storage_policy.ECStoragePolicy.quorum'):
type(ec_policy).quorum = mock.PropertyMock(return_value=2)
resp = put_req1.get_response(prosrv)
self.assertEqual(resp.status_int, 201)
# Servers obj2 and obj3 will have the second version of the object.
prosrv._error_limiting = {}
with mock.patch.object(obj1srv, 'PUT', bad_disk), \
mock.patch(
'swift.common.storage_policy.ECStoragePolicy.quorum'):
type(ec_policy).quorum = mock.PropertyMock(return_value=2)
resp = put_req2.get_response(prosrv)
self.assertEqual(resp.status_int, 201)
# A GET that only sees 1 fragment archive should fail
get_req = Request.blank("/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv._error_limiting = {}
with mock.patch.object(obj1srv, 'GET', bad_disk), \
mock.patch.object(obj2srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 503)
# A GET that sees 2 matching FAs will work
get_req = Request.blank("/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv._error_limiting = {}
with mock.patch.object(obj1srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, obj2)
# A GET that sees 2 mismatching FAs will fail
get_req = Request.blank("/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv._error_limiting = {}
with mock.patch.object(obj2srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 503)
def test_mixing_different_objects_fragment_archives_with_dup_factor(self):
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
obj2srv, obj3srv, obj4srv, obj5srv, obj6srv) = _test_servers
ec_policy = POLICIES[4]
@public
def bad_disk(req):
return Response(status=507, body="borken")
ensure_container = Request.blank(
"/v1/a/ec-dup-crazytown",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Storage-Policy": "ec-dup", "X-Auth-Token": "t"})
resp = ensure_container.get_response(prosrv)
self.assertIn(resp.status_int, (201, 202))
obj1 = "first version..."
put_req1 = Request.blank(
"/v1/a/ec-dup-crazytown/obj",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Auth-Token": "t"})
put_req1.body = obj1
obj2 = u"versión segundo".encode("utf-8")
put_req2 = Request.blank(
"/v1/a/ec-dup-crazytown/obj",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Auth-Token": "t"})
put_req2.body = obj2
# pyeclib has checks for unequal-length; we don't want to trip those
self.assertEqual(len(obj1), len(obj2))
        # First subset of object servers will have the first version of the
        # object
prosrv._error_limiting = {}
with mock.patch.object(obj4srv, 'PUT', bad_disk), \
mock.patch.object(obj5srv, 'PUT', bad_disk), \
mock.patch.object(obj6srv, 'PUT', bad_disk), \
mock.patch(
'swift.common.storage_policy.ECStoragePolicy.quorum'):
type(ec_policy).quorum = mock.PropertyMock(return_value=3)
resp = put_req1.get_response(prosrv)
self.assertEqual(resp.status_int, 201)
# Second subset will have the second version of the object.
prosrv._error_limiting = {}
with mock.patch.object(obj1srv, 'PUT', bad_disk), \
mock.patch.object(obj2srv, 'PUT', bad_disk), \
mock.patch.object(obj3srv, 'PUT', bad_disk), \
mock.patch(
'swift.common.storage_policy.ECStoragePolicy.quorum'):
type(ec_policy).quorum = mock.PropertyMock(return_value=3)
resp = put_req2.get_response(prosrv)
self.assertEqual(resp.status_int, 201)
# A GET that only sees 1 fragment archive should fail
get_req = Request.blank("/v1/a/ec-dup-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv._error_limiting = {}
with mock.patch.object(obj2srv, 'GET', bad_disk), \
mock.patch.object(obj3srv, 'GET', bad_disk), \
mock.patch.object(obj4srv, 'GET', bad_disk), \
mock.patch.object(obj5srv, 'GET', bad_disk), \
mock.patch.object(obj6srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 503)
# A GET that sees 2 matching FAs will work
get_req = Request.blank("/v1/a/ec-dup-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv._error_limiting = {}
with mock.patch.object(obj1srv, 'GET', bad_disk), \
mock.patch.object(obj2srv, 'GET', bad_disk), \
mock.patch.object(obj3srv, 'GET', bad_disk), \
mock.patch.object(obj6srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, obj2)
# A GET that sees 2 mismatching FAs will fail
get_req = Request.blank("/v1/a/ec-dup-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv._error_limiting = {}
with mock.patch.object(obj2srv, 'GET', bad_disk), \
mock.patch.object(obj3srv, 'GET', bad_disk), \
mock.patch.object(obj4srv, 'GET', bad_disk), \
mock.patch.object(obj6srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 503)
class TestECGets(unittest.TestCase):
def tearDown(self):
prosrv = _test_servers[0]
# don't leak error limits and poison other tests
prosrv._error_limiting = {}
def _setup_nodes_and_do_GET(self, objs, node_state):
"""
A helper method that creates object fragments, stashes them in temp
dirs, and then moves selected fragments back into the hash_dirs on each
node according to a specified desired node state description.
:param objs: a dict that maps object references to dicts that describe
the object timestamp and content. Object frags will be
created for each item in this dict.
:param node_state: a dict that maps a node index to the desired state
for that node. Each desired state is a list of
dicts, with each dict describing object reference,
frag_index and whether the file moved to the node's
hash_dir should be marked as durable or not.
"""
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
obj2srv, obj3srv, _obj4srv, _obj5srv, _obj6srv) = _test_servers
ec_policy = POLICIES[3]
container_name = uuid.uuid4().hex
obj_name = uuid.uuid4().hex
obj_path = os.path.join(os.sep, 'v1', 'a', container_name, obj_name)
# PUT container, make sure it worked
container_path = os.path.join(os.sep, 'v1', 'a', container_name)
ec_container = Request.blank(
container_path, environ={"REQUEST_METHOD": "PUT"},
headers={"X-Storage-Policy": "ec", "X-Auth-Token": "t"})
resp = ec_container.get_response(prosrv)
self.assertIn(resp.status_int, (201, 202))
partition, nodes = \
ec_policy.object_ring.get_nodes('a', container_name, obj_name)
# map nodes to hash dirs
node_hash_dirs = {}
node_tmp_dirs = collections.defaultdict(dict)
for node in nodes:
node_hash_dirs[node['index']] = os.path.join(
_testdir, node['device'], storage_directory(
diskfile.get_data_dir(ec_policy),
partition, hash_path('a', container_name, obj_name)))
def _put_object(ref, timestamp, body):
# PUT an object and then move its disk files to a temp dir
headers = {"X-Timestamp": timestamp.internal}
put_req1 = Request.blank(obj_path, method='PUT', headers=headers)
put_req1.body = body
resp = put_req1.get_response(prosrv)
self.assertEqual(resp.status_int, 201)
# GET the obj, should work fine
get_req = Request.blank(obj_path, method="GET")
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, body)
# move all hash dir files to per-node, per-obj tempdir
for node_index, hash_dir in node_hash_dirs.items():
node_tmp_dirs[node_index][ref] = mkdtemp()
for f in os.listdir(hash_dir):
move(os.path.join(hash_dir, f),
os.path.join(node_tmp_dirs[node_index][ref], f))
for obj_ref, obj_info in objs.items():
_put_object(obj_ref, **obj_info)
# sanity check - all hash_dirs are empty and GET returns a 404
for hash_dir in node_hash_dirs.values():
self.assertFalse(os.listdir(hash_dir))
get_req = Request.blank(obj_path, method="GET")
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 404)
# node state is in form:
# {node_index: [{ref: object reference,
# frag_index: index,
# durable: True or False}, ...],
# node_index: ...}
for node_index, state in node_state.items():
dest = node_hash_dirs[node_index]
for frag_info in state:
src = node_tmp_dirs[frag_info['frag_index']][frag_info['ref']]
src_files = os.listdir(src)
# sanity check, expect just a single .data file
self.assertFalse(src_files[1:])
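                # keep the '#d' durable marker in the file name only if
                # this frag should be durable on this node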
dest_file = src_files[0].replace(
'#d', '#d' if frag_info['durable'] else '')
move(os.path.join(src, src_files[0]),
os.path.join(dest, dest_file))
# do an object GET
get_req = Request.blank(obj_path, method='GET')
return get_req.get_response(prosrv)
def test_GET_with_missing_durables(self):
# verify object GET behavior when durable files are missing
ts_iter = make_timestamp_iter()
objs = {'obj1': dict(timestamp=next(ts_iter), body='body')}
# durable missing from 2/3 nodes
node_state = {
0: [dict(ref='obj1', frag_index=0, durable=True)],
1: [dict(ref='obj1', frag_index=1, durable=False)],
2: [dict(ref='obj1', frag_index=2, durable=False)]
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, objs['obj1']['body'])
# all files missing on 1 node, durable missing from 1/2 other nodes
# durable missing from 2/3 nodes
node_state = {
0: [dict(ref='obj1', frag_index=0, durable=True)],
1: [],
2: [dict(ref='obj1', frag_index=2, durable=False)]
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, objs['obj1']['body'])
# durable missing from all 3 nodes
node_state = {
0: [dict(ref='obj1', frag_index=0, durable=False)],
1: [dict(ref='obj1', frag_index=1, durable=False)],
2: [dict(ref='obj1', frag_index=2, durable=False)]
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 503)
def test_GET_with_multiple_frags_per_node(self):
# verify object GET behavior when multiple fragments are on same node
ts_iter = make_timestamp_iter()
objs = {'obj1': dict(timestamp=next(ts_iter), body='body')}
# scenario: only two frags, both on same node
node_state = {
0: [],
1: [dict(ref='obj1', frag_index=0, durable=True),
dict(ref='obj1', frag_index=1, durable=False)],
2: []
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, objs['obj1']['body'])
# scenario: all 3 frags on same node
node_state = {
0: [],
1: [dict(ref='obj1', frag_index=0, durable=True),
dict(ref='obj1', frag_index=1, durable=False),
dict(ref='obj1', frag_index=2, durable=False)],
2: []
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, objs['obj1']['body'])
def test_GET_with_multiple_timestamps_on_nodes(self):
ts_iter = make_timestamp_iter()
ts_1, ts_2, ts_3 = [next(ts_iter) for _ in range(3)]
objs = {'obj1': dict(timestamp=ts_1, body='body1'),
'obj2': dict(timestamp=ts_2, body='body2'),
'obj3': dict(timestamp=ts_3, body='body3')}
# newer non-durable frags do not prevent proxy getting the durable obj1
node_state = {
0: [dict(ref='obj3', frag_index=0, durable=False),
dict(ref='obj2', frag_index=0, durable=False),
dict(ref='obj1', frag_index=0, durable=True)],
1: [dict(ref='obj3', frag_index=1, durable=False),
dict(ref='obj2', frag_index=1, durable=False),
dict(ref='obj1', frag_index=1, durable=True)],
2: [dict(ref='obj3', frag_index=2, durable=False),
dict(ref='obj2', frag_index=2, durable=False),
dict(ref='obj1', frag_index=2, durable=True)],
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, objs['obj1']['body'])
# durable frags at two timestamps: in this scenario proxy is guaranteed
# to see the durable at ts_2 with one of the first 2 responses, so will
# then prefer that when requesting from third obj server
node_state = {
0: [dict(ref='obj3', frag_index=0, durable=False),
dict(ref='obj2', frag_index=0, durable=False),
dict(ref='obj1', frag_index=0, durable=True)],
1: [dict(ref='obj3', frag_index=1, durable=False),
dict(ref='obj2', frag_index=1, durable=True)],
2: [dict(ref='obj3', frag_index=2, durable=False),
dict(ref='obj2', frag_index=2, durable=True)],
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, objs['obj2']['body'])
def test_GET_with_same_frag_index_on_multiple_nodes(self):
ts_iter = make_timestamp_iter()
# this is a trick to be able to get identical frags placed onto
# multiple nodes: since we cannot *copy* frags, we generate three sets
# of identical frags at same timestamp so we have enough to *move*
ts_1 = next(ts_iter)
objs = {'obj1a': dict(timestamp=ts_1, body='body'),
'obj1b': dict(timestamp=ts_1, body='body'),
'obj1c': dict(timestamp=ts_1, body='body')}
# arrange for duplicate frag indexes across nodes: because the object
# server prefers the highest available frag index, proxy will first get
# back two responses with frag index 1, and will then return to node 0
# for frag_index 0.
node_state = {
0: [dict(ref='obj1a', frag_index=0, durable=False),
dict(ref='obj1a', frag_index=1, durable=False)],
1: [dict(ref='obj1b', frag_index=1, durable=True)],
2: [dict(ref='obj1c', frag_index=1, durable=True)]
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, objs['obj1a']['body'])
# if all we have across nodes are frags with same index then expect a
# 404 (the third, 'extra', obj server GET will return 404 because it
# will be sent frag prefs that exclude frag_index 1)
node_state = {
0: [dict(ref='obj1a', frag_index=1, durable=False)],
1: [dict(ref='obj1b', frag_index=1, durable=True)],
2: [dict(ref='obj1c', frag_index=1, durable=False)]
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 404)
class TestObjectDisconnectCleanup(unittest.TestCase):
    # update this pattern if do_setup ever creates devices with different
    # names
device_pattern = re.compile('sd[a-z][0-9]')
def _cleanup_devices(self):
# make sure all the object data is cleaned up
for dev in os.listdir(_testdir):
if not self.device_pattern.match(dev):
continue
device_path = os.path.join(_testdir, dev)
for datadir in os.listdir(device_path):
if 'object' not in datadir:
continue
data_path = os.path.join(device_path, datadir)
rmtree(data_path, ignore_errors=True)
mkdirs(data_path)
def setUp(self):
debug.hub_exceptions(False)
self._cleanup_devices()
def tearDown(self):
debug.hub_exceptions(True)
self._cleanup_devices()
def _check_disconnect_cleans_up(self, policy_name, is_chunked=False):
proxy_port = _test_sockets[0].getsockname()[1]
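        # minimal httplib PUT helper; optionally chunk-encodes the body and
        # force-closes the raw socket when done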
def put(path, headers=None, body=None):
conn = httplib.HTTPConnection('localhost', proxy_port)
try:
conn.connect()
conn.putrequest('PUT', path)
for k, v in (headers or {}).items():
conn.putheader(k, v)
conn.endheaders()
body = body or ['']
for chunk in body:
if is_chunked:
chunk = '%x\r\n%s\r\n' % (len(chunk), chunk)
conn.send(chunk)
resp = conn.getresponse()
body = resp.read()
finally:
# seriously - shut this mother down
if conn.sock:
conn.sock.fd._sock.close()
return resp, body
# ensure container
container_path = '/v1/a/%s-disconnect-test' % policy_name
resp, _body = put(container_path, headers={
'Connection': 'close',
'X-Storage-Policy': policy_name,
'Content-Length': '0',
})
self.assertIn(resp.status, (201, 202))
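        # request body generator that yields a few chunks and then blows
        # up, simulating a client that dies mid-upload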
def exploding_body():
for i in range(3):
yield '\x00' * (64 * 2 ** 10)
raise Exception('kaboom!')
headers = {}
if is_chunked:
headers['Transfer-Encoding'] = 'chunked'
else:
headers['Content-Length'] = 64 * 2 ** 20
obj_path = container_path + '/disconnect-data'
try:
resp, _body = put(obj_path, headers=headers,
body=exploding_body())
except Exception as e:
if str(e) != 'kaboom!':
raise
else:
self.fail('obj put connection did not ka-splod')
sleep(0.1)
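    # walk the test data dir and group any leftover files by extension so
    # tests can assert that no .data files survived the disconnect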
def find_files(self):
found_files = defaultdict(list)
for root, dirs, files in os.walk(_testdir):
for fname in files:
filename, ext = os.path.splitext(fname)
found_files[ext].append(os.path.join(root, fname))
return found_files
def test_repl_disconnect_cleans_up(self):
self._check_disconnect_cleans_up('zero')
found_files = self.find_files()
self.assertEqual(found_files['.data'], [])
def test_ec_disconnect_cleans_up(self):
self._check_disconnect_cleans_up('ec')
found_files = self.find_files()
self.assertEqual(found_files['.data'], [])
def test_repl_chunked_transfer_disconnect_cleans_up(self):
self._check_disconnect_cleans_up('zero', is_chunked=True)
found_files = self.find_files()
self.assertEqual(found_files['.data'], [])
def test_ec_chunked_transfer_disconnect_cleans_up(self):
self._check_disconnect_cleans_up('ec', is_chunked=True)
found_files = self.find_files()
self.assertEqual(found_files['.data'], [])
class TestObjectECRangedGET(unittest.TestCase):
def setUp(self):
_test_servers[0].logger._clear()
self.app = proxy_server.Application(
None, FakeMemcache(),
logger=debug_logger('proxy-ut'),
account_ring=FakeRing(),
container_ring=FakeRing())
def tearDown(self):
prosrv = _test_servers[0]
self.assertFalse(prosrv.logger.get_lines_for_level('error'))
self.assertFalse(prosrv.logger.get_lines_for_level('warning'))
@classmethod
def setUpClass(cls):
cls.obj_name = 'range-get-test'
cls.tiny_obj_name = 'range-get-test-tiny'
cls.aligned_obj_name = 'range-get-test-aligned'
# Note: only works if called with unpatched policies
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: 0\r\n'
'X-Storage-Token: t\r\n'
'X-Storage-Policy: ec\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2'
assert headers[:len(exp)] == exp, "container PUT failed"
seg_size = POLICIES.get_by_name("ec").ec_segment_size
cls.seg_size = seg_size
# EC segment size is 4 KiB, hence this gives 4 segments, which we
# then verify with a quick sanity check
cls.obj = ' my hovercraft is full of eels '.join(
str(s) for s in range(431))
assert seg_size * 4 > len(cls.obj) > seg_size * 3, \
"object is wrong number of segments"
cls.obj_etag = md5(cls.obj).hexdigest()
cls.tiny_obj = 'tiny, tiny object'
assert len(cls.tiny_obj) < seg_size, "tiny_obj too large"
cls.aligned_obj = "".join(
"abcdEFGHijkl%04d" % x for x in range(512))
assert len(cls.aligned_obj) % seg_size == 0, "aligned obj not aligned"
for obj_name, obj in ((cls.obj_name, cls.obj),
(cls.tiny_obj_name, cls.tiny_obj),
(cls.aligned_obj_name, cls.aligned_obj)):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/%s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n%s' % (obj_name, len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
assert headers[:len(exp)] == exp, \
"object PUT failed %s" % obj_name
def _get_obj(self, range_value, obj_name=None):
if obj_name is None:
obj_name = self.obj_name
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/ec-con/%s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Range: %s\r\n'
'\r\n' % (obj_name, range_value))
fd.flush()
headers = readuntil2crlfs(fd)
# e.g. "HTTP/1.1 206 Partial Content\r\n..."
status_code = int(headers[9:12])
headers = parse_headers_string(headers)
gotten_obj = ''
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
# if we get this wrong, clients will either get truncated data or
# they'll hang waiting for bytes that aren't coming, so it warrants
# being asserted for every test case
if 'Content-Length' in headers:
self.assertEqual(int(headers['Content-Length']), len(gotten_obj))
# likewise, if we say MIME and don't send MIME or vice versa,
# clients will be horribly confused
if headers.get('Content-Type', '').startswith('multipart/byteranges'):
self.assertEqual(gotten_obj[:2], "--")
else:
# In general, this isn't true, as you can start an object with
# "--". However, in this test, we don't start any objects with
# "--", or even include "--" in their contents anywhere.
self.assertNotEqual(gotten_obj[:2], "--")
return (status_code, headers, gotten_obj)
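    # Parse a multipart/byteranges body with the stdlib email parser and
    # return the individual part messages, asserting none are defective
    # or nested.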
def _parse_multipart(self, content_type, body):
parser = email.parser.FeedParser()
parser.feed("Content-Type: %s\r\n\r\n" % content_type)
parser.feed(body)
root_message = parser.close()
self.assertTrue(root_message.is_multipart())
byteranges = root_message.get_payload()
self.assertFalse(root_message.defects)
for i, message in enumerate(byteranges):
self.assertFalse(message.defects, "Part %d had defects" % i)
self.assertFalse(message.is_multipart(),
"Nested multipart at %d" % i)
return byteranges
def test_bogus(self):
status, headers, gotten_obj = self._get_obj("tacos=3-5")
self.assertEqual(status, 200)
self.assertEqual(len(gotten_obj), len(self.obj))
self.assertEqual(gotten_obj, self.obj)
def test_unaligned(self):
# One segment's worth of data, but straddling two segment boundaries
# (so it has data from three segments)
status, headers, gotten_obj = self._get_obj("bytes=3783-7878")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "4096")
self.assertEqual(headers['Content-Range'], "bytes 3783-7878/14513")
self.assertEqual(len(gotten_obj), 4096)
self.assertEqual(gotten_obj, self.obj[3783:7879])
def test_aligned_left(self):
# First byte is aligned to a segment boundary, last byte is not
status, headers, gotten_obj = self._get_obj("bytes=0-5500")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "5501")
self.assertEqual(headers['Content-Range'], "bytes 0-5500/14513")
self.assertEqual(len(gotten_obj), 5501)
self.assertEqual(gotten_obj, self.obj[:5501])
def test_aligned_range(self):
# Ranged GET that wants exactly one segment
status, headers, gotten_obj = self._get_obj("bytes=4096-8191")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "4096")
self.assertEqual(headers['Content-Range'], "bytes 4096-8191/14513")
self.assertEqual(len(gotten_obj), 4096)
self.assertEqual(gotten_obj, self.obj[4096:8192])
def test_aligned_range_end(self):
# Ranged GET that wants exactly the last segment
status, headers, gotten_obj = self._get_obj("bytes=12288-14512")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "2225")
self.assertEqual(headers['Content-Range'], "bytes 12288-14512/14513")
self.assertEqual(len(gotten_obj), 2225)
self.assertEqual(gotten_obj, self.obj[12288:])
def test_aligned_range_aligned_obj(self):
# Ranged GET that wants exactly the last segment, which is full-size
status, headers, gotten_obj = self._get_obj("bytes=4096-8191",
self.aligned_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "4096")
self.assertEqual(headers['Content-Range'], "bytes 4096-8191/8192")
self.assertEqual(len(gotten_obj), 4096)
self.assertEqual(gotten_obj, self.aligned_obj[4096:8192])
def test_byte_0(self):
# Just the first byte, but it's index 0, so that's easy to get wrong
status, headers, gotten_obj = self._get_obj("bytes=0-0")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "1")
self.assertEqual(headers['Content-Range'], "bytes 0-0/14513")
self.assertEqual(gotten_obj, self.obj[0])
def test_unsatisfiable(self):
# Goes just one byte too far off the end of the object, so it's
# unsatisfiable. This should be close enough that the object servers
# actually responded 206
obj_len = len(self.obj)
status, headers, _junk = self._get_obj(
"bytes=%d-%d" % (obj_len, obj_len + 100))
self.assertEqual(status, 416)
self.assertEqual(self.obj_etag, headers.get('Etag'))
self.assertEqual('bytes', headers.get('Accept-Ranges'))
self.assertIn('Content-Range', headers)
self.assertEqual('bytes */%d' % obj_len, headers['Content-Range'])
# Goes *way* too far off the end of the object, so we're looking at
# the (massaged) 416 from an object server
status, headers, _junk = self._get_obj(
"bytes=%d-" % (obj_len + 2 ** 30))
self.assertEqual(status, 416)
self.assertEqual(self.obj_etag, headers.get('Etag'))
self.assertEqual('bytes', headers.get('Accept-Ranges'))
self.assertIn('Content-Range', headers)
self.assertEqual('bytes */%d' % obj_len, headers['Content-Range'])
def test_off_end(self):
# Ranged GET that's mostly off the end of the object, but overlaps
# it in just the last byte
status, headers, gotten_obj = self._get_obj(
"bytes=%d-%d" % (len(self.obj) - 1, len(self.obj) + 100))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '1')
self.assertEqual(headers['Content-Range'], 'bytes 14512-14512/14513')
self.assertEqual(gotten_obj, self.obj[-1])
def test_aligned_off_end(self):
# Ranged GET that starts on a segment boundary but asks for a whole lot
status, headers, gotten_obj = self._get_obj(
"bytes=%d-%d" % (8192, len(self.obj) + 100))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '6321')
self.assertEqual(headers['Content-Range'], 'bytes 8192-14512/14513')
self.assertEqual(gotten_obj, self.obj[8192:])
def test_way_off_end(self):
# Ranged GET that's mostly off the end of the object, but overlaps
# it in just the last byte, and wants multiple segments' worth off
# the end
status, headers, gotten_obj = self._get_obj(
"bytes=%d-%d" % (len(self.obj) - 1, len(self.obj) * 1000))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '1')
self.assertEqual(headers['Content-Range'], 'bytes 14512-14512/14513')
self.assertEqual(gotten_obj, self.obj[-1])
def test_boundaries(self):
# Wants the last byte of segment 1 + the first byte of segment 2
status, headers, gotten_obj = self._get_obj("bytes=4095-4096")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '2')
self.assertEqual(headers['Content-Range'], 'bytes 4095-4096/14513')
self.assertEqual(gotten_obj, self.obj[4095:4097])
def test_until_end(self):
# Wants the last byte of segment 1 + the rest
status, headers, gotten_obj = self._get_obj("bytes=4095-")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '10418')
self.assertEqual(headers['Content-Range'], 'bytes 4095-14512/14513')
self.assertEqual(gotten_obj, self.obj[4095:])
def test_small_suffix(self):
# Small range-suffix GET: the last 100 bytes (less than one segment)
status, headers, gotten_obj = self._get_obj("bytes=-100")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '100')
self.assertEqual(headers['Content-Range'], 'bytes 14413-14512/14513')
self.assertEqual(len(gotten_obj), 100)
self.assertEqual(gotten_obj, self.obj[-100:])
def test_small_suffix_aligned(self):
# Small range-suffix GET: the last 100 bytes, last segment is
# full-size
status, headers, gotten_obj = self._get_obj("bytes=-100",
self.aligned_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '100')
self.assertEqual(headers['Content-Range'], 'bytes 8092-8191/8192')
self.assertEqual(len(gotten_obj), 100)
def test_suffix_two_segs(self):
# Ask for enough data that we need the last two segments. The last
# segment is short, though, so this ensures we compensate for that.
#
# Note that the total range size is less than one full-size segment.
suffix_len = len(self.obj) % self.seg_size + 1
status, headers, gotten_obj = self._get_obj("bytes=-%d" % suffix_len)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], str(suffix_len))
self.assertEqual(headers['Content-Range'],
'bytes %d-%d/%d' % (len(self.obj) - suffix_len,
len(self.obj) - 1,
len(self.obj)))
self.assertEqual(len(gotten_obj), suffix_len)
def test_large_suffix(self):
# Large range-suffix GET: the last 5000 bytes (more than one segment)
status, headers, gotten_obj = self._get_obj("bytes=-5000")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '5000')
self.assertEqual(headers['Content-Range'], 'bytes 9513-14512/14513')
self.assertEqual(len(gotten_obj), 5000)
self.assertEqual(gotten_obj, self.obj[-5000:])
def test_overlarge_suffix(self):
# The last N+1 bytes of an N-byte object
status, headers, gotten_obj = self._get_obj(
"bytes=-%d" % (len(self.obj) + 1))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '14513')
self.assertEqual(headers['Content-Range'], 'bytes 0-14512/14513')
self.assertEqual(len(gotten_obj), len(self.obj))
self.assertEqual(gotten_obj, self.obj)
def test_small_suffix_tiny_object(self):
status, headers, gotten_obj = self._get_obj(
"bytes=-5", self.tiny_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '5')
self.assertEqual(headers['Content-Range'], 'bytes 12-16/17')
self.assertEqual(gotten_obj, self.tiny_obj[12:])
def test_overlarge_suffix_tiny_object(self):
status, headers, gotten_obj = self._get_obj(
"bytes=-1234567890", self.tiny_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '17')
self.assertEqual(headers['Content-Range'], 'bytes 0-16/17')
self.assertEqual(len(gotten_obj), len(self.tiny_obj))
self.assertEqual(gotten_obj, self.tiny_obj)
def test_multiple_ranges(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,4490-5010", self.obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers["Content-Length"], str(len(gotten_obj)))
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
content_type_params = dict(content_type_params)
self.assertEqual(content_type, 'multipart/byteranges')
boundary = content_type_params.get('boundary')
self.assertIsNotNone(boundary)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
first_byterange, second_byterange = got_byteranges
self.assertEqual(first_byterange['Content-Range'],
'bytes 0-100/14513')
self.assertEqual(first_byterange.get_payload(), self.obj[:101])
self.assertEqual(second_byterange['Content-Range'],
'bytes 4490-5010/14513')
self.assertEqual(second_byterange.get_payload(), self.obj[4490:5011])
def test_multiple_ranges_overlapping_in_segment(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-9,20-29,40-49,60-69,80-89")
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 5)
def test_multiple_ranges_off_end(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-10,14500-14513") # there is no byte 14513, only 0-14512
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(got_byteranges[0]['Content-Range'],
"bytes 0-10/14513")
self.assertEqual(got_byteranges[1]['Content-Range'],
"bytes 14500-14512/14513")
def test_multiple_ranges_suffix_off_end(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-10,-13")
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(got_byteranges[0]['Content-Range'],
"bytes 0-10/14513")
self.assertEqual(got_byteranges[1]['Content-Range'],
"bytes 14500-14512/14513")
def test_multiple_ranges_one_barely_unsatisfiable(self):
# The thing about 14515-14520 is that it comes from the last segment
# in the object. When we turn this range into a fragment range,
# it'll be for the last fragment, so the object servers see
# something satisfiable.
#
# Basically, we'll get 3 byteranges from the object server, but we
# have to filter out the unsatisfiable one on our own.
status, headers, gotten_obj = self._get_obj(
"bytes=0-10,14515-14520,40-50")
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(got_byteranges[0]['Content-Range'],
"bytes 0-10/14513")
self.assertEqual(got_byteranges[0].get_payload(), self.obj[0:11])
self.assertEqual(got_byteranges[1]['Content-Range'],
"bytes 40-50/14513")
self.assertEqual(got_byteranges[1].get_payload(), self.obj[40:51])
def test_multiple_ranges_some_unsatisfiable(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,4090-5010,999999-9999999", self.obj_name)
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
content_type_params = dict(content_type_params)
self.assertEqual(content_type, 'multipart/byteranges')
boundary = content_type_params.get('boundary')
self.assertIsNotNone(boundary)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
first_byterange, second_byterange = got_byteranges
self.assertEqual(first_byterange['Content-Range'],
'bytes 0-100/14513')
self.assertEqual(first_byterange.get_payload(), self.obj[:101])
self.assertEqual(second_byterange['Content-Range'],
'bytes 4090-5010/14513')
self.assertEqual(second_byterange.get_payload(), self.obj[4090:5011])
def test_two_ranges_one_unsatisfiable(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,999999-9999999", self.obj_name)
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
# According to RFC 7233, this could be either a multipart/byteranges
# response with one part or it could be a single-part response (just
# the bytes, no MIME). We're locking it down here: single-part
# response. That's what replicated objects do, and we don't want any
# client-visible differences between EC objects and replicated ones.
self.assertEqual(content_type, 'donuts')
self.assertEqual(gotten_obj, self.obj[:101])
def test_two_ranges_one_unsatisfiable_same_segment(self):
# Like test_two_ranges_one_unsatisfiable(), but where both ranges
# fall within the same EC segment.
status, headers, gotten_obj = self._get_obj(
"bytes=14500-14510,14520-14530")
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
self.assertEqual(content_type, 'donuts')
self.assertEqual(gotten_obj, self.obj[14500:14511])
def test_multiple_ranges_some_unsatisfiable_out_of_order(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,99999998-99999999,4090-5010", self.obj_name)
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
content_type_params = dict(content_type_params)
self.assertEqual(content_type, 'multipart/byteranges')
boundary = content_type_params.get('boundary')
self.assertIsNotNone(boundary)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
first_byterange, second_byterange = got_byteranges
self.assertEqual(first_byterange['Content-Range'],
'bytes 0-100/14513')
self.assertEqual(first_byterange.get_payload(), self.obj[:101])
self.assertEqual(second_byterange['Content-Range'],
'bytes 4090-5010/14513')
self.assertEqual(second_byterange.get_payload(), self.obj[4090:5011])
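# A note on the fixture below (inferred from the StoragePolicy arguments and
# the assertions in TestContainerController): policy 0 is the default, policy 1
# is a normal alternative, and policy 2 is deprecated, which is what lets
# test_convert_policy_to_index exercise the rejection path for deprecated
# policies.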
@patch_policies([
StoragePolicy(0, 'zero', True, object_ring=FakeRing(base_port=3000)),
StoragePolicy(1, 'one', False, object_ring=FakeRing(base_port=3000)),
StoragePolicy(2, 'two', False, True, object_ring=FakeRing(base_port=3000))
])
class TestContainerController(unittest.TestCase):
"Test swift.proxy_server.ContainerController"
def setUp(self):
self.app = proxy_server.Application(
None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing(base_port=2000),
logger=debug_logger())
def test_convert_policy_to_index(self):
controller = swift.proxy.controllers.ContainerController(self.app,
'a', 'c')
expected = {
'zero': 0,
'ZeRo': 0,
'one': 1,
'OnE': 1,
}
for name, index in expected.items():
req = Request.blank('/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Storage-Policy': name})
self.assertEqual(controller._convert_policy_to_index(req), index)
# default test
req = Request.blank('/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.assertIsNone(controller._convert_policy_to_index(req))
# negative test
req = Request.blank('/a/c',
headers={'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Storage-Policy': 'nada'})
self.assertRaises(HTTPException, controller._convert_policy_to_index,
req)
# storage policy two is deprecated
req = Request.blank('/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Storage-Policy': 'two'})
self.assertRaises(HTTPException, controller._convert_policy_to_index,
req)
def test_convert_index_to_name(self):
policy = random.choice(list(POLICIES))
req = Request.blank('/v1/a/c')
with mocked_http_conn(
200, 200,
headers={'X-Backend-Storage-Policy-Index': int(policy)},
) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['X-Storage-Policy'], policy.name)
def test_no_convert_index_to_name_when_container_not_found(self):
policy = random.choice(list(POLICIES))
req = Request.blank('/v1/a/c')
with mocked_http_conn(
200, 404, 404, 404,
headers={'X-Backend-Storage-Policy-Index':
int(policy)}) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 404)
self.assertIsNone(resp.headers['X-Storage-Policy'])
def test_error_convert_index_to_name(self):
req = Request.blank('/v1/a/c')
with mocked_http_conn(
200, 200,
headers={'X-Backend-Storage-Policy-Index': '-1'}) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 200)
self.assertIsNone(resp.headers['X-Storage-Policy'])
error_lines = self.app.logger.get_lines_for_level('error')
self.assertEqual(2, len(error_lines))
for msg in error_lines:
expected = "Could not translate " \
"X-Backend-Storage-Policy-Index ('-1')"
self.assertIn(expected, msg)
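    # A short note on the test below: clients use "x-remove-*" headers to clear
    # settings like versions-location or the container sync key; the proxy is
    # expected to translate each one into the corresponding base header with an
    # empty value, which is exactly what expected_headers asserts.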
def test_transfer_headers(self):
src_headers = {'x-remove-versions-location': 'x',
'x-container-read': '*:user',
'x-remove-container-sync-key': 'x'}
dst_headers = {'x-versions-location': 'backup'}
controller = swift.proxy.controllers.ContainerController(self.app,
'a', 'c')
controller.transfer_headers(src_headers, dst_headers)
expected_headers = {'x-versions-location': '',
'x-container-read': '*:user',
'x-container-sync-key': ''}
self.assertEqual(dst_headers, expected_headers)
def assert_status_map(self, method, statuses, expected,
raise_exc=False, missing_container=False):
with save_globals():
kwargs = {}
if raise_exc:
kwargs['raise_exc'] = raise_exc
kwargs['missing_container'] = missing_container
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
res = method(req)
self.assertEqual(res.status_int, expected)
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/', headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
res = method(req)
self.assertEqual(res.status_int, expected)
def test_HEAD_GET(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
def test_status_map(statuses, expected,
c_expected=None, a_expected=None, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {})
self.app.update_request(req)
res = controller.HEAD(req)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
infocache = res.environ.get('swift.infocache', {})
if expected < 400:
self.assertIn('x-works', res.headers)
self.assertEqual(res.headers['x-works'], 'yes')
if expected < 300:
self.assertIn('last-modified', res.headers)
self.assertEqual(res.headers['last-modified'], '1')
if c_expected:
self.assertIn('container/a/c', infocache)
self.assertEqual(
infocache['container/a/c']['status'],
c_expected)
else:
self.assertNotIn('container/a/c', infocache)
if a_expected:
self.assertIn('account/a', infocache)
self.assertEqual(infocache['account/a']['status'],
a_expected)
else:
                    self.assertNotIn('account/a', infocache)
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {})
self.app.update_request(req)
res = controller.GET(req)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
infocache = res.environ.get('swift.infocache', {})
if expected < 400:
self.assertIn('x-works', res.headers)
self.assertEqual(res.headers['x-works'], 'yes')
if expected < 300:
self.assertIn('last-modified', res.headers)
self.assertEqual(res.headers['last-modified'], '1')
if c_expected:
self.assertIn('container/a/c', infocache)
self.assertEqual(
infocache['container/a/c']['status'],
c_expected)
else:
self.assertNotIn('container/a/c', infocache)
if a_expected:
self.assertIn('account/a', infocache)
self.assertEqual(infocache['account/a']['status'],
a_expected)
else:
self.assertNotIn('account/a', infocache)
            # In all the following tests the account lookup returns 200 and is
            # cached; what gets cached for the container varies per case.
            # return 200 and cache 200 for account and container
test_status_map((200, 200, 404, 404), 200, 200, 200)
test_status_map((200, 200, 500, 404), 200, 200, 200)
# return 304 don't cache container
test_status_map((200, 304, 500, 404), 304, None, 200)
# return 404 and cache 404 for container
test_status_map((200, 404, 404, 404), 404, 404, 200)
test_status_map((200, 404, 404, 500), 404, 404, 200)
# return 503, don't cache container
test_status_map((200, 500, 500, 500), 503, None, 200)
self.assertFalse(self.app.account_autocreate)
# return 404 (as account is not found) and don't cache container
test_status_map((404, 404, 404), 404, None, 404)
# cache a 204 for the account because it's sort of like it
# exists
self.app.account_autocreate = True
test_status_map((404, 404, 404), 404, None, 204)
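    # The next test captures the backend PUT headers to check policy handling:
    # with an explicit X-Storage-Policy the proxy should send
    # X-Backend-Storage-Policy-Index; without one it should send only
    # X-Backend-Storage-Policy-Default; and a deprecated policy should be
    # rejected with 400 before any backend request goes out.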
def test_PUT_policy_headers(self):
backend_requests = []
def capture_requests(ipaddr, port, device, partition, method,
path, headers=None, query_string=None):
if method == 'PUT':
backend_requests.append(headers)
def test_policy(requested_policy):
with save_globals():
mock_conn = set_http_connect(200, 201, 201, 201,
give_connect=capture_requests)
self.app.memcache.store = {}
req = Request.blank('/v1/a/test', method='PUT',
headers={'Content-Length': 0})
if requested_policy:
expected_policy = requested_policy
req.headers['X-Storage-Policy'] = policy.name
else:
expected_policy = POLICIES.default
res = req.get_response(self.app)
if expected_policy.is_deprecated:
self.assertEqual(res.status_int, 400)
self.assertEqual(0, len(backend_requests))
expected = 'is deprecated'
self.assertIn(expected, res.body,
'%r did not include %r' % (
res.body, expected))
return
self.assertEqual(res.status_int, 201)
self.assertEqual(
expected_policy.object_ring.replicas,
len(backend_requests))
for headers in backend_requests:
if not requested_policy:
self.assertNotIn('X-Backend-Storage-Policy-Index',
headers)
self.assertIn('X-Backend-Storage-Policy-Default',
headers)
self.assertEqual(
int(expected_policy),
int(headers['X-Backend-Storage-Policy-Default']))
else:
self.assertIn('X-Backend-Storage-Policy-Index',
headers)
self.assertEqual(int(headers
['X-Backend-Storage-Policy-Index']),
int(policy))
# make sure all mocked responses are consumed
self.assertRaises(StopIteration, mock_conn.code_iter.next)
test_policy(None) # no policy header
for policy in POLICIES:
backend_requests = [] # reset backend requests
test_policy(policy)
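    # In the status tuples used below, the leading status feeds the account
    # lookup and the next three are consumed by the container-server PUTs
    # (FakeRing defaults to three replicas); the longer autocreate tuples are
    # annotated inline.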
def test_PUT(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {})
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 201, 201, 201), 201, missing_container=True)
test_status_map((200, 201, 201, 500), 201, missing_container=True)
test_status_map((200, 204, 404, 404), 404, missing_container=True)
test_status_map((200, 204, 500, 404), 503, missing_container=True)
self.assertFalse(self.app.account_autocreate)
test_status_map((404, 404, 404), 404, missing_container=True)
self.app.account_autocreate = True
# fail to retrieve account info
test_status_map(
(503, 503, 503), # account_info fails on 503
404, missing_container=True)
# account fail after creation
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
404, 404, 404), # account_info fail
404, missing_container=True)
test_status_map(
(503, 503, 404, # account_info fails on 404
503, 503, 503, # PUT account
503, 503, 404), # account_info fail
404, missing_container=True)
# put fails
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
200, # account_info success
503, 503, 201), # put container fail
503, missing_container=True)
# all goes according to plan
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
200, # account_info success
201, 201, 201), # put container success
201, missing_container=True)
test_status_map(
(503, 404, 404, # account_info fails on 404
503, 201, 201, # PUT account
503, 200, # account_info success
503, 201, 201), # put container success
201, missing_container=True)
def test_PUT_autocreate_account_with_sysmeta(self):
# x-account-sysmeta headers in a container PUT request should be
# transferred to the account autocreate PUT request
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
def test_status_map(statuses, expected, headers=None, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {}, headers=headers)
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
self.app.account_autocreate = True
calls = []
callback = _make_callback_func(calls)
key, value = 'X-Account-Sysmeta-Blah', 'something'
headers = {key: value}
# all goes according to plan
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
200, # account_info success
201, 201, 201), # put container success
201, missing_container=True,
headers=headers,
give_connect=callback)
self.assertEqual(10, len(calls))
for call in calls[3:6]:
self.assertEqual('/account', call['path'])
self.assertIn(key, call['headers'],
'%s call, key %s missing in headers %s' % (
call['method'], key, call['headers']))
self.assertEqual(value, call['headers'][key])
def test_POST(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {})
req.content_length = 0
self.app.update_request(req)
res = controller.POST(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 201, 201, 201), 201, missing_container=True)
test_status_map((200, 201, 201, 500), 201, missing_container=True)
test_status_map((200, 204, 404, 404), 404, missing_container=True)
test_status_map((200, 204, 500, 404), 503, missing_container=True)
self.assertFalse(self.app.account_autocreate)
test_status_map((404, 404, 404), 404, missing_container=True)
self.app.account_autocreate = True
test_status_map((404, 404, 404), 404, missing_container=True)
def test_PUT_max_containers_per_account(self):
with save_globals():
self.app.max_containers_per_account = 12346
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT,
(200, 201, 201, 201), 201,
missing_container=True)
self.app.max_containers_per_account = 12345
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT,
(200, 200, 201, 201, 201), 201,
missing_container=True)
controller = proxy_server.ContainerController(self.app, 'account',
'container_new')
self.assert_status_map(controller.PUT, (200, 404, 404, 404), 403,
missing_container=True)
self.app.max_containers_per_account = 12345
self.app.max_containers_whitelist = ['account']
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT,
(200, 201, 201, 201), 201,
missing_container=True)
def test_PUT_max_container_name_length(self):
with save_globals():
limit = constraints.MAX_CONTAINER_NAME_LENGTH
controller = proxy_server.ContainerController(self.app, 'account',
'1' * limit)
self.assert_status_map(controller.PUT,
(200, 201, 201, 201), 201,
missing_container=True)
controller = proxy_server.ContainerController(self.app, 'account',
'2' * (limit + 1))
self.assert_status_map(controller.PUT, (201, 201, 201), 400,
missing_container=True)
def test_PUT_connect_exceptions(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT, (200, 201, 201, -1), 201,
missing_container=True)
self.assert_status_map(controller.PUT, (200, 201, -1, -1), 503,
missing_container=True)
self.assert_status_map(controller.PUT, (200, 503, 503, -1), 503,
missing_container=True)
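    # The test below also covers the error-limited case at the end: once every
    # account node has exceeded error_suppression_limit, the account lookup is
    # treated as a miss and the request comes back 404 even though the mocked
    # backends would all answer 200.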
def test_acc_missing_returns_404(self):
for meth in ('DELETE', 'PUT'):
with save_globals():
self.app.memcache = FakeMemcacheReturnsNone()
self.app._error_limiting = {}
controller = proxy_server.ContainerController(self.app,
'account',
'container')
if meth == 'PUT':
set_http_connect(200, 200, 200, 200, 200, 200,
missing_container=True)
else:
set_http_connect(200, 200, 200, 200)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
self.app.update_request(req)
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 200)
set_http_connect(404, 404, 404, 200, 200, 200)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 404, 404)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 404, raise_exc=True)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
for dev in self.app.account_ring.devs:
set_node_errors(self.app, dev,
self.app.error_suppression_limit + 1,
time.time())
set_http_connect(200, 200, 200, 200, 200, 200)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
def test_put_locking(self):
class MockMemcache(FakeMemcache):
def __init__(self, allow_lock=None):
self.allow_lock = allow_lock
super(MockMemcache, self).__init__()
@contextmanager
def soft_lock(self, key, timeout=0, retries=5):
if self.allow_lock:
yield True
else:
raise NotImplementedError
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.app.memcache = MockMemcache(allow_lock=True)
set_http_connect(200, 201, 201, 201,
missing_container=True)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'PUT'})
self.app.update_request(req)
res = controller.PUT(req)
self.assertEqual(res.status_int, 201)
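    # Error limiting, as exercised below: backend errors bump a per-node error
    # counter, and once a node passes error_suppression_limit it is skipped, so
    # even an all-200 status map yields 503; a negative
    # error_suppression_interval then lets the limiting expire again.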
def test_error_limiting(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
container_ring = controller.app.container_ring
controller.app.sort_nodes = lambda l, *args, **kwargs: l
self.assert_status_map(controller.HEAD, (200, 503, 200, 200), 200,
missing_container=False)
self.assertEqual(
node_error_count(controller.app, container_ring.devs[0]), 2)
self.assertTrue(
node_last_error(controller.app, container_ring.devs[0])
is not None)
for _junk in range(self.app.error_suppression_limit):
self.assert_status_map(controller.HEAD,
(200, 503, 503, 503), 503)
self.assertEqual(
node_error_count(controller.app, container_ring.devs[0]),
self.app.error_suppression_limit + 1)
self.assert_status_map(controller.HEAD, (200, 200, 200, 200), 503)
self.assertTrue(
node_last_error(controller.app, container_ring.devs[0])
is not None)
self.assert_status_map(controller.PUT, (200, 201, 201, 201), 503,
missing_container=True)
self.assert_status_map(controller.DELETE,
(200, 204, 204, 204), 503)
self.app.error_suppression_interval = -300
self.assert_status_map(controller.HEAD, (200, 200, 200, 200), 200)
self.assert_status_map(controller.DELETE, (200, 204, 204, 204),
404, raise_exc=True)
def test_DELETE(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.DELETE,
(200, 204, 204, 204), 204)
self.assert_status_map(controller.DELETE,
(200, 204, 204, 503), 204)
self.assert_status_map(controller.DELETE,
(200, 204, 503, 503), 503)
self.assert_status_map(controller.DELETE,
(200, 204, 404, 404), 404)
self.assert_status_map(controller.DELETE,
(200, 404, 404, 404), 404)
self.assert_status_map(controller.DELETE,
(200, 204, 503, 404), 503)
self.app.memcache = FakeMemcacheReturnsNone()
# 200: Account check, 404x3: Container check
self.assert_status_map(controller.DELETE,
(200, 404, 404, 404), 404)
def test_response_get_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c?format=json')
self.app.update_request(req)
res = controller.GET(req)
self.assertIn('accept-ranges', res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_response_head_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c?format=json')
self.app.update_request(req)
res = controller.HEAD(req)
self.assertIn('accept-ranges', res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_PUT_metadata(self):
self.metadata_helper('PUT')
def test_POST_metadata(self):
self.metadata_helper('POST')
def metadata_helper(self, method):
for test_header, test_value in (
('X-Container-Meta-TestHeader', 'TestValue'),
('X-Container-Meta-TestHeader', ''),
('X-Remove-Container-Meta-TestHeader', 'anything'),
('X-Container-Read', '.r:*'),
('X-Remove-Container-Read', 'anything'),
('X-Container-Write', 'anyone'),
('X-Remove-Container-Write', 'anything')):
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c':
find_header = test_header
find_value = test_value
if find_header.lower().startswith('x-remove-'):
find_header = \
find_header.lower().replace('-remove', '', 1)
find_value = ''
for k, v in headers.items():
if k.lower() == find_header.lower() and \
v == find_value:
break
else:
test_errors.append('%s: %s not in %s' %
(find_header, find_value, headers))
with save_globals():
controller = \
proxy_server.ContainerController(self.app, 'a', 'c')
set_http_connect(200, 201, 201, 201, give_connect=test_connect)
req = Request.blank(
'/v1/a/c',
environ={'REQUEST_METHOD': method, 'swift_owner': True},
headers={test_header: test_value})
self.app.update_request(req)
getattr(controller, method)(req)
self.assertEqual(test_errors, [])
def test_PUT_bad_metadata(self):
self.bad_metadata_helper('PUT')
def test_POST_bad_metadata(self):
self.bad_metadata_helper('POST')
def bad_metadata_helper(self, method):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
set_http_connect(200, 201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-' +
('a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-' +
('a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-Too-Long':
'a' * constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-Too-Long':
'a' * (constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT):
headers['X-Container-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT + 1):
headers['X-Container-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
while size < (constraints.MAX_META_OVERALL_SIZE - 4
- constraints.MAX_META_VALUE_LENGTH):
size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Container-Meta-%04d' % x] = header_value
x += 1
if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Container-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Container-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
def test_POST_calls_clean_acl(self):
called = [False]
def clean_acl(header, value):
called[0] = True
raise ValueError('fake error')
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Container-Read': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.POST(req)
self.assertTrue(called[0])
called[0] = False
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Container-Write': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.POST(req)
self.assertTrue(called[0])
def test_PUT_calls_clean_acl(self):
called = [False]
def clean_acl(header, value):
called[0] = True
raise ValueError('fake error')
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Container-Read': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.PUT(req)
self.assertTrue(called[0])
called[0] = False
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Container-Write': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.PUT(req)
self.assertTrue(called[0])
def test_GET_no_content(self):
with save_globals():
set_http_connect(200, 204, 204, 204)
controller = proxy_server.ContainerController(self.app, 'a', 'c')
req = Request.blank('/v1/a/c')
self.app.update_request(req)
res = controller.GET(req)
self.assertEqual(res.status_int, 204)
ic = res.environ['swift.infocache']
self.assertEqual(ic['container/a/c']['status'], 204)
self.assertEqual(res.content_length, 0)
self.assertNotIn('transfer-encoding', res.headers)
def test_GET_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'a', 'c')
req = Request.blank('/v1/a/c')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
res = controller.GET(req)
self.assertEqual(
res.environ['swift.infocache']['container/a/c']['status'],
201)
self.assertTrue(called[0])
def test_HEAD_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'a', 'c')
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': 'HEAD'})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.HEAD(req)
self.assertTrue(called[0])
def test_unauthorized_requests_when_account_not_found(self):
# verify unauthorized container requests always return response
# from swift.authorize
called = [0, 0]
def authorize(req):
called[0] += 1
return HTTPUnauthorized(request=req)
def account_info(*args):
called[1] += 1
return None, None, None
def _do_test(method):
with save_globals():
swift.proxy.controllers.Controller.account_info = account_info
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': method})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
res = app.handle_request(req)
return res
for method in ('PUT', 'POST', 'DELETE'):
# no delay_denial on method, expect one call to authorize
called = [0, 0]
res = _do_test(method)
self.assertEqual(401, res.status_int)
self.assertEqual([1, 0], called)
for method in ('HEAD', 'GET'):
# delay_denial on method, expect two calls to authorize
called = [0, 0]
res = _do_test(method)
self.assertEqual(401, res.status_int)
self.assertEqual([2, 1], called)
def test_authorized_requests_when_account_not_found(self):
# verify authorized container requests always return 404 when
# account not found
called = [0, 0]
def authorize(req):
called[0] += 1
def account_info(*args):
called[1] += 1
return None, None, None
def _do_test(method):
with save_globals():
swift.proxy.controllers.Controller.account_info = account_info
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': method})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
res = app.handle_request(req)
return res
for method in ('PUT', 'POST', 'DELETE', 'HEAD', 'GET'):
# expect one call to authorize
called = [0, 0]
res = _do_test(method)
self.assertEqual(404, res.status_int)
self.assertEqual([1, 1], called)
def test_OPTIONS_get_info_drops_origin(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
count = [0]
def my_get_info(app, env, account, container=None,
ret_not_found=False, swift_source=None):
if count[0] > 11:
return {}
count[0] += 1
if not container:
return {'some': 'stuff'}
return proxy_base.was_get_info(
app, env, account, container, ret_not_found, swift_source)
proxy_base.was_get_info = proxy_base.get_info
with mock.patch.object(proxy_base, 'get_info', my_get_info):
proxy_base.get_info = my_get_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
controller.OPTIONS(req)
self.assertLess(count[0], 11)
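    # CORS behaviour checked in the next test: no container CORS config or a
    # disallowed Origin gets 401; an allowed Origin (from the container config
    # or the proxy's cors_allow_origin) is echoed back in
    # Access-Control-Allow-Origin; a wildcard allow_origin of '*' is passed
    # through as-is; and a plain OPTIONS without an Origin just reports Allow.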
def test_OPTIONS(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
def my_empty_container_info(*args):
return {}
controller.container_info = my_empty_container_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
def my_empty_origin_container_info(*args):
return {'cors': {'allow_origin': None}}
controller.container_info = my_empty_origin_container_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
def my_container_info(*args):
return {
'cors': {
'allow_origin': 'http://foo.bar:8080 https://foo.bar',
'max_age': '999',
}
}
controller.container_info = my_container_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual(
'https://foo.bar',
resp.headers['access-control-allow-origin'])
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertIn(verb,
resp.headers['access-control-allow-methods'])
self.assertEqual(
len(resp.headers['access-control-allow-methods'].split(', ')),
6)
self.assertEqual('999', resp.headers['access-control-max-age'])
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertIn(verb, resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 6)
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.bar',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.bar',
'Access-Control-Request-Method': 'GET'})
controller.app.cors_allow_origin = ['http://foo.bar', ]
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
def my_container_info_wildcard(*args):
return {
'cors': {
'allow_origin': '*',
'max_age': '999',
}
}
controller.container_info = my_container_info_wildcard
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://bar.baz',
'Access-Control-Request-Headers': ' , ,,',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual('*', resp.headers['access-control-allow-origin'])
self.assertNotIn('access-control-allow-headers', resp.headers)
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertIn(verb,
resp.headers['access-control-allow-methods'])
self.assertEqual(
len(resp.headers['access-control-allow-methods'].split(', ')),
6)
self.assertEqual('999', resp.headers['access-control-max-age'])
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://bar.baz',
'Access-Control-Request-Headers':
'x-foo, x-bar, , x-auth-token',
'Access-Control-Request-Method': 'GET'}
)
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual(
sortHeaderNames('x-foo, x-bar, x-auth-token'),
sortHeaderNames(resp.headers['access-control-allow-headers']))
self.assertEqual('Access-Control-Request-Headers',
resp.headers.get('vary'))
def test_CORS_valid(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
def stubContainerInfo(*args):
return {
'cors': {
'allow_origin': 'http://foo.bar'
}
}
controller.container_info = stubContainerInfo
def containerGET(controller, req):
return Response(headers={
'X-Container-Meta-Color': 'red',
'X-Super-Secret': 'hush',
})
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'GET'},
headers={'Origin': 'http://foo.bar'})
resp = cors_validation(containerGET)(controller, req)
self.assertEqual(200, resp.status_int)
self.assertEqual('http://foo.bar',
resp.headers['access-control-allow-origin'])
self.assertEqual('red', resp.headers['x-container-meta-color'])
# X-Super-Secret is in the response, but not "exposed"
self.assertEqual('hush', resp.headers['x-super-secret'])
self.assertIn('access-control-expose-headers', resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
expected_exposed = set([
'cache-control', 'content-language', 'content-type', 'expires',
'last-modified', 'pragma', 'etag', 'x-timestamp', 'x-trans-id',
'x-openstack-request-id', 'x-container-meta-color'])
self.assertEqual(expected_exposed, exposed)
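    # The X-Account-* header tests below check how the proxy tells each
    # container server which account server to update: with fewer account
    # replicas than container nodes, the leftover container node gets None
    # values; with more account replicas, the spare hosts and devices are
    # doubled up as comma-separated lists.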
def _gather_x_account_headers(self, controller_call, req, *connect_args,
**kwargs):
seen_headers = []
to_capture = ('X-Account-Partition', 'X-Account-Host',
'X-Account-Device')
def capture_headers(ipaddr, port, device, partition, method,
path, headers=None, query_string=None):
captured = {}
for header in to_capture:
captured[header] = headers.get(header)
seen_headers.append(captured)
with save_globals():
self.app.allow_account_management = True
set_http_connect(*connect_args, give_connect=capture_headers,
**kwargs)
resp = controller_call(req)
self.assertEqual(2, resp.status_int // 100) # sanity check
# don't care about the account HEAD, so throw away the
# first element
return sorted(seen_headers[1:],
key=lambda d: d['X-Account-Host'] or 'Z')
def test_PUT_x_account_headers_with_fewer_account_replicas(self):
self.app.account_ring.set_replicas(2)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.PUT, req,
200, 201, 201, 201) # HEAD PUT PUT PUT
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000',
'X-Account-Partition': '0',
'X-Account-Device': 'sda'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': None,
'X-Account-Partition': None,
'X-Account-Device': None}
])
def test_PUT_x_account_headers_with_more_account_replicas(self):
self.app.account_ring.set_replicas(4)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.PUT, req,
200, 201, 201, 201) # HEAD PUT PUT PUT
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Account-Partition': '0',
'X-Account-Device': 'sda,sdd'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': '10.0.0.2:1002',
'X-Account-Partition': '0',
'X-Account-Device': 'sdc'}
])
def test_DELETE_x_account_headers_with_fewer_account_replicas(self):
self.app.account_ring.set_replicas(2)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.DELETE, req,
200, 204, 204, 204) # HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000',
'X-Account-Partition': '0',
'X-Account-Device': 'sda'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': None,
'X-Account-Partition': None,
'X-Account-Device': None}
])
def test_DELETE_x_account_headers_with_more_account_replicas(self):
self.app.account_ring.set_replicas(4)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.DELETE, req,
200, 204, 204, 204) # HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Account-Partition': '0',
'X-Account-Device': 'sda,sdd'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': '10.0.0.2:1002',
'X-Account-Partition': '0',
'X-Account-Device': 'sdc'}
])
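    # The two tests below assert that the proxy stamps every replica request
    # with one and the same X-Timestamp, in what looks like Swift's normalized
    # "seconds.microseconds" form (ten digits, a dot, then five digits, per the
    # regex).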
def test_PUT_backed_x_timestamp_header(self):
timestamps = []
def capture_timestamps(*args, **kwargs):
headers = kwargs['headers']
timestamps.append(headers.get('X-Timestamp'))
req = Request.blank('/v1/a/c', method='PUT', headers={'': ''})
with save_globals():
new_connect = set_http_connect(200, # account existence check
201, 201, 201,
give_connect=capture_timestamps)
resp = self.app.handle_request(req)
# sanity
self.assertRaises(StopIteration, new_connect.code_iter.next)
self.assertEqual(2, resp.status_int // 100)
timestamps.pop(0) # account existence check
self.assertEqual(3, len(timestamps))
for timestamp in timestamps:
self.assertEqual(timestamp, timestamps[0])
                self.assertTrue(re.match(r'[0-9]{10}\.[0-9]{5}', timestamp))
def test_DELETE_backed_x_timestamp_header(self):
timestamps = []
def capture_timestamps(*args, **kwargs):
headers = kwargs['headers']
timestamps.append(headers.get('X-Timestamp'))
req = Request.blank('/v1/a/c', method='DELETE', headers={'': ''})
self.app.update_request(req)
with save_globals():
new_connect = set_http_connect(200, # account existence check
201, 201, 201,
give_connect=capture_timestamps)
resp = self.app.handle_request(req)
# sanity
self.assertRaises(StopIteration, new_connect.code_iter.next)
self.assertEqual(2, resp.status_int // 100)
timestamps.pop(0) # account existence check
self.assertEqual(3, len(timestamps))
for timestamp in timestamps:
self.assertEqual(timestamp, timestamps[0])
                self.assertTrue(re.match(r'[0-9]{10}\.[0-9]{5}', timestamp))
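    # In the read-timeout test below, node_timeout is dropped to 0.1s while the
    # mocked backends are told to stall for 1.0s mid-body (slow=[1.0, 1.0]), so
    # reading resp.body is expected to surface a ChunkReadTimeout.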
def test_node_read_timeout_retry_to_container(self):
with save_globals():
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'GET'})
self.app.node_timeout = 0.1
set_http_connect(200, 200, 200, body='abcdef', slow=[1.0, 1.0])
resp = req.get_response(self.app)
got_exc = False
try:
resp.body
except ChunkReadTimeout:
got_exc = True
self.assertTrue(got_exc)
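# TestAccountController sets error_suppression_interval to 0, presumably so
# that error-limiting state recorded by one status map cannot bleed into the
# next assertion.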
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestAccountController(unittest.TestCase):
def setUp(self):
conf = {'error_suppression_interval': 0}
self.app = proxy_server.Application(conf, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
def assert_status_map(self, method, statuses, expected, env_expected=None,
headers=None, **kwargs):
headers = headers or {}
with save_globals():
set_http_connect(*statuses, **kwargs)
req = Request.blank('/v1/a', {}, headers=headers)
self.app.update_request(req)
res = method(req)
self.assertEqual(res.status_int, expected)
infocache = res.environ.get('swift.infocache', {})
if env_expected:
self.assertEqual(infocache['account/a']['status'],
env_expected)
set_http_connect(*statuses)
req = Request.blank('/v1/a/', {})
self.app.update_request(req)
res = method(req)
infocache = res.environ.get('swift.infocache', {})
self.assertEqual(res.status_int, expected)
if env_expected:
self.assertEqual(infocache['account/a']['status'],
env_expected)
def test_OPTIONS(self):
with save_globals():
self.app.allow_account_management = False
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST HEAD'.split():
self.assertIn(verb, resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 4)
# Test a CORS OPTIONS request (i.e. including Origin and
# Access-Control-Request-Method headers)
self.app.allow_account_management = False
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank(
'/v1/account', {'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST HEAD'.split():
self.assertIn(verb, resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 4)
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertIn(verb, resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 6)
def test_GET(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'a')
# GET returns after the first successful call to an Account Server
self.assert_status_map(controller.GET, (200,), 200, 200)
self.assert_status_map(controller.GET, (503, 200), 200, 200)
self.assert_status_map(controller.GET, (503, 503, 200), 200, 200)
self.assert_status_map(controller.GET, (204,), 204, 204)
self.assert_status_map(controller.GET, (503, 204), 204, 204)
self.assert_status_map(controller.GET, (503, 503, 204), 204, 204)
self.assert_status_map(controller.GET, (404, 200), 200, 200)
self.assert_status_map(controller.GET, (404, 404, 200), 200, 200)
self.assert_status_map(controller.GET, (404, 503, 204), 204, 204)
            # If the account servers fail and autocreate = False, return the
            # majority response
self.assert_status_map(controller.GET, (404, 404, 404), 404, 404)
self.assert_status_map(controller.GET, (404, 404, 503), 404, 404)
self.assert_status_map(controller.GET, (404, 503, 503), 503)
self.app.memcache = FakeMemcacheReturnsNone()
self.assert_status_map(controller.GET, (404, 404, 404), 404, 404)
def test_GET_autocreate(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'a')
self.app.memcache = FakeMemcacheReturnsNone()
self.assertFalse(self.app.account_autocreate)
# Repeat the test for autocreate = False and 404 by all
self.assert_status_map(controller.GET,
(404, 404, 404), 404)
self.assert_status_map(controller.GET,
(404, 503, 404), 404)
            # When autocreate is True, if none of the nodes respond 2xx
            # and a quorum of the nodes responded 404,
            # ALL nodes are asked to create the account.
            # If successful, the GET request is repeated.
controller.app.account_autocreate = True
self.assert_status_map(controller.GET,
(404, 404, 404), 204)
self.assert_status_map(controller.GET,
(404, 503, 404), 204)
            # We always return 503 if there is no majority among 4xx, 3xx or
            # 2xx responses
self.assert_status_map(controller.GET,
(500, 500, 400), 503)
def test_HEAD(self):
# Same behaviour as GET
with save_globals():
controller = proxy_server.AccountController(self.app, 'a')
self.assert_status_map(controller.HEAD, (200,), 200, 200)
self.assert_status_map(controller.HEAD, (503, 200), 200, 200)
self.assert_status_map(controller.HEAD, (503, 503, 200), 200, 200)
self.assert_status_map(controller.HEAD, (204,), 204, 204)
self.assert_status_map(controller.HEAD, (503, 204), 204, 204)
self.assert_status_map(controller.HEAD, (204, 503, 503), 204, 204)
self.assert_status_map(controller.HEAD, (204,), 204, 204)
self.assert_status_map(controller.HEAD, (404, 404, 404), 404, 404)
self.assert_status_map(controller.HEAD, (404, 404, 200), 200, 200)
self.assert_status_map(controller.HEAD, (404, 200), 200, 200)
self.assert_status_map(controller.HEAD, (404, 404, 503), 404, 404)
self.assert_status_map(controller.HEAD, (404, 503, 503), 503)
self.assert_status_map(controller.HEAD, (404, 503, 204), 204, 204)
def test_HEAD_autocreate(self):
# Same behaviour as GET
with save_globals():
controller = proxy_server.AccountController(self.app, 'a')
self.app.memcache = FakeMemcacheReturnsNone()
self.assertFalse(self.app.account_autocreate)
self.assert_status_map(controller.HEAD,
(404, 404, 404), 404)
controller.app.account_autocreate = True
self.assert_status_map(controller.HEAD,
(404, 404, 404), 204)
self.assert_status_map(controller.HEAD,
(500, 404, 404), 204)
            # We always return 503 if there is no majority among 4xx, 3xx or
            # 2xx responses
self.assert_status_map(controller.HEAD,
(500, 500, 400), 503)
def test_POST_autocreate(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'a')
self.app.memcache = FakeMemcacheReturnsNone()
# first test with autocreate being False
self.assertFalse(self.app.account_autocreate)
self.assert_status_map(controller.POST,
(404, 404, 404), 404)
            # next turn it on and test account being created then updated
controller.app.account_autocreate = True
self.assert_status_map(
controller.POST,
(404, 404, 404, 202, 202, 202, 201, 201, 201), 201)
# account_info PUT account POST account
self.assert_status_map(
controller.POST,
(404, 404, 503, 201, 201, 503, 204, 204, 504), 204)
# what if create fails
self.assert_status_map(
controller.POST,
(404, 404, 404, 403, 403, 403, 400, 400, 400), 400)
def test_POST_autocreate_with_sysmeta(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'a')
self.app.memcache = FakeMemcacheReturnsNone()
# first test with autocreate being False
self.assertFalse(self.app.account_autocreate)
self.assert_status_map(controller.POST,
(404, 404, 404), 404)
            # next turn it on and test account being created then updated
controller.app.account_autocreate = True
calls = []
callback = _make_callback_func(calls)
key, value = 'X-Account-Sysmeta-Blah', 'something'
headers = {key: value}
self.assert_status_map(
controller.POST,
(404, 404, 404, 202, 202, 202, 201, 201, 201), 201,
# POST , autocreate PUT, POST again
headers=headers,
give_connect=callback)
self.assertEqual(9, len(calls))
for call in calls:
self.assertIn(key, call['headers'],
'%s call, key %s missing in headers %s' %
(call['method'], key, call['headers']))
self.assertEqual(value, call['headers'][key])
def test_connection_refused(self):
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1 # can't connect on this port
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 503)
def test_other_socket_error(self):
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = -1 # invalid port number
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 503)
def test_response_get_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/a?format=json')
self.app.update_request(req)
res = controller.GET(req)
self.assertIn('accept-ranges', res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_response_head_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/a?format=json')
self.app.update_request(req)
res = controller.HEAD(req)
res.body
self.assertIn('accept-ranges', res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_PUT(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a', {})
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((201, 201, 201), 405)
self.app.allow_account_management = True
test_status_map((201, 201, 201), 201)
test_status_map((201, 201, 500), 201)
test_status_map((201, 500, 500), 503)
test_status_map((204, 500, 404), 503)
def test_PUT_max_account_name_length(self):
with save_globals():
self.app.allow_account_management = True
limit = constraints.MAX_ACCOUNT_NAME_LENGTH
controller = proxy_server.AccountController(self.app, '1' * limit)
self.assert_status_map(controller.PUT, (201, 201, 201), 201)
controller = proxy_server.AccountController(
self.app, '2' * (limit + 1))
self.assert_status_map(controller.PUT, (201, 201, 201), 400)
def test_PUT_connect_exceptions(self):
with save_globals():
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'account')
self.assert_status_map(controller.PUT, (201, 201, -1), 201)
self.assert_status_map(controller.PUT, (201, -1, -1), 503)
self.assert_status_map(controller.PUT, (503, 503, -1), 503)
def test_PUT_status(self):
with save_globals():
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'account')
self.assert_status_map(controller.PUT, (201, 201, 202), 202)
def test_PUT_metadata(self):
self.metadata_helper('PUT')
def test_POST_metadata(self):
self.metadata_helper('POST')
def metadata_helper(self, method):
for test_header, test_value in (
('X-Account-Meta-TestHeader', 'TestValue'),
('X-Account-Meta-TestHeader', ''),
('X-Remove-Account-Meta-TestHeader', 'anything')):
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a':
find_header = test_header
find_value = test_value
if find_header.lower().startswith('x-remove-'):
find_header = \
find_header.lower().replace('-remove', '', 1)
find_value = ''
for k, v in headers.items():
if k.lower() == find_header.lower() and \
v == find_value:
break
else:
test_errors.append('%s: %s not in %s' %
(find_header, find_value, headers))
with save_globals():
self.app.allow_account_management = True
controller = \
proxy_server.AccountController(self.app, 'a')
set_http_connect(201, 201, 201, give_connect=test_connect)
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': method},
headers={test_header: test_value})
self.app.update_request(req)
getattr(controller, method)(req)
self.assertEqual(test_errors, [])
def test_PUT_bad_metadata(self):
self.bad_metadata_helper('PUT')
def test_POST_bad_metadata(self):
self.bad_metadata_helper('POST')
def bad_metadata_helper(self, method):
with save_globals():
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'a')
set_http_connect(200, 201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-' +
('a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-' +
('a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-Too-Long':
'a' * constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-Too-Long':
'a' * (constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT):
headers['X-Account-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT + 1):
headers['X-Account-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
while size < (constraints.MAX_META_OVERALL_SIZE - 4
- constraints.MAX_META_VALUE_LENGTH):
size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Account-Meta-%04d' % x] = header_value
x += 1
if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Account-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Account-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
def test_DELETE(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a', {'REQUEST_METHOD': 'DELETE'})
req.content_length = 0
self.app.update_request(req)
res = controller.DELETE(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((201, 201, 201), 405)
self.app.allow_account_management = True
test_status_map((201, 201, 201), 201)
test_status_map((201, 201, 500), 201)
test_status_map((201, 500, 500), 503)
test_status_map((204, 500, 404), 503)
def test_DELETE_with_query_string(self):
# Extra safety in case someone typos a query string for an
# account-level DELETE request that was really meant to be caught by
# some middleware.
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a?whoops',
environ={'REQUEST_METHOD': 'DELETE'})
req.content_length = 0
self.app.update_request(req)
res = controller.DELETE(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((201, 201, 201), 400)
self.app.allow_account_management = True
test_status_map((201, 201, 201), 400)
test_status_map((201, 201, 500), 400)
test_status_map((201, 500, 500), 400)
test_status_map((204, 500, 404), 400)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestAccountControllerFakeGetResponse(unittest.TestCase):
"""
Test all the faked-out GET responses for accounts that don't exist. They
have to match the responses for empty accounts that really exist.
"""
def setUp(self):
conf = {'account_autocreate': 'yes'}
self.app = proxy_server.Application(conf, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
self.app.memcache = FakeMemcacheReturnsNone()
def test_GET_autocreate_accept_json(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank(
'/v1/a', headers={'Accept': 'application/json'},
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('application/json; charset=utf-8',
resp.headers['Content-Type'])
self.assertEqual("[]", resp.body)
def test_GET_autocreate_format_json(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a?format=json',
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a',
'QUERY_STRING': 'format=json'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('application/json; charset=utf-8',
resp.headers['Content-Type'])
self.assertEqual("[]", resp.body)
def test_GET_autocreate_accept_xml(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a', headers={"Accept": "text/xml"},
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('text/xml; charset=utf-8',
resp.headers['Content-Type'])
empty_xml_listing = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<account name="a">\n</account>')
self.assertEqual(empty_xml_listing, resp.body)
def test_GET_autocreate_format_xml(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a?format=xml',
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a',
'QUERY_STRING': 'format=xml'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('application/xml; charset=utf-8',
resp.headers['Content-Type'])
empty_xml_listing = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<account name="a">\n</account>')
self.assertEqual(empty_xml_listing, resp.body)
def test_GET_autocreate_accept_unknown(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a', headers={"Accept": "mystery/meat"},
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a'})
resp = req.get_response(self.app)
self.assertEqual(406, resp.status_int)
def test_GET_autocreate_format_invalid_utf8(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a?format=\xff\xfe',
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a',
'QUERY_STRING': 'format=\xff\xfe'})
resp = req.get_response(self.app)
self.assertEqual(400, resp.status_int)
def test_account_acl_header_access(self):
acl = {
'admin': ['AUTH_alice'],
'read-write': ['AUTH_bob'],
'read-only': ['AUTH_carol'],
}
prefix = get_sys_meta_prefix('account')
privileged_headers = {(prefix + 'core-access-control'): format_acl(
version=2, acl_dict=acl)}
app = proxy_server.Application(
None, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing())
with save_globals():
# Mock account server will provide privileged information (ACLs)
set_http_connect(200, 200, 200, headers=privileged_headers)
req = Request.blank('/v1/a', environ={'REQUEST_METHOD': 'GET'})
resp = app.handle_request(req)
# Not a swift_owner -- ACLs should NOT be in response
header = 'X-Account-Access-Control'
self.assertNotIn(header, resp.headers, '%r was in %r' % (
header, resp.headers))
# Same setup -- mock acct server will provide ACLs
set_http_connect(200, 200, 200, headers=privileged_headers)
req = Request.blank('/v1/a', environ={'REQUEST_METHOD': 'GET',
'swift_owner': True})
resp = app.handle_request(req)
# For a swift_owner, the ACLs *should* be in response
self.assertIn(header, resp.headers, '%r not in %r' % (
header, resp.headers))
def test_account_acls_through_delegation(self):
# Define a way to grab the requests sent out from the AccountController
# to the Account Server, and a way to inject responses we'd like the
# Account Server to return.
resps_to_send = []
@contextmanager
def patch_account_controller_method(verb):
old_method = getattr(proxy_server.AccountController, verb)
new_method = lambda self, req, *_, **__: resps_to_send.pop(0)
try:
setattr(proxy_server.AccountController, verb, new_method)
yield
finally:
setattr(proxy_server.AccountController, verb, old_method)
def make_test_request(http_method, swift_owner=True):
env = {
'REQUEST_METHOD': http_method,
'swift_owner': swift_owner,
}
acl = {
'admin': ['foo'],
'read-write': ['bar'],
'read-only': ['bas'],
}
headers = {} if http_method in ('GET', 'HEAD') else {
'x-account-access-control': format_acl(version=2, acl_dict=acl)
}
return Request.blank('/v1/a', environ=env, headers=headers)
# Our AccountController will invoke methods to communicate with the
# Account Server, and they will return responses like these:
def make_canned_response(http_method):
acl = {
'admin': ['foo'],
'read-write': ['bar'],
'read-only': ['bas'],
}
headers = {'x-account-sysmeta-core-access-control': format_acl(
version=2, acl_dict=acl)}
canned_resp = Response(headers=headers)
canned_resp.environ = {
'PATH_INFO': '/acct',
'REQUEST_METHOD': http_method,
}
resps_to_send.append(canned_resp)
app = proxy_server.Application(
None, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing())
app.allow_account_management = True
ext_header = 'x-account-access-control'
with patch_account_controller_method('GETorHEAD_base'):
# GET/HEAD requests should remap sysmeta headers from acct server
for verb in ('GET', 'HEAD'):
make_canned_response(verb)
req = make_test_request(verb)
resp = app.handle_request(req)
h = parse_acl(version=2, data=resp.headers.get(ext_header))
self.assertEqual(h['admin'], ['foo'])
self.assertEqual(h['read-write'], ['bar'])
self.assertEqual(h['read-only'], ['bas'])
# swift_owner = False: GET/HEAD shouldn't return sensitive info
make_canned_response(verb)
req = make_test_request(verb, swift_owner=False)
resp = app.handle_request(req)
h = resp.headers
self.assertIsNone(h.get(ext_header))
# swift_owner unset: GET/HEAD shouldn't return sensitive info
make_canned_response(verb)
req = make_test_request(verb, swift_owner=False)
del req.environ['swift_owner']
resp = app.handle_request(req)
h = resp.headers
self.assertIsNone(h.get(ext_header))
# Verify that PUT/POST requests remap sysmeta headers from acct server
with patch_account_controller_method('make_requests'):
make_canned_response('PUT')
req = make_test_request('PUT')
resp = app.handle_request(req)
h = parse_acl(version=2, data=resp.headers.get(ext_header))
self.assertEqual(h['admin'], ['foo'])
self.assertEqual(h['read-write'], ['bar'])
self.assertEqual(h['read-only'], ['bas'])
make_canned_response('POST')
req = make_test_request('POST')
resp = app.handle_request(req)
h = parse_acl(version=2, data=resp.headers.get(ext_header))
self.assertEqual(h['admin'], ['foo'])
self.assertEqual(h['read-write'], ['bar'])
self.assertEqual(h['read-only'], ['bas'])
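# Editor's illustration (not part of the original test suite): the ACL helpers
# exercised above simply round-trip a dict through a JSON header value.
def _example_acl_round_trip():
    acl = {'admin': ['AUTH_alice'],
           'read-write': ['AUTH_bob'],
           'read-only': ['AUTH_carol']}
    header_value = format_acl(version=2, acl_dict=acl)
    parsed = parse_acl(version=2, data=header_value)
    assert parsed['admin'] == ['AUTH_alice']
    return parsed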
class FakeObjectController(object):
def __init__(self):
self.app = self
self.logger = self
self.account_name = 'a'
self.container_name = 'c'
self.object_name = 'o'
self.trans_id = 'tx1'
self.object_ring = FakeRing()
self.node_timeout = 1
self.rate_limit_after_segment = 3
self.rate_limit_segments_per_sec = 2
self.GETorHEAD_base_args = []
def exception(self, *args):
self.exception_args = args
self.exception_info = sys.exc_info()
def GETorHEAD_base(self, *args):
self.GETorHEAD_base_args.append(args)
req = args[0]
path = args[4]
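        # The path's last character doubles as the response body byte and, as
        # an int, the body length (e.g. a path ending in '3' yields '333').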
body = data = path[-1] * int(path[-1])
if req.range:
r = req.range.ranges_for_length(len(data))
if r:
(start, stop) = r[0]
body = data[start:stop]
resp = Response(app_iter=iter(body))
return resp
def iter_nodes(self, ring, partition):
for node in ring.get_part_nodes(partition):
yield node
for node in ring.get_more_nodes(partition):
yield node
def sort_nodes(self, nodes):
return nodes
def set_node_timing(self, node, timing):
return
class TestProxyObjectPerformance(unittest.TestCase):
def setUp(self):
# This is just a simple test that can be used to verify and debug the
# various data paths between the proxy server and the object
# server. Used as a play ground to debug buffer sizes for sockets.
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
# Client is transmitting in 2 MB chunks
fd = sock.makefile('wb', 2 * 1024 * 1024)
# Small, fast for testing
obj_len = 2 * 64 * 1024
# Use 1 GB or more for measurements
# obj_len = 2 * 512 * 1024 * 1024
self.path = '/v1/a/c/o.large'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (self.path, str(obj_len)))
fd.write('a' * obj_len)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
self.obj_len = obj_len
def test_GET_debug_large_file(self):
for i in range(10):
start = time.time()
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
# Client is reading in 2 MB chunks
fd = sock.makefile('wb', 2 * 1024 * 1024)
fd.write('GET %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.path)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
total = 0
while True:
buf = fd.read(100000)
if not buf:
break
total += len(buf)
self.assertEqual(total, self.obj_len)
end = time.time()
print("Run %02d took %07.03f" % (i, end - start))
@patch_policies([StoragePolicy(0, 'migrated', object_ring=FakeRing()),
StoragePolicy(1, 'ernie', True, object_ring=FakeRing()),
StoragePolicy(2, 'deprecated', is_deprecated=True,
object_ring=FakeRing()),
StoragePolicy(3, 'bert', object_ring=FakeRing())])
class TestSwiftInfo(unittest.TestCase):
def setUp(self):
utils._swift_info = {}
utils._swift_admin_info = {}
def test_registered_defaults(self):
proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
si = utils.get_swift_info()['swift']
self.assertIn('version', si)
self.assertEqual(si['max_file_size'], constraints.MAX_FILE_SIZE)
self.assertEqual(si['max_meta_name_length'],
constraints.MAX_META_NAME_LENGTH)
self.assertEqual(si['max_meta_value_length'],
constraints.MAX_META_VALUE_LENGTH)
self.assertEqual(si['max_meta_count'], constraints.MAX_META_COUNT)
self.assertEqual(si['max_header_size'], constraints.MAX_HEADER_SIZE)
self.assertEqual(si['max_meta_overall_size'],
constraints.MAX_META_OVERALL_SIZE)
self.assertEqual(si['account_listing_limit'],
constraints.ACCOUNT_LISTING_LIMIT)
self.assertEqual(si['container_listing_limit'],
constraints.CONTAINER_LISTING_LIMIT)
self.assertEqual(si['max_account_name_length'],
constraints.MAX_ACCOUNT_NAME_LENGTH)
self.assertEqual(si['max_container_name_length'],
constraints.MAX_CONTAINER_NAME_LENGTH)
self.assertEqual(si['max_object_name_length'],
constraints.MAX_OBJECT_NAME_LENGTH)
self.assertIn('strict_cors_mode', si)
self.assertFalse(si['allow_account_management'])
self.assertFalse(si['account_autocreate'])
# This setting is by default excluded by disallowed_sections
self.assertEqual(si['valid_api_versions'],
constraints.VALID_API_VERSIONS)
# this next test is deliberately brittle in order to alert if
# other items are added to swift info
self.assertEqual(len(si), 18)
self.assertIn('policies', si)
sorted_pols = sorted(si['policies'], key=operator.itemgetter('name'))
self.assertEqual(len(sorted_pols), 3)
for policy in sorted_pols:
self.assertNotEqual(policy['name'], 'deprecated')
self.assertEqual(sorted_pols[0]['name'], 'bert')
self.assertEqual(sorted_pols[1]['name'], 'ernie')
self.assertEqual(sorted_pols[2]['name'], 'migrated')
class TestSocketObjectVersions(unittest.TestCase):
def setUp(self):
global _test_sockets
self.prolis = prolis = listen_zero()
self._orig_prolis = _test_sockets[0]
allowed_headers = ', '.join([
'content-encoding',
'x-object-manifest',
'content-disposition',
'foo'
])
conf = {'devices': _testdir, 'swift_dir': _testdir,
'mount_check': 'false', 'allowed_headers': allowed_headers}
prosrv = versioned_writes.VersionedWritesMiddleware(
copy.ServerSideCopyMiddleware(
proxy_logging.ProxyLoggingMiddleware(
_test_servers[0], conf,
logger=_test_servers[0].logger), conf),
{})
self.coro = spawn(wsgi.server, prolis, prosrv, NullLogger())
# replace global prosrv with one that's filtered with version
# middleware
self.sockets = list(_test_sockets)
self.sockets[0] = prolis
_test_sockets = tuple(self.sockets)
def tearDown(self):
self.coro.kill()
# put the global state back
global _test_sockets
self.sockets[0] = self._orig_prolis
_test_sockets = tuple(self.sockets)
def test_version_manifest(self, oc='versions', vc='vers', o='name'):
versions_to_create = 3
# Create a container for our versioned object testing
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
pre = quote('%03x' % len(o))
osub = '%s/sub' % o
presub = quote('%03x' % len(osub))
osub = quote(osub)
presub = quote(presub)
oc = quote(oc)
vc = quote(vc)
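        # Overview (editor's note): container 'oc' is created with
        # X-Versions-Location pointing at 'vc'; each later PUT of the object
        # copies the previous copy into 'vc', and each DELETE restores the
        # newest saved copy until none remain.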
def put_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\nX-Versions-Location: %s\r\n\r\n'
% (oc, vc))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
headers = put_container()
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
def get_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
# check that the header was set
headers, body = get_container()
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
self.assertIn('X-Versions-Location: %s' % vc, headers)
def put_version_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n' % vc)
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
# make the container for the object versions
headers = put_version_container()
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
def put(version):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\nContent-Type: text/jibberish%s'
'\r\n\r\n%05d\r\n' % (oc, o, version, version))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
def get(container=oc, obj=o):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n'
'\r\n' % (container, obj))
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
# Create the versioned file
headers = put(0)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Create the object versions
for version in range(1, versions_to_create):
sleep(.01) # guarantee that the timestamp changes
headers = put(version)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Ensure retrieving the manifest file gets the latest version
headers, body = get()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn('Content-Type: text/jibberish%s' % version, headers)
self.assertNotIn('X-Object-Meta-Foo: barbaz', headers)
self.assertEqual(body, '%05d' % version)
def get_version_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n\r\n' % vc)
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
# Ensure we have the right number of versions saved
headers, body = get_version_container()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
versions = [x for x in body.split('\n') if x]
self.assertEqual(len(versions), versions_to_create - 1)
def delete():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r'
'\nConnection: close\r\nX-Storage-Token: t\r\n\r\n'
% (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
def copy():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('COPY /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: '
't\r\nDestination: %s/copied_name\r\n'
'Content-Length: 0\r\n\r\n' % (oc, o, oc))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
# copy a version and make sure the version info is stripped
headers = copy()
exp = 'HTTP/1.1 2' # 2xx series response to the COPY
self.assertEqual(headers[:len(exp)], exp)
def get_copy():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/copied_name HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\n'
'X-Auth-Token: t\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
headers, body = get_copy()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertEqual(body, '%05d' % version)
def post():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('POST /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: '
't\r\nContent-Type: foo/bar\r\nContent-Length: 0\r\n'
'X-Object-Meta-Bar: foo\r\n\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
# post and make sure it's updated
headers = post()
exp = 'HTTP/1.1 2' # 2xx series response to the POST
self.assertEqual(headers[:len(exp)], exp)
headers, body = get()
self.assertIn('Content-Type: foo/bar', headers)
self.assertIn('X-Object-Meta-Bar: foo', headers)
self.assertEqual(body, '%05d' % version)
# check container listing
headers, body = get_container()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
# Delete the object versions
for segment in range(versions_to_create - 1, 0, -1):
headers = delete()
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
# Ensure retrieving the manifest file gets the latest version
headers, body = get()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn('Content-Type: text/jibberish%s' % (segment - 1),
headers)
self.assertEqual(body, '%05d' % (segment - 1))
# Ensure we have the right number of versions saved
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r'
'\n' % (vc, pre, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
body = fd.read()
versions = [x for x in body.split('\n') if x]
self.assertEqual(len(versions), segment - 1)
# there is now one version left (in the manifest)
# Ensure we have no saved versions
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n'
% (vc, pre, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 204 No Content'
self.assertEqual(headers[:len(exp)], exp)
# delete the last version
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
# Ensure it's all gone
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n'
% (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
# make sure manifest files are also versioned
for _junk in range(0, versions_to_create):
sleep(.01) # guarantee that the timestamp changes
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 0\r\n'
'Content-Type: text/jibberish0\r\n'
'Foo: barbaz\r\nX-Object-Manifest: %s/%s/\r\n\r\n'
% (oc, o, oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nhost: '
'localhost\r\nconnection: close\r\nx-auth-token: t\r\n\r\n'
% (vc, pre, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200 OK'
self.assertEqual(headers[:len(exp)], exp)
body = fd.read()
versions = [x for x in body.split('\n') if x]
self.assertEqual(versions_to_create - 1, len(versions))
# DELETE v1/a/c/obj shouldn't delete v1/a/c/obj/sub versions
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\nContent-Type: text/jibberish0\r\n'
'Foo: barbaz\r\n\r\n00000\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\nContent-Type: text/jibberish0\r\n'
'Foo: barbaz\r\n\r\n00001\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 4\r\nContent-Type: text/jibberish0\r\n'
'Foo: barbaz\r\n\r\nsub1\r\n' % (oc, osub))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 4\r\nContent-Type: text/jibberish0\r\n'
'Foo: barbaz\r\n\r\nsub2\r\n' % (oc, osub))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n'
% (vc, presub, osub))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
body = fd.read()
versions = [x for x in body.split('\n') if x]
self.assertEqual(len(versions), 1)
# Check for when the versions target container doesn't exist
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%swhoops HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\nX-Versions-Location: none\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Create the versioned file
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%swhoops/foo HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\n\r\n00000\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Create another version
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%swhoops/foo HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\n\r\n00001\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
# Delete the object
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('DELETE /v1/a/%swhoops/foo HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx response
self.assertEqual(headers[:len(exp)], exp)
def test_version_manifest_utf8(self):
oc = '0_oc_non_ascii\xc2\xa3'
vc = '0_vc_non_ascii\xc2\xa3'
o = '0_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_container(self):
oc = '1_oc_non_ascii\xc2\xa3'
vc = '1_vc_ascii'
o = '1_o_ascii'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_version_container(self):
oc = '2_oc_ascii'
vc = '2_vc_non_ascii\xc2\xa3'
o = '2_o_ascii'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_containers(self):
oc = '3_oc_non_ascii\xc2\xa3'
vc = '3_vc_non_ascii\xc2\xa3'
o = '3_o_ascii'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_object(self):
oc = '4_oc_ascii'
vc = '4_vc_ascii'
o = '4_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_version_container_utf_object(self):
oc = '5_oc_ascii'
vc = '5_vc_non_ascii\xc2\xa3'
o = '5_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_container_utf_object(self):
oc = '6_oc_non_ascii\xc2\xa3'
vc = '6_vc_ascii'
o = '6_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
if __name__ == '__main__':
unittest.main()
| notmyname/swift | test/unit/proxy/test_server.py | Python | apache-2.0 | 441,849 | ["MOOSE"] | 875547fbb5def84b817107f26ab42bf5fdf1ed5bcc10f93241ab7303c1ea2585 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2012 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
This module provides the Plugin class for document generator plugins.
"""
from . import Plugin
from .docgen import TextDoc, DrawDoc
class DocGenPlugin(Plugin):
"""
This class represents a plugin for generating documents from Gramps
"""
def __init__(self, name, description, basedoc,
paper, style, extension, docoptclass, basedocname):
"""
:param name: A friendly name to call this plugin.
Example: "Plain Text"
:type name: string
:param description: A short description of the plugin.
Example: "This plugin will generate text documents in plain text."
:type description: string
:param basedoc: A class that implements the BaseDoc
interface.
:type basedoc: BaseDoc
:param paper: Indicates whether the plugin uses paper or not.
True = use paper; False = do not use paper
:type paper: bool
:param style: Indicates whether the plugin uses styles or not.
True = use styles; False = do not use styles
:type style: bool
:param extension: The extension for the output file.
Example: "txt"
:type extension: str
:param docoptclass: either None or a subclass of DocOptions
:type docoptclass: either None or a DocOptions subclass
:param basedocname: The BaseDoc name of this plugin.
Example: "AsciiDoc"
:type basedocname: string
:return: nothing
"""
Plugin.__init__(self, name, description, basedoc.__module__)
self.__basedoc = basedoc
self.__paper = paper
self.__style = style
self.__extension = extension
self.__docoptclass = docoptclass
self.__basedocname = basedocname
def get_basedoc(self):
"""
Get the :class:`.BaseDoc` class for this plugin.
:return: the :class:`.BaseDoc` class passed into :meth:`__init__`
"""
return self.__basedoc
def get_paper_used(self):
"""
Get the paper flag for this plugin.
:return: bool - True = use paper; False = do not use paper
"""
return self.__paper
def get_style_support(self):
"""
Get the style flag for this plugin.
:return: bool - True = use styles; False = do not use styles
"""
return self.__style
def get_extension(self):
"""
Get the file extension for the output file.
:return: str
"""
return self.__extension
def get_doc_option_class(self):
"""
Get the :class:`.DocOptions` subclass for this plugin, if any
:return: the :class:`.DocOptions` subclass passed into :meth:`__init__`
"""
return self.__docoptclass
def get_basedocname(self):
"""
Get the :class:`.BaseDoc` name for this plugin.
:return: the :class:`.BaseDoc` name passed into :meth:`__init__`
"""
return self.__basedocname
def get_text_support(self):
"""
Check if the plugin supports the :class:`.TextDoc` interface.
:return: bool: True if :class:`.TextDoc` is supported; False if
:class:`.TextDoc` is not supported.
"""
return bool(issubclass(self.__basedoc, TextDoc))
def get_draw_support(self):
"""
Check if the plugin supports the :class:`.DrawDoc` interface.
:return: bool: True if :class:`.DrawDoc` is supported; False if
:class:`.DrawDoc` is not supported.
"""
return bool(issubclass(self.__basedoc, DrawDoc))
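# Editor's usage sketch (not part of the original Gramps source): how a
# document-generator plugin is typically constructed. The backend class below
# is a hypothetical stand-in for a concrete TextDoc implementation such as
# AsciiDoc.
def _example_plain_text_plugin():
    class _PlainTextDoc(TextDoc):
        """Hypothetical stand-in for a real TextDoc backend."""
    plugin = DocGenPlugin(name="Plain Text",
                          description="Generates plain text documents.",
                          basedoc=_PlainTextDoc,
                          paper=False,
                          style=True,
                          extension="txt",
                          docoptclass=None,
                          basedocname="PlainTextDoc")
    # Only the TextDoc interface is supported by this stand-in backend.
    assert plugin.get_text_support() and not plugin.get_draw_support()
    return plugin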
| Nick-Hall/gramps | gramps/gen/plug/_docgenplugin.py | Python | gpl-2.0 | 4,506 | ["Brian"] | 717b2991d65eda42877ce521ee1bec3f047c2ec4fd2948d4d0d019f783a19855 |
#!/usr/bin/env python
# $Id: cleanup.py 2459 2008-11-12 17:31:10Z oliver $
"""Remove files from a staged SGE python script: parses the SGE log
file to ssh into the node and remove the stage dir. Can also parse
some of my older Charmm/SGE log scripts that contain hostname and
scratch dir information.
"""
import sys,os
import sre
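# NOTE (editor): 'sre' is the legacy alias of the standard 're' module; this
# Python 2 script still imports it (with a DeprecationWarning on 2.5+).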
def init_pattern(key):
"""Return a SRE compiled pattern; the match can be accessed in the
match object as
m = P[key].match(string)
m.group(key)
"""
return sre.compile('^init\(\): %(key)s: *(?P<%(key)s>.*)$' % locals())
INIT_KEYS = ['hostname','stagedir','JOB_ID','JOB_NAME']
P = {key: init_pattern(key) for key in INIT_KEYS}
Q = {'hostname':sre.compile('^host: *(?P<hostname>.*)$'),
'stagedir':sre.compile('^\+\+ temp_dir=(?P<stagedir>/scratch/oliver/.*)$'),
'WDIR':sre.compile('^WDIR: *(?P<WDIR>.*)$'),
}
# note: the SRE's are matched patterns, ie anchored at beginning of line!!
def scan_log(logfile,P):
STATUS = {'abort':sre.compile('(?P<abort>Abort|abort)'),
}
Vars = {}
StatusVars = {}
log = open(logfile,"r")
print "== %(logfile)s ==" % locals()
for line in log:
l = line.strip()
for key,pattern in P.items():
m = pattern.match(l)
if m:
Vars[key] = m.group(key)
break
for key,pattern in STATUS.items():
m = pattern.search(l)
if m:
StatusVars[key] = m.group(key)
log.close()
return Vars, StatusVars
def cleanup(logfile):
Vars,Status = scan_log(logfile,P)
if len(Vars) == 0:
print "Trying older format"
Vars,Status = scan_log(logfile,Q)
if len(Vars) == 0 and len(Status) == 0:
raise ValueError('No proper tags in '+logfile)
# all data in Var (I hope)
print "Recognized variables: %r" % Vars
print "Status: %r" % Status
try:
# fixing older scripts which had host: instead of hostname:
if 'hostname' not in Vars:
Vars['hostname'] = Vars['host']
# fix dims scripts WDIR
if 'stagedir' not in Vars:
Vars['stagedir'] = Vars['WDIR']
cmd = "ssh %(hostname)s rm -vr %(stagedir)s" % Vars
except KeyError,errmsg:
print "Variable not found (%s)" % errmsg
if 'abort' in Status:
print "Job was aborted, no cleaning up necessary except log file"
print ">>> rm "+logfile
os.unlink(logfile)
return
print "Probably nfs problem with job and it never ran --- will leave log file for inspection"
return
#raise
print ">>> "+cmd
os.system(cmd)
print ">>> rm "+logfile
os.unlink(logfile)
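# Editor's illustration (not part of the original script): a fabricated log
# line of the form the init_pattern() regexes are written to match.
def _example_parse_init_line():
    """Show how P['hostname'] pulls the node name out of an init() line."""
    line = 'init(): hostname: node042'   # hypothetical SGE log line
    m = P['hostname'].match(line)
    return m.group('hostname')           # -> 'node042'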
if __name__ == '__main__':
usage = "usage: %s log.oXXXXX ...\n" % sys.argv[0] + '\n' + __doc__
try:
logfile = sys.argv[1]
except IndexError:
raise ValueError("No input file.\n"+usage)
for logfile in sys.argv[1:]:
cleanup(logfile)
| pslacerda/GromacsWrapper | staging/extra/cleanup.py | Python | gpl-3.0 | 3,023 | ["CHARMM"] | e278eb1fc59ff0416820cbd87a43e3eca4a7533cefb7140598152ae08f5ff78d |
"""Implementation of magic functions for interaction with the OS.
Note: this module is named 'osm' instead of 'os' to avoid a collision with the
builtin.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import io
import os
import re
import sys
from pprint import pformat
from IPython.core import magic_arguments
from IPython.core import oinspect
from IPython.core import page
from IPython.core.alias import AliasError, Alias
from IPython.core.error import UsageError
from IPython.core.magic import (
Magics, compress_dhist, magics_class, line_magic, cell_magic, line_cell_magic
)
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils.openpy import source_to_unicode
from IPython.utils.process import abbrev_cwd
from IPython.utils.terminal import set_term_title
@magics_class
class OSMagics(Magics):
"""Magics to interact with the underlying OS (shell-type functionality).
"""
@skip_doctest
@line_magic
def alias(self, parameter_s=''):
"""Define an alias for a system command.
'%alias alias_name cmd' defines 'alias_name' as an alias for 'cmd'
Then, typing 'alias_name params' will execute the system command 'cmd
params' (from your underlying operating system).
Aliases have lower precedence than magic functions and Python normal
variables, so if 'foo' is both a Python variable and an alias, the
alias can not be executed until 'del foo' removes the Python variable.
You can use the %l specifier in an alias definition to represent the
whole line when the alias is called. For example::
In [2]: alias bracket echo "Input in brackets: <%l>"
In [3]: bracket hello world
Input in brackets: <hello world>
You can also define aliases with parameters using %s specifiers (one
per parameter)::
In [1]: alias parts echo first %s second %s
In [2]: %parts A B
first A second B
In [3]: %parts A
Incorrect number of arguments: 2 expected.
parts is an alias to: 'echo first %s second %s'
Note that %l and %s are mutually exclusive. You can only use one or
the other in your aliases.
Aliases expand Python variables just like system calls using ! or !!
do: all expressions prefixed with '$' get expanded. For details of
the semantic rules, see PEP-215:
http://www.python.org/peps/pep-0215.html. This is the library used by
IPython for variable expansion. If you want to access a true shell
variable, an extra $ is necessary to prevent its expansion by
IPython::
In [6]: alias show echo
In [7]: PATH='A Python string'
In [8]: show $PATH
A Python string
In [9]: show $$PATH
/usr/local/lf9560/bin:/usr/local/intel/compiler70/ia32/bin:...
You can use the alias facility to access all of $PATH. See the %rehashx
function, which automatically creates aliases for the contents of your
$PATH.
If called with no parameters, %alias prints the current alias table
for your system. For posix systems, the default aliases are 'cat',
'cp', 'mv', 'rm', 'rmdir', and 'mkdir', and other platform-specific
aliases are added. For windows-based systems, the default aliases are
'copy', 'ddir', 'echo', 'ls', 'ldir', 'mkdir', 'ren', and 'rmdir'.
You can see the definition of alias by adding a question mark in the
end::
In [1]: cat?
Repr: <alias cat for 'cat'>"""
par = parameter_s.strip()
if not par:
aliases = sorted(self.shell.alias_manager.aliases)
# stored = self.shell.db.get('stored_aliases', {} )
# for k, v in stored:
# atab.append(k, v[0])
print("Total number of aliases:", len(aliases))
sys.stdout.flush()
return aliases
# Now try to define a new one
try:
alias,cmd = par.split(None, 1)
except TypeError:
print(oinspect.getdoc(self.alias))
return
try:
self.shell.alias_manager.define_alias(alias, cmd)
except AliasError as e:
print(e)
# end magic_alias
@line_magic
def unalias(self, parameter_s=''):
"""Remove an alias"""
aname = parameter_s.strip()
try:
self.shell.alias_manager.undefine_alias(aname)
except ValueError as e:
print(e)
return
stored = self.shell.db.get('stored_aliases', {} )
if aname in stored:
print("Removing %stored alias",aname)
del stored[aname]
self.shell.db['stored_aliases'] = stored
@line_magic
def rehashx(self, parameter_s=''):
"""Update the alias table with all executable files in $PATH.
rehashx explicitly checks that every entry in $PATH is a file
with execute access (os.X_OK).
Under Windows, it checks executability as a match against a
'|'-separated string of extensions, stored in the IPython config
variable win_exec_ext. This defaults to 'exe|com|bat'.
This function also resets the root module cache of module completer,
used on slow filesystems.
"""
from IPython.core.alias import InvalidAliasError
# for the benefit of module completer in ipy_completers.py
del self.shell.db['rootmodules_cache']
path = [os.path.abspath(os.path.expanduser(p)) for p in
os.environ.get('PATH','').split(os.pathsep)]
syscmdlist = []
# Now define isexec in a cross platform manner.
if os.name == 'posix':
isexec = lambda fname:os.path.isfile(fname) and \
os.access(fname,os.X_OK)
else:
try:
winext = os.environ['pathext'].replace(';','|').replace('.','')
except KeyError:
winext = 'exe|com|bat|py'
if 'py' not in winext:
winext += '|py'
execre = re.compile(r'(.*)\.(%s)$' % winext,re.IGNORECASE)
isexec = lambda fname:os.path.isfile(fname) and execre.match(fname)
savedir = os.getcwd()
# Now walk the paths looking for executables to alias.
try:
# write the whole loop for posix/Windows so we don't have an if in
# the innermost part
if os.name == 'posix':
for pdir in path:
try:
os.chdir(pdir)
dirlist = os.listdir(pdir)
except OSError:
continue
for ff in dirlist:
if isexec(ff):
try:
# Removes dots from the name since ipython
# will assume names with dots to be python.
if not self.shell.alias_manager.is_alias(ff):
self.shell.alias_manager.define_alias(
ff.replace('.',''), ff)
except InvalidAliasError:
pass
else:
syscmdlist.append(ff)
else:
no_alias = Alias.blacklist
for pdir in path:
try:
os.chdir(pdir)
dirlist = os.listdir(pdir)
except OSError:
continue
for ff in dirlist:
base, ext = os.path.splitext(ff)
if isexec(ff) and base.lower() not in no_alias:
if ext.lower() == '.exe':
ff = base
try:
# Removes dots from the name since ipython
# will assume names with dots to be python.
self.shell.alias_manager.define_alias(
base.lower().replace('.',''), ff)
except InvalidAliasError:
pass
syscmdlist.append(ff)
self.shell.db['syscmdlist'] = syscmdlist
finally:
os.chdir(savedir)
@skip_doctest
@line_magic
def pwd(self, parameter_s=''):
"""Return the current working directory path.
Examples
--------
::
In [9]: pwd
Out[9]: '/home/tsuser/sprint/ipython'
"""
try:
return os.getcwd()
except FileNotFoundError:
raise UsageError("CWD no longer exists - please use %cd to change directory.")
@skip_doctest
@line_magic
def cd(self, parameter_s=''):
"""Change the current working directory.
This command automatically maintains an internal list of directories
you visit during your IPython session, in the variable _dh. The
command %dhist shows this history nicely formatted. You can also
do 'cd -<tab>' to see directory history conveniently.
Usage:
cd 'dir': changes to directory 'dir'.
cd -: changes to the last visited directory.
cd -<n>: changes to the n-th directory in the directory history.
cd --foo: change to directory that matches 'foo' in history
cd -b <bookmark_name>: jump to a bookmark set by %bookmark
(note: cd <bookmark_name> is enough if there is no
directory <bookmark_name>, but a bookmark with the name exists.)
'cd -b <tab>' allows you to tab-complete bookmark names.
Options:
-q: quiet. Do not print the working directory after the cd command is
executed. By default IPython's cd command does print this directory,
since the default prompts do not display path information.
Note that !cd doesn't work for this purpose because the shell where
!command runs is immediately discarded after executing 'command'.
Examples
--------
::
In [10]: cd parent/child
/home/tsuser/parent/child
"""
try:
oldcwd = os.getcwd()
except FileNotFoundError:
# Happens if the CWD has been deleted.
oldcwd = None
numcd = re.match(r'(-)(\d+)$',parameter_s)
# jump in directory history by number
if numcd:
nn = int(numcd.group(2))
try:
ps = self.shell.user_ns['_dh'][nn]
except IndexError:
print('The requested directory does not exist in history.')
return
else:
opts = {}
elif parameter_s.startswith('--'):
ps = None
fallback = None
pat = parameter_s[2:]
dh = self.shell.user_ns['_dh']
# first search only by basename (last component)
for ent in reversed(dh):
if pat in os.path.basename(ent) and os.path.isdir(ent):
ps = ent
break
if fallback is None and pat in ent and os.path.isdir(ent):
fallback = ent
# if we have no last part match, pick the first full path match
if ps is None:
ps = fallback
if ps is None:
print("No matching entry in directory history")
return
else:
opts = {}
else:
opts, ps = self.parse_options(parameter_s, 'qb', mode='string')
# jump to previous
if ps == '-':
try:
ps = self.shell.user_ns['_dh'][-2]
except IndexError:
raise UsageError('%cd -: No previous directory to change to.')
# jump to bookmark if needed
else:
if not os.path.isdir(ps) or 'b' in opts:
bkms = self.shell.db.get('bookmarks', {})
if ps in bkms:
target = bkms[ps]
print('(bookmark:%s) -> %s' % (ps, target))
ps = target
else:
if 'b' in opts:
raise UsageError("Bookmark '%s' not found. "
"Use '%%bookmark -l' to see your bookmarks." % ps)
# at this point ps should point to the target dir
if ps:
try:
os.chdir(os.path.expanduser(ps))
if hasattr(self.shell, 'term_title') and self.shell.term_title:
set_term_title(self.shell.term_title_format.format(cwd=abbrev_cwd()))
except OSError:
print(sys.exc_info()[1])
else:
cwd = os.getcwd()
dhist = self.shell.user_ns['_dh']
if oldcwd != cwd:
dhist.append(cwd)
self.shell.db['dhist'] = compress_dhist(dhist)[-100:]
else:
os.chdir(self.shell.home_dir)
if hasattr(self.shell, 'term_title') and self.shell.term_title:
set_term_title(self.shell.term_title_format.format(cwd="~"))
cwd = os.getcwd()
dhist = self.shell.user_ns['_dh']
if oldcwd != cwd:
dhist.append(cwd)
self.shell.db['dhist'] = compress_dhist(dhist)[-100:]
if not 'q' in opts and self.shell.user_ns['_dh']:
print(self.shell.user_ns['_dh'][-1])
@line_magic
def env(self, parameter_s=''):
"""Get, set, or list environment variables.
Usage:\\
%env: lists all environment variables/values
%env var: get value for var
%env var val: set value for var
%env var=val: set value for var
%env var=$val: set value for var, using python expansion if possible
"""
if parameter_s.strip():
split = '=' if '=' in parameter_s else ' '
bits = parameter_s.split(split)
if len(bits) == 1:
key = parameter_s.strip()
if key in os.environ:
return os.environ[key]
else:
err = "Environment does not have key: {0}".format(key)
raise UsageError(err)
if len(bits) > 1:
return self.set_env(parameter_s)
return dict(os.environ)
@line_magic
def set_env(self, parameter_s):
"""Set environment variables. Assumptions are that either "val" is a
name in the user namespace, or val is something that evaluates to a
string.
Usage:\\
%set_env var val: set value for var
%set_env var=val: set value for var
%set_env var=$val: set value for var, using python expansion if possible
"""
split = '=' if '=' in parameter_s else ' '
bits = parameter_s.split(split, 1)
if not parameter_s.strip() or len(bits)<2:
raise UsageError("usage is 'set_env var=val'")
var = bits[0].strip()
val = bits[1].strip()
if re.match(r'.*\s.*', var):
# an environment variable with whitespace is almost certainly
# not what the user intended. what's more likely is the wrong
# split was chosen, ie for "set_env cmd_args A=B", we chose
# '=' for the split and should have chosen ' '. to get around
# this, users should just assign directly to os.environ or use
# standard magic {var} expansion.
err = "refusing to set env var with whitespace: '{0}'"
            err = err.format(var)
raise UsageError(err)
os.environ[var] = val
print('env: {0}={1}'.format(var,val))
@line_magic
def pushd(self, parameter_s=''):
"""Place the current dir on stack and change directory.
Usage:\\
%pushd ['dirname']
"""
dir_s = self.shell.dir_stack
tgt = os.path.expanduser(parameter_s)
cwd = os.getcwd().replace(self.shell.home_dir,'~')
if tgt:
self.cd(parameter_s)
dir_s.insert(0,cwd)
return self.shell.magic('dirs')
@line_magic
def popd(self, parameter_s=''):
"""Change to directory popped off the top of the stack.
"""
if not self.shell.dir_stack:
raise UsageError("%popd on empty stack")
top = self.shell.dir_stack.pop(0)
self.cd(top)
print("popd ->",top)
@line_magic
def dirs(self, parameter_s=''):
"""Return the current directory stack."""
return self.shell.dir_stack
@line_magic
def dhist(self, parameter_s=''):
"""Print your history of visited directories.
%dhist -> print full history\\
%dhist n -> print last n entries only\\
%dhist n1 n2 -> print entries between n1 and n2 (n2 not included)\\
This history is automatically maintained by the %cd command, and
always available as the global list variable _dh. You can use %cd -<n>
to go to directory number <n>.
Note that most of time, you should view directory history by entering
cd -<TAB>.
"""
dh = self.shell.user_ns['_dh']
if parameter_s:
try:
                args = list(map(int, parameter_s.split()))
except:
self.arg_err(self.dhist)
return
if len(args) == 1:
ini,fin = max(len(dh)-(args[0]),0),len(dh)
elif len(args) == 2:
ini,fin = args
fin = min(fin, len(dh))
else:
self.arg_err(self.dhist)
return
else:
ini,fin = 0,len(dh)
print('Directory history (kept in _dh)')
for i in range(ini, fin):
print("%d: %s" % (i, dh[i]))
@skip_doctest
@line_magic
def sc(self, parameter_s=''):
"""Shell capture - run shell command and capture output (DEPRECATED use !).
DEPRECATED. Suboptimal, retained for backwards compatibility.
You should use the form 'var = !command' instead. Example:
"%sc -l myfiles = ls ~" should now be written as
"myfiles = !ls ~"
myfiles.s, myfiles.l and myfiles.n still apply as documented
below.
--
%sc [options] varname=command
IPython will run the given command using commands.getoutput(), and
will then update the user's interactive namespace with a variable
called varname, containing the value of the call. Your command can
contain shell wildcards, pipes, etc.
The '=' sign in the syntax is mandatory, and the variable name you
supply must follow Python's standard conventions for valid names.
(A special format without variable name exists for internal use)
Options:
-l: list output. Split the output on newlines into a list before
assigning it to the given variable. By default the output is stored
as a single string.
-v: verbose. Print the contents of the variable.
In most cases you should not need to split as a list, because the
returned value is a special type of string which can automatically
provide its contents either as a list (split on newlines) or as a
space-separated string. These are convenient, respectively, either
for sequential processing or to be passed to a shell command.
For example::
# Capture into variable a
In [1]: sc a=ls *py
# a is a string with embedded newlines
In [2]: a
Out[2]: 'setup.py\\nwin32_manual_post_install.py'
# which can be seen as a list:
In [3]: a.l
Out[3]: ['setup.py', 'win32_manual_post_install.py']
# or as a whitespace-separated string:
In [4]: a.s
Out[4]: 'setup.py win32_manual_post_install.py'
# a.s is useful to pass as a single command line:
In [5]: !wc -l $a.s
146 setup.py
130 win32_manual_post_install.py
276 total
# while the list form is useful to loop over:
In [6]: for f in a.l:
...: !wc -l $f
...:
146 setup.py
130 win32_manual_post_install.py
Similarly, the lists returned by the -l option are also special, in
the sense that you can equally invoke the .s attribute on them to
automatically get a whitespace-separated string from their contents::
In [7]: sc -l b=ls *py
In [8]: b
Out[8]: ['setup.py', 'win32_manual_post_install.py']
In [9]: b.s
Out[9]: 'setup.py win32_manual_post_install.py'
In summary, both the lists and strings used for output capture have
the following special attributes::
.l (or .list) : value as list.
.n (or .nlstr): value as newline-separated string.
.s (or .spstr): value as space-separated string.
"""
opts,args = self.parse_options(parameter_s, 'lv')
# Try to get a variable name and command to run
try:
# the variable name must be obtained from the parse_options
# output, which uses shlex.split to strip options out.
var,_ = args.split('=', 1)
var = var.strip()
# But the command has to be extracted from the original input
# parameter_s, not on what parse_options returns, to avoid the
# quote stripping which shlex.split performs on it.
_,cmd = parameter_s.split('=', 1)
except ValueError:
var,cmd = '',''
# If all looks ok, proceed
split = 'l' in opts
out = self.shell.getoutput(cmd, split=split)
if 'v' in opts:
print('%s ==\n%s' % (var, pformat(out)))
if var:
self.shell.user_ns.update({var:out})
else:
return out
@line_cell_magic
def sx(self, line='', cell=None):
"""Shell execute - run shell command and capture output (!! is short-hand).
%sx command
IPython will run the given command using commands.getoutput(), and
return the result formatted as a list (split on '\\n'). Since the
output is _returned_, it will be stored in ipython's regular output
cache Out[N] and in the '_N' automatic variables.
Notes:
1) If an input line begins with '!!', then %sx is automatically
invoked. That is, while::
!ls
causes ipython to simply issue system('ls'), typing::
!!ls
is a shorthand equivalent to::
%sx ls
2) %sx differs from %sc in that %sx automatically splits into a list,
like '%sc -l'. The reason for this is to make it as easy as possible
to process line-oriented shell output via further python commands.
%sc is meant to provide much finer control, but requires more
typing.
3) Just like %sc -l, this is a list with special attributes:
::
.l (or .list) : value as list.
.n (or .nlstr): value as newline-separated string.
.s (or .spstr): value as whitespace-separated string.
This is very useful when trying to use such lists as arguments to
system commands."""
if cell is None:
# line magic
return self.shell.getoutput(line)
else:
opts,args = self.parse_options(line, '', 'out=')
output = self.shell.getoutput(cell)
out_name = opts.get('out', opts.get('o'))
if out_name:
self.shell.user_ns[out_name] = output
else:
return output
system = line_cell_magic('system')(sx)
bang = cell_magic('!')(sx)
@line_magic
def bookmark(self, parameter_s=''):
"""Manage IPython's bookmark system.
%bookmark <name> - set bookmark to current dir
%bookmark <name> <dir> - set bookmark to <dir>
%bookmark -l - list all bookmarks
%bookmark -d <name> - remove bookmark
%bookmark -r - remove all bookmarks
You can later on access a bookmarked folder with::
%cd -b <name>
or simply '%cd <name>' if there is no directory called <name> AND
there is such a bookmark defined.
Your bookmarks persist through IPython sessions, but they are
associated with each profile."""
opts,args = self.parse_options(parameter_s,'drl',mode='list')
if len(args) > 2:
raise UsageError("%bookmark: too many arguments")
bkms = self.shell.db.get('bookmarks',{})
if 'd' in opts:
try:
todel = args[0]
except IndexError:
raise UsageError(
"%bookmark -d: must provide a bookmark to delete")
else:
try:
del bkms[todel]
except KeyError:
raise UsageError(
"%%bookmark -d: Can't delete bookmark '%s'" % todel)
elif 'r' in opts:
bkms = {}
elif 'l' in opts:
bks = sorted(bkms)
if bks:
size = max(map(len, bks))
else:
size = 0
fmt = '%-'+str(size)+'s -> %s'
print('Current bookmarks:')
for bk in bks:
print(fmt % (bk, bkms[bk]))
else:
if not args:
raise UsageError("%bookmark: You must specify the bookmark name")
elif len(args)==1:
bkms[args[0]] = os.getcwd()
elif len(args)==2:
bkms[args[0]] = args[1]
self.shell.db['bookmarks'] = bkms
@line_magic
def pycat(self, parameter_s=''):
"""Show a syntax-highlighted file through a pager.
This magic is similar to the cat utility, but it will assume the file
to be Python source and will show it with syntax highlighting.
This magic command can either take a local filename, an url,
an history range (see %history) or a macro as argument ::
%pycat myscript.py
%pycat 7-27
%pycat myMacro
%pycat http://www.example.com/myscript.py
"""
if not parameter_s:
raise UsageError('Missing filename, URL, input history range, '
'or macro.')
try :
cont = self.shell.find_user_code(parameter_s, skip_encoding_cookie=False)
except (ValueError, IOError):
print("Error: no such file, variable, URL, history range or macro")
return
page.page(self.shell.pycolorize(source_to_unicode(cont)))
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'-a', '--append', action='store_true', default=False,
help='Append contents of the cell to an existing file. '
'The file will be created if it does not exist.'
)
@magic_arguments.argument(
'filename', type=str,
help='file to write'
)
@cell_magic
def writefile(self, line, cell):
"""Write the contents of the cell to a file.
The file will be overwritten unless the -a (--append) flag is specified.
"""
args = magic_arguments.parse_argstring(self.writefile, line)
filename = os.path.expanduser(args.filename)
if os.path.exists(filename):
if args.append:
print("Appending to %s" % filename)
else:
print("Overwriting %s" % filename)
else:
print("Writing %s" % filename)
mode = 'a' if args.append else 'w'
with io.open(filename, mode, encoding='utf-8') as f:
f.write(cell)
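# Editor's usage sketch (not part of IPython): from plain Python code the
# magics above can be driven through an InteractiveShell instance, e.g.:
def _example_run_os_magics(shell):
    """``shell`` is assumed to be an IPython InteractiveShell instance."""
    shell.run_line_magic('bookmark', 'projects ~/src')  # set a bookmark
    shell.run_line_magic('cd', '-b projects')           # jump to it
    files = shell.run_line_magic('sx', 'ls')            # capture output as a list
    return files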
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/IPython/core/magics/osm.py | Python | bsd-2-clause | 28,494 | ["VisIt"] | d25f7d73a416800a81081bc5a6ad3aac26c2844bb5357d98b69b34aec06d8df2 |
# Copyright (c) RedFantom 2017
# For license see LICENSE
from ttkwidgets import LinkLabel
from tests import BaseWidgetTest
import tkinter as tk
class TestLinkLabel(BaseWidgetTest):
def test_linklabel_init(self):
label = LinkLabel(self.window, link="www.google.com", text="Visit Google")
label.pack()
self.window.update()
def test_linklabel_events(self):
label = LinkLabel(self.window, link="www.google.com", text="Visit Google")
label.pack()
self.window.update()
label._on_enter()
self.window.update()
label._on_leave()
self.window.update()
label.open_link()
self.window.update()
def test_linklabel_config(self):
label = LinkLabel(self.window, link="www.google.com", text="Visit Google")
label.pack()
self.window.update()
label.keys()
self.window.update()
label.configure(link="www.wikipedia.fr")
self.window.update()
label.cget("hover_color")
self.window.update()
value = label["normal_color"]
self.window.update()
label["clicked_color"] = "purple"
self.window.update()
| RedFantom/ttkwidgets | tests/test_linklabel.py | Python | gpl-3.0 | 1,187 | ["VisIt"] | 63dd51ba75f68fa40cb2370b499e9bb8e52eb26002c04a370b8f6a6bacb02cc2 |
# coding=utf-8
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from feedback.auth import VeranstalterBackend, TakeoverBackend, FSAccountBackend
from feedback.models import Person
from feedback.tests.tools import get_veranstaltung
class VeranstalterBackendTest(TestCase):
def setUp(self):
self.b = VeranstalterBackend()
self.u = User.objects.create_user(username=settings.USERNAME_VERANSTALTER)
self.p = Person.objects.create(vorname='Brian', nachname='Cohen')
self.s, self.v = get_veranstaltung('v')
self.v.access_token = '0123456789abcdef'
self.v.veranstalter.add(self.p)
self.v.save()
def test_authenticate(self):
vid = self.v.id
self.assertFalse(self.b.authenticate(request=None, vid=vid, token=None))
self.assertFalse(self.b.authenticate(request=None, vid=None, token='0123456789abcdef'))
self.assertFalse(self.b.authenticate(request=None, vid=vid, token='000'))
self.assertEqual(self.b.authenticate(request=None, vid=vid, token='0123456789abcdef'), self.u)
self.u.delete()
self.assertFalse(self.b.authenticate(request=None, vid=vid, token='0123456789abcdef'))
class TakeoverBackendTest(TestCase):
def setUp(self):
self.b = TakeoverBackend()
self.ub = User.objects.create_user(username='brian')
self.uj = User.objects.create_user(username='judith')
self.uj.is_superuser = True
def test_authenticate(self):
self.assertFalse(self.b.authenticate(request=None, user=self.ub))
self.assertFalse(self.b.authenticate(request=None, user=self.ub, current_user=self.ub))
self.assertEqual(self.b.authenticate(request=None, user=self.ub, current_user=self.uj), self.ub)
self.assertEqual(self.b.authenticate(request=None, user=self.ub, reset=True), self.ub)
class FSAccountBackendTest(TestCase):
def setUp(self):
self.b = FSAccountBackend()
self.u = User.objects.create_user('brian')
def test_configure_user(self):
self.assertTrue(self.b.configure_user(self.u).is_superuser)
u_db = User.objects.get(username='brian')
self.assertTrue(u_db.is_superuser)
|
d120/pyfeedback
|
src/feedback/tests/test_auth.py
|
Python
|
agpl-3.0
| 2,267
|
[
"Brian"
] |
44419b6a91b8eb77afddf17e1f62d1249ee5c5449eec4f29ae1c2e3e7d0991eb
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
import os
import time
from nikola import filters
# !! This is the configuration of Nikola. !! #
# !! You should edit it to your liking. !! #
# ! Some settings can be different in different languages.
# ! A comment stating (translatable) is used to denote those.
# ! There are two ways to specify a translatable setting:
# ! (a) BLOG_TITLE = "My Blog"
# ! (b) BLOG_TITLE = {"en": "My Blog", "es": "Mi Blog"}
# ! Option (a) is used when you don't want that setting translated.
# ! Option (b) is used for settings that are different in different languages.
# Data about this site
BLOG_AUTHOR = 'Manuel Kaufmann' # (translatable)
BLOG_TITLE = {
'es': 'Argentina en Python',
'en': 'Argentina in Python',
'pt': 'Argentina em Python',
}
# This is the main URL for your site. It will be used
# in a prominent link
SITE_URL = 'https://argentinaenpython.com/'
# This is the URL where Nikola's output will be deployed.
# If not set, defaults to SITE_URL
# BASE_URL = "http://argentinaenpython.com/"
BLOG_EMAIL = 'argentinaenpython@openmailbox.org'
BLOG_DESCRIPTION = {
'es': 'Recorre latinoamerica y el mundo compartiendo conocimiento, Python y su filosofía',
    'en': 'Travel around Latin America and the world sharing knowledge, Python and its philosophy',
'pt': 'Explore a América Latina e o mundo, compartilhando conhecimento, Python e sua filosofia',
} # yapf: disable
# Nikola is multilingual!
#
# Currently supported languages are:
#
# en English
# ar Arabic
# az Azerbaijani
# bg Bulgarian
# bs Bosnian
# ca Catalan
# cs Czech [ALTERNATIVELY cz]
# da Danish
# de German
# el Greek [NOT gr]
# eo Esperanto
# es Spanish
# et Estonian
# eu Basque
# fa Persian
# fi Finnish
# fr French
# gl Galician
# he Hebrew
# hi Hindi
# hr Croatian
# hu Hungarian
# id Indonesian
# it Italian
# ja Japanese [NOT jp]
# ko Korean
# lt Lithuanian
# nb Norwegian (Bokmål)
# nl Dutch
# pa Punjabi
# pl Polish
# pt Portuguese
# pt_br Portuguese (Brazil)
# ru Russian
# sk Slovak
# sl Slovene
# sq Albanian
# sr Serbian (Cyrillic)
# sr_latin Serbian (Latin)
# sv Swedish
# te Telugu
# th Thai
# tr Turkish [NOT tr_TR]
# uk Ukrainian
# ur Urdu
# zh_cn Chinese (Simplified)
# zh_tw Chinese (Traditional)
#
# If you want to use Nikola with a non-supported language you have to provide
# a module containing the necessary translations
# (cf. the modules at nikola/data/themes/base/messages/).
# If a specific post is not translated to a language, then the version
# in the default language will be shown instead.
# What is the default language?
DEFAULT_LANG = 'es'
# What other languages do you have?
# The format is {"translationcode" : "path/to/translation" }
# the path will be used as a prefix for the generated pages location
TRANSLATIONS = {
DEFAULT_LANG: '',
# Example for another language:
# "es": "./es",
'en': './en',
'pt': './pt',
}
# What will translated input files be named like?
# If you have a page something.rst, then something.pl.rst will be considered
# its Polish translation.
# (in the above example: path == "something", ext == "rst", lang == "pl")
# this pattern is also used for metadata:
# something.meta -> something.pl.meta
TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}'
# Links for the sidebar / navigation bar. (translatable)
# This is a dict. The keys are languages, and values are tuples.
#
# For regular links:
# ('https://getnikola.com/', 'Nikola Homepage')
#
# For submenus:
# (
# (
# ('https://apple.com/', 'Apple'),
# ('https://orange.com/', 'Orange'),
# ),
# 'Fruits'
# )
#
# WARNING: Support for submenus is theme-dependent.
# Only one level of submenus is supported.
# WARNING: Some themes, including the default Bootstrap 3 theme,
# may present issues if the menu is too large.
# (in Bootstrap, the navbar can grow too large and cover contents.)
# WARNING: If you link to directories, make sure to follow
# ``STRIP_INDEXES``. If it’s set to ``True``, end your links
# with a ``/``, otherwise end them with ``/index.html`` — or
# else they won’t be highlighted when active.
NAVIGATION_LINKS = {
DEFAULT_LANG: (
('/el-equipo/', 'El equipo'),
('/donde-esta-humitos/', '¿Dónde está humitos?'),
(
(
('/eventos/', 'Eventos'),
('/eventos/charlas/', 'Charlas'),
('/galeria/', 'Galería'),
('/eventos/organizar/', 'Organizar'),
),
'Eventos'
),
(
(
('/donaciones/', 'Donaciones'),
('/donaciones/colaboradores/', 'Colaboradores'),
),
'Donaciones',
),
(
(
('/historia/', 'Historia'),
('/historia/etapas/', 'Etapas'),
('/historia/prensa/', 'Prensa'),
),
'Historia'
),
(
(
('/nuestro-zen/', 'Nuestro Zen'),
('/quiero-aprender-python/', 'Quiero aprender Python'),
('/django-girls/tutorial/', 'Tutorial Django Girls'),
('/django-girls/extensiones-tutorial/', 'Extensiones Tutorial Django Girls'),
('https://elblogdehumitos.com/etiquetas/argentina-en-python/', 'Blog'),
('/como-colaborar/', '¿Cómo colaborar?'),
('/material-de-difusion/', 'Material de difusión'),
('/mapear-con-osmtracker/', 'Mapear con OSMTracker'),
('/mapas-de-openstreetmap-para-garmin/', 'Mapas OSM para Garmin'),
('/encuesta/', 'Encuesta'),
# ("/remeras/", "Remeras"),
),
'Extras'
),
('/contacto/', 'Contacto'),
),
'en': (
('/en/the-team/', 'The team'),
('/en/where-is-humitos/', 'Where is humitos?'),
(
(
('/en/events/', 'Events'),
('/eventos/charlas/', 'Talks'),
('/galeria/', 'Gallery'),
),
'Events'
),
(
(
('/en/donations/', 'Donations'),
('/en/donations/collaborators/', 'Collaborators'),
),
'Donations',
),
(
(
('/historia/', 'History'),
('/historia/etapas/', 'Stages'),
('/historia/prensa/', 'Press'),
),
'History'
),
(
(
('/en/our-zen/', 'Our Zen'),
('https://elblogdehumitos.com/etiquetas/argentina-en-python/', 'Blog'),
('/en/como-colaborar/', 'How to contribute?'),
('/en/material-de-difusion/', 'Broadcasting Material'),
('/mapear-con-osmtracker/', 'Mapping with OSMTracker'),
('/mapas-de-openstreetmap-para-garmin/', 'Mapas OSM para Garmin'),
),
'Extras'
),
('/en/contact/', 'Contact'),
),
} # yapf: disable
# Name of the theme to use.
THEME = 'custom'
# Primary color of your theme. This will be used to customize your theme and
# auto-generate related colors in POSTS_SECTION_COLORS. Must be a HEX value.
THEME_COLOR = '#5670d4'
# POSTS and PAGES contains (wildcard, destination, template) tuples.
# (translatable)
#
# The wildcard is used to generate a list of source files
# (whatever/thing.rst, for example).
#
# That fragment could have an associated metadata file (whatever/thing.meta),
# and optionally translated files (example for Spanish, with code "es"):
# whatever/thing.es.rst and whatever/thing.es.meta
#
# This assumes you use the default TRANSLATIONS_PATTERN.
#
# From those files, a set of HTML fragment files will be generated:
# cache/whatever/thing.html (and maybe cache/whatever/thing.html.es)
#
# These files are combined with the template to produce rendered
# pages, which will be placed at
# output/TRANSLATIONS[lang]/destination/pagename.html
#
# where "pagename" is the "slug" specified in the metadata file.
# The page might also be placed in /destination/pagename/index.html
# if PRETTY_URLS are enabled.
#
# The difference between POSTS and PAGES is that POSTS are added
# to feeds, indexes, tag lists and archives and are considered part
# of a blog, while PAGES are just independent HTML pages.
#
# Finally, note that destination can be translated, i.e. you can
# specify a different translation folder per language. Example:
# PAGES = (
# ("pages/*.rst", {"en": "pages", "de": "seiten"}, "story.tmpl"),
# ("pages/*.md", {"en": "pages", "de": "seiten"}, "story.tmpl"),
# )
POSTS = ()
PAGES = (
('stories/*.rst', '', 'story.tmpl'),
('stories/*.txt', '', 'story.tmpl'),
) # yapf: disable
# Below this point, everything is optional
# Post's dates are considered in UTC by default, if you want to use
# another time zone, please set TIMEZONE to match. Check the available
# list from Wikipedia:
# https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# (e.g. 'Europe/Zurich')
# Also, if you want to use a different time zone in some of your posts,
# you can use the ISO 8601/RFC 3339 format (ex. 2012-03-30T23:00:00+02:00)
TIMEZONE = 'America/Argentina/Buenos_Aires'
# If you want to use ISO 8601 (also valid RFC 3339) throughout Nikola
# (especially in new_post), set this to True.
# Note that this does not affect DATE_FORMAT.
# FORCE_ISO8601 = False
# Date format used to display post dates. (translatable)
# (str used by datetime.datetime.strftime)
# DATE_FORMAT = '%Y-%m-%d %H:%M'
# Date format used to display post dates, if local dates are used. (translatable)
# (str used by moment.js)
# JS_DATE_FORMAT = 'YYYY-MM-DD HH:mm'
# Date fanciness.
#
# 0 = using DATE_FORMAT and TIMEZONE
# 1 = using JS_DATE_FORMAT and local user time (via moment.js)
# 2 = using a string like “2 days ago”
#
# Your theme must support it, Bootstrap already does.
DATE_FANCINESS = 2
# While Nikola can select a sensible locale for each language,
# sometimes explicit control can come handy.
# In this file we express locales in the string form that
# python's locales will accept in your OS, by example
# "en_US.utf8" in Unix-like OS, "English_United States" in Windows.
# LOCALES = dict mapping language --> explicit locale for the languages
# in TRANSLATIONS. You can omit one or more keys.
# LOCALE_FALLBACK = locale to use when an explicit locale is unavailable
# LOCALE_DEFAULT = locale to use for languages not mentioned in LOCALES; if
# not set the default Nikola mapping is used.
LOCALES = {
'es': 'es_AR.utf8',
'en': 'en_US.utf8',
}
# LOCALE_FALLBACK = None
# LOCALE_DEFAULT = None
# One or more folders containing files to be copied as-is into the output.
# The format is a dictionary of {source: relative destination}.
# Default is:
# FILES_FOLDERS = {'files': ''}
# Which means copy 'files' into 'output'
# One or more folders containing code listings to be processed and published on
# the site. The format is a dictionary of {source: relative destination}.
# Default is:
# LISTINGS_FOLDERS = {'listings': 'listings'}
# Which means process listings from 'listings' into 'output/listings'
# A mapping of languages to file-extensions that represent that language.
# Feel free to add or delete extensions to any list, but don't add any new
# compilers unless you write the interface for it yourself.
#
# 'rest' is reStructuredText
# 'markdown' is Markdown
# 'html' assumes the file is HTML and just copies it
COMPILERS = {
"rest": ('.rst', '.txt'),
"markdown": ('.md', '.mdown', '.markdown'),
"textile": ('.textile',),
"txt2tags": ('.t2t',),
"bbcode": ('.bb',),
"wiki": ('.wiki',),
"ipynb": ('.ipynb',),
"html": ('.html', '.htm'),
# PHP files are rendered the usual way (i.e. with the full templates).
# The resulting files have .php extensions, making it possible to run
# them without reconfiguring your server to recognize them.
"php": ('.php',),
# Pandoc detects the input from the source filename
# but is disabled by default as it would conflict
# with many of the others.
# "pandoc": ('.rst', '.md', '.txt'),
}
# Create by default posts in one file format?
# Set to False for two-file posts, with separate metadata.
# ONE_FILE_POSTS = True
# Preferred metadata format for new posts
# "Nikola": reST comments, wrapped in a HTML comment if needed (default)
# "YAML": YAML wrapped in "---"
# "TOML": TOML wrapped in "+++"
# "Pelican": Native markdown metadata or reST docinfo fields. Nikola style for other formats.
# METADATA_FORMAT = "Nikola"
# Use date-based path when creating posts?
# Can be enabled on a per-post basis with `nikola new_post -d`.
# The setting is ignored when creating pages.
# NEW_POST_DATE_PATH = False
# What format to use when creating posts with date paths?
# Default is '%Y/%m/%d', other possibilities include '%Y' or '%Y/%m'.
# NEW_POST_DATE_PATH_FORMAT = '%Y/%m/%d'
# If this is set to True, the DEFAULT_LANG version will be displayed for
# untranslated posts.
# If this is set to False, then posts that are not translated to a language
# LANG will not be visible at all in the pages in that language.
# SHOW_UNTRANSLATED_POSTS = True
# Nikola supports logo display. If you have one, you can put the URL here.
# Final output is <img src="LOGO_URL" id="logo" alt="BLOG_TITLE">.
# The URL may be relative to the site root.
# LOGO_URL = ''
# If you want to hide the title of your website (for example, if your logo
# already contains the text), set this to False.
# SHOW_BLOG_TITLE = True
# Writes tag cloud data in form of tag_cloud_data.json.
WRITE_TAG_CLOUD = False
# Generate pages for each section. The site must have at least two sections
# for this option to take effect. It wouldn't build for just one section.
POSTS_SECTIONS = True
# Setting this to False generates a list page instead of an index. Indexes
# are the default and will apply GENERATE_ATOM if set.
# POSTS_SECTIONS_ARE_INDEXES = True
# Final locations are:
# output / TRANSLATION[lang] / SECTION_PATH / SECTION_NAME / index.html (list of posts for a section)
# output / TRANSLATION[lang] / SECTION_PATH / SECTION_NAME / RSS_FILENAME_BASE RSS_EXTENSION (RSS feed for a section)
# (translatable)
# SECTION_PATH = ""
# Each post and section page will have an associated color that can be used
# to style them with a recognizable color detail across your site. A color
# is assigned to each section based on shifting the hue of your THEME_COLOR
# at least 7.5 % while leaving the lightness and saturation untouched in the
# HUSL colorspace. You can overwrite colors by assigning them colors in HEX.
# POSTS_SECTION_COLORS = {
# DEFAULT_LANG: {
# 'posts': '#49b11bf',
# 'reviews': '#ffe200',
# },
# }
# Associate a description with a section. For use in meta description on
# section index pages or elsewhere in themes.
# POSTS_SECTION_DESCRIPTIONS = {
# DEFAULT_LANG: {
# 'how-to': 'Learn how-to things properly with these amazing tutorials.',
# },
# }
# Sections are determined by their output directory as set in POSTS by default,
# but can alternatively be determined from file metadata instead.
# POSTS_SECTION_FROM_META = False
# Names are determined from the output directory name automatically or the
# metadata label. Unless overwritten below, names will be title-cased and have
# hyphens replaced by spaces.
# POSTS_SECTION_NAME = {
# DEFAULT_LANG: {
# 'posts': 'Blog Posts',
# 'uncategorized': 'Odds and Ends',
# },
# }
# Titles for per-section index pages. Can be either one string where "{name}"
# is substituted or the POSTS_SECTION_NAME, or a dict of sections. Note
# that the INDEX_PAGES option is also applied to section page titles.
# POSTS_SECTION_TITLE = {
# DEFAULT_LANG: {
# 'how-to': 'How-to and Tutorials',
# },
# }
# A list of dictionaries specifying sections which translate to each other.
# For example:
# [
# {'en': 'private', 'de': 'Privat'},
# {'en': 'work', 'fr': 'travail', 'de': 'Arbeit'},
# ]
# POSTS_SECTION_TRANSLATIONS = []
# If set to True, a section in a language will be treated as a translation
# of the literally same section in all other languages. Enable this if you
# do not translate sections, for example.
# POSTS_SECTION_TRANSLATIONS_ADD_DEFAULTS = True
# Paths for different autogenerated bits. These are combined with the
# translation paths.
# Final locations are:
# output / TRANSLATION[lang] / TAG_PATH / index.html (list of tags)
# output / TRANSLATION[lang] / TAG_PATH / tag.html (list of posts for a tag)
# output / TRANSLATION[lang] / TAG_PATH / tag RSS_EXTENSION (RSS feed for a tag)
# (translatable)
# TAG_PATH = "categories"
# By default, the list of tags is stored in
# output / TRANSLATION[lang] / TAG_PATH / index.html
# (see explanation for TAG_PATH). This location can be changed to
# output / TRANSLATION[lang] / TAGS_INDEX_PATH
# with an arbitrary relative path TAGS_INDEX_PATH.
# (translatable)
# TAGS_INDEX_PATH = "tags.html"
# If TAG_PAGES_ARE_INDEXES is set to True, each tag's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# TAG_PAGES_ARE_INDEXES = False
# Set descriptions for tag pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the tag list or index page’s title.
# TAG_PAGES_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "blogging": "Meta-blog posts about blogging about blogging.",
# "open source": "My contributions to my many, varied, ever-changing, and eternal libre software projects."
# },
# }
# Set special titles for tag pages. The default is "Posts about TAG".
# TAG_PAGES_TITLES = {
# DEFAULT_LANG: {
# "blogging": "Meta-posts about blogging",
# "open source": "Posts about open source software"
# },
# }
# If you do not want to display a tag publicly, you can mark it as hidden.
# The tag will not be displayed on the tag list page, the tag cloud and posts.
# Tag pages will still be generated.
HIDDEN_TAGS = ['mathjax']
# Only include tags on the tag list/overview page if there are at least
# TAGLIST_MINIMUM_POSTS number of posts or more with every tag. Every tag
# page is still generated, linked from posts, and included in the sitemap.
# However, more obscure tags can be hidden from the tag index page.
# TAGLIST_MINIMUM_POSTS = 1
# A list of dictionaries specifying tags which translate to each other.
# Format: a list of dicts {language: translation, language2: translation2, …}
# See POSTS_SECTION_TRANSLATIONS example above.
# TAG_TRANSLATIONS = []
# If set to True, a tag in a language will be treated as a translation
# of the literally same tag in all other languages. Enable this if you
# do not translate tags, for example.
# TAG_TRANSLATIONS_ADD_DEFAULTS = True
# Final locations are:
# output / TRANSLATION[lang] / CATEGORY_PATH / index.html (list of categories)
# output / TRANSLATION[lang] / CATEGORY_PATH / CATEGORY_PREFIX category.html (list of posts for a category)
# output / TRANSLATION[lang] / CATEGORY_PATH / CATEGORY_PREFIX category RSS_EXTENSION (RSS feed for a category)
# (translatable)
# CATEGORY_PATH = "categories"
# CATEGORY_PREFIX = "cat_"
# By default, the list of categories is stored in
# output / TRANSLATION[lang] / CATEGORY_PATH / index.html
# (see explanation for CATEGORY_PATH). This location can be changed to
# output / TRANSLATION[lang] / CATEGORIES_INDEX_PATH
# with an arbitrary relative path CATEGORIES_INDEX_PATH.
# (translatable)
# CATEGORIES_INDEX_PATH = "categories.html"
# If CATEGORY_ALLOW_HIERARCHIES is set to True, categories can be organized in
# hierarchies. For a post, the whole path in the hierarchy must be specified,
# using a forward slash ('/') to separate paths. Use a backslash ('\') to escape
# a forward slash or a backslash (i.e. '\//\\' is a path specifying the
# subcategory called '\' of the top-level category called '/').
CATEGORY_ALLOW_HIERARCHIES = False
# If CATEGORY_OUTPUT_FLAT_HIERARCHY is set to True, the output written to output
# contains only the name of the leaf category and not the whole path.
CATEGORY_OUTPUT_FLAT_HIERARCHY = False
# If CATEGORY_PAGES_ARE_INDEXES is set to True, each category's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# CATEGORY_PAGES_ARE_INDEXES = False
# Set descriptions for category pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the category list or index page’s title.
# CATEGORY_PAGES_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "blogging": "Meta-blog posts about blogging about blogging.",
# "open source": "My contributions to my many, varied, ever-changing, and eternal libre software projects."
# },
# }
# Set special titles for category pages. The default is "Posts about CATEGORY".
# CATEGORY_PAGES_TITLES = {
# DEFAULT_LANG: {
# "blogging": "Meta-posts about blogging",
# "open source": "Posts about open source software"
# },
# }
# If you do not want to display a category publicly, you can mark it as hidden.
# The category will not be displayed on the category list page.
# Category pages will still be generated.
HIDDEN_CATEGORIES = []
# A list of dictionaries specifying categories which translate to each other.
# Format: a list of dicts {language: translation, language2: translation2, …}
# See POSTS_SECTION_TRANSLATIONS example above.
# CATEGORY_TRANSLATIONS = []
# If set to True, a category in a language will be treated as a translation
# of the literally same category in all other languages. Enable this if you
# do not translate categories, for example.
# CATEGORY_TRANSLATIONS_ADD_DEFAULTS = True
# If ENABLE_AUTHOR_PAGES is set to True and there is more than one
# author, author pages are generated.
# ENABLE_AUTHOR_PAGES = True
# Path to author pages. Final locations are:
# output / TRANSLATION[lang] / AUTHOR_PATH / index.html (list of authors)
# output / TRANSLATION[lang] / AUTHOR_PATH / author.html (list of posts by an author)
# output / TRANSLATION[lang] / AUTHOR_PATH / author RSS_EXTENSION (RSS feed for an author)
# (translatable)
# AUTHOR_PATH = "authors"
# If AUTHOR_PAGES_ARE_INDEXES is set to True, each author's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# AUTHOR_PAGES_ARE_INDEXES = False
# Set descriptions for author pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the author list or index page’s title.
# AUTHOR_PAGES_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "Juanjo Conti": "Python coder and writer.",
# "Roberto Alsina": "Nikola father."
# },
# }
# If you do not want to display an author publicly, you can mark it as hidden.
# The author will not be displayed on the author list page and posts.
# Tag pages will still be generated.
HIDDEN_AUTHORS = ['Guest']
# Final location for the main blog page and sibling paginated pages is
# output / TRANSLATION[lang] / INDEX_PATH / index-*.html
# (translatable)
INDEX_PATH = 'blog'
# Optional HTML that is displayed on “main” blog index.html files.
# May be used for a greeting. (translatable)
FRONT_INDEX_HEADER = {
DEFAULT_LANG: ''
}
# Create per-month archives instead of per-year
# CREATE_MONTHLY_ARCHIVE = False
# Create one large archive instead of per-year
# CREATE_SINGLE_ARCHIVE = False
# Create year, month, and day archives each with a (long) list of posts
# (overrides both CREATE_MONTHLY_ARCHIVE and CREATE_SINGLE_ARCHIVE)
# CREATE_FULL_ARCHIVES = False
# If monthly archives or full archives are created, adds also one archive per day
# CREATE_DAILY_ARCHIVE = False
# Create previous, up, next navigation links for archives
# CREATE_ARCHIVE_NAVIGATION = False
# Final locations for the archives are:
# output / TRANSLATION[lang] / ARCHIVE_PATH / ARCHIVE_FILENAME
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / DAY / index.html
# ARCHIVE_PATH = ""
# ARCHIVE_FILENAME = "archive.html"
# If ARCHIVES_ARE_INDEXES is set to True, each archive page which contains a list
# of posts will contain the posts themselves. If set to False, it will be just a
# list of links.
# ARCHIVES_ARE_INDEXES = False
# URLs to other posts/pages can take 3 forms:
# rel_path: a relative URL to the current page/post (default)
# full_path: a URL with the full path from the root
# absolute: a complete URL (that includes the SITE_URL)
# URL_TYPE = 'rel_path'
# If USE_BASE_TAG is True, then all HTML files will include
# something like <base href=http://foo.var.com/baz/bat> to help
# the browser resolve relative links.
# Most people don’t need this tag; major websites don’t use it. Use
# only if you know what you’re doing. If this is True, your website
# will not be fully usable by manually opening .html files in your web
# browser (`nikola serve` or `nikola auto` is mandatory). Also, if you
# have mirrors of your site, they will point to SITE_URL everywhere.
USE_BASE_TAG = True
# Extension for RSS feed files
# RSS_EXTENSION = ".xml"
# RSS filename base (without extension); used for indexes, sections and galleries.
# (translatable)
# RSS_FILENAME_BASE = "rss"
# Final location for the blog main RSS feed is:
# output / TRANSLATION[lang] / RSS_PATH / RSS_FILENAME_BASE RSS_EXTENSION
# (translatable)
# RSS_PATH = ""
# Final location for the blog main Atom feed is:
# output / TRANSLATION[lang] / ATOM_PATH / index.atom
# (translatable)
# ATOM_PATH = ""
# Slug the Tag URL. Easier for users to type, special characters are
# often removed or replaced as well.
# SLUG_TAG_PATH = True
# Slug the Author URL. Easier for users to type, special characters are
# often removed or replaced as well.
# SLUG_AUTHOR_PATH = True
# A list of redirection tuples, [("foo/from.html", "/bar/to.html")].
#
# A HTML file will be created in output/foo/from.html that redirects
# to the "/bar/to.html" URL. notice that the "from" side MUST be a
# relative URL.
#
# If you don't need any of these, just set to []
REDIRECTIONS = [
('donaciones/misiones/index.html', '/donaciones/arreglo-de-vehiculo/'),
('en/donaciones/index.html', '/en/donations/'),
('en/donaciones/medios/index.html', '/en/donations/payment-methods/'),
('en/donaciones/medios/ways/index.html', '/en/donations/payment-methods/'),
('en/donde-esta-humitos/index.html', '/en/where-is-humitos/'),
('en/nuestro-zen/index.html', '/en/our-zen/'),
('en/eventos/index.html', '/en/events/'),
('galeria/track-teen/index.html', '/galeria/track-teen-scipy-la-2015/'),
('workshop-django-girls-cochabamba/index.html', '/django-girls-cochabamba/'),
('django-girls-en-pyconar-2016/index.html', '/django-girls-pyconar-2016/'),
('django-girls-cuenca/index.html', '/django-girls/2017/03/cuenca/'),
('django-girls/2017/02/cuenca/index.html', '/django-girls/2017/03/cuenca/'),
('django-girls/index.html', '/django-girls/tutorial/'),
('django-girls-tutorial-es/index.html', '/django-girls/tutorial/'),
('django-girls-tutorial-es_v2/index.html', '/django-girls/tutorial/'),
] # yapf: disable
# Presets of commands to execute to deploy. Can be anything, for
# example, you may use rsync:
# "rsync -rav --delete output/ joe@my.site:/srv/www/site"
# And then do a backup, or run `nikola ping` from the `ping`
# plugin (`nikola plugin -i ping`). Or run `nikola check -l`.
# You may also want to use github_deploy (see below).
# You can define multiple presets and specify them as arguments
# to `nikola deploy`. If no arguments are specified, a preset
# named `default` will be executed. You can use as many presets
# in a `nikola deploy` command as you like.
DEPLOY_COMMANDS = {
'default': [
'python geolocation.py --verbose --symlinks',
# './create-django-girls-tutorial.sh',
# './create-django-girls-tutorial-extensions.sh',
# './create-django-girls-coach-manual.sh',
# './create-django-girls-recursos.sh',
'./deploy-rsync.sh',
],
}
# github_deploy configuration
# For more details, read the manual:
# https://getnikola.com/handbook.html#deploying-to-github
# You will need to configure the deployment branch on GitHub.
GITHUB_SOURCE_BRANCH = 'src'
GITHUB_DEPLOY_BRANCH = 'master'
# The name of the remote where you wish to push to, using github_deploy.
GITHUB_REMOTE_NAME = 'origin'
# Whether or not github_deploy should commit to the source branch automatically
# before deploying.
GITHUB_COMMIT_SOURCE = True
# Where the output site should be located
# If you don't use an absolute path, it will be considered as relative
# to the location of conf.py
# OUTPUT_FOLDER = 'output'
# where the "cache" of partial generated content should be located
# default: 'cache'
CACHE_FOLDER = '.cache'
# Filters to apply to the output.
# A dictionary where the keys are either a single file extension or
# a tuple of file extensions.
#
# And the value is a list of commands to be applied in order.
#
# Each command must be either:
#
# A string containing a '%s' which will
# be replaced with a filename. The command *must* produce output
# in place.
#
# Or:
#
# A python callable, which will be called with the filename as
# argument.
#
# By default, only .php files use filters to inject PHP into
# Nikola’s templates. All other filters must be enabled through FILTERS.
#
# Many filters are shipped with Nikola. A list is available in the manual:
# <https://getnikola.com/handbook.html#post-processing-filters>
#
# Executable for the "yui_compressor" filter (defaults to 'yui-compressor').
# YUI_COMPRESSOR_EXECUTABLE = 'yui-compressor'
# Executable for the "closure_compiler" filter (defaults to 'closure-compiler').
# CLOSURE_COMPILER_EXECUTABLE = 'closure-compiler'
# Executable for the "optipng" filter (defaults to 'optipng').
# OPTIPNG_EXECUTABLE = 'optipng'
# Executable for the "jpegoptim" filter (defaults to 'jpegoptim').
# JPEGOPTIM_EXECUTABLE = 'jpegoptim'
# Executable for the "html_tidy_withconfig", "html_tidy_nowrap",
# "html_tidy_wrap", "html_tidy_wrap_attr" and "html_tidy_mini" filters
# (defaults to 'tidy5').
# HTML_TIDY_EXECUTABLE = 'tidy5'
# List of XPath expressions which should be used for finding headers
# ({hx} is replaced by headers h1 through h6).
# You must change this if you use a custom theme that does not use
# "e-content entry-content" as a class for post and page contents.
# HEADER_PERMALINKS_XPATH_LIST = ['*//div[@class="e-content entry-content"]//{hx}']
# Include *every* header (not recommended):
# HEADER_PERMALINKS_XPATH_LIST = ['*//{hx}']
# File blacklist for header permalinks. Contains output path
# (eg. 'output/index.html')
# HEADER_PERMALINKS_FILE_BLACKLIST = []
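# Output filter (used in FILTERS below): obfuscate the contact e-mail in the
# generated HTML by replacing '@' with 'ð' and '.' with 'ø'. The JavaScript
# snippet in BODY_END reverses this substitution in the visitor's browser.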
def rpl_email(filename):
old_email = BLOG_EMAIL
new_email = BLOG_EMAIL.replace('@', 'ð').replace('.', 'ø')
with open(filename, 'r') as fh:
content = fh.read()
content = content.replace(old_email, new_email)
with open(filename, 'w') as fh:
fh.write(content)
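# Output filter (used in FILTERS below): for images published under
# historia/prensa, shell out to ImageMagick's `convert` to write 340x255
# cropped copies into output/historia/prensa/.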
def resize_historia_prensa_images(filename):
abspath = os.path.abspath(filename)
# TODO: improve the path check
if 'historia/prensa' in abspath:
# from doit import tools
# tools.set_trace()
# TODO: use the OUTPUT variable here
_, ext = os.path.splitext(filename)
cmd = "convert -resize 340x -crop 340x255+0 '{}' output/historia/prensa/`basename '{}' {}`-340x255{}"
cmd = cmd.format(abspath, abspath, ext, ext)
os.system(cmd)
# from nikola import filters
FILTERS = {
'.html': [rpl_email, filters.typogrify],
# ".js": [filters.closure_compiler],
# ".jpg": ["jpegoptim --strip-all -m75 -v %s"],
# ".html": ["rpl {old_email} {new_email} %s".format(
# old_email=BLOG_EMAIL,
# new_email=BLOG_EMAIL.replace('@', 'ð').replace('.', 'ø')
# )],
'.png': [resize_historia_prensa_images],
'.jpeg': [resize_historia_prensa_images],
'.jpg': [resize_historia_prensa_images],
}
# Expert setting! Create a gzipped copy of each generated file. Cheap server-
# side optimization for very high traffic sites or low memory servers.
# GZIP_FILES = False
# File extensions that will be compressed
# GZIP_EXTENSIONS = ('.txt', '.htm', '.html', '.css', '.js', '.json', '.atom', '.xml')
# Use an external gzip command? None means no.
# Example: GZIP_COMMAND = "pigz -k {filename}"
# GZIP_COMMAND = None
# Make sure the server does not return a "Accept-Ranges: bytes" header for
# files compressed by this option! OR make sure that a ranged request does not
# return partial content of another representation for these resources. Do not
# use this feature if you do not understand what this means.
# #############################################################################
# Image Gallery Options
# #############################################################################
# One or more folders containing galleries. The format is a dictionary of
# {"source": "relative_destination"}, where galleries are looked for in
# "source/" and the results will be located in
# "OUTPUT_PATH/relative_destination/gallery_name"
# Default is:
GALLERY_FOLDERS = {'galleries': 'galeria'}
# More gallery options:
# THUMBNAIL_SIZE = 180
# MAX_IMAGE_SIZE = 1280
USE_FILENAME_AS_TITLE = False
# EXTRA_IMAGE_EXTENSIONS = []
#
# If set to False, it will sort by filename instead. Defaults to True
GALLERY_SORT_BY_DATE = True
# If set to True, EXIF data will be copied when an image is thumbnailed or
# resized.
PRESERVE_EXIF_DATA = True
# If you have enabled PRESERVE_EXIF_DATA, this option lets you choose EXIF
# fields you want to keep in images. (See also PRESERVE_EXIF_DATA)
#
# For a full list of field names, please see here:
# http://www.cipa.jp/std/documents/e/DC-008-2012_E.pdf
#
# This is a dictionary of lists. Each key in the dictionary is the
# name of an IFD, and each list item is a field you want to preserve.
# If you have an IFD with only a '*' item, *EVERY* item in it will be
# preserved. If you don’t want to preserve anything in an IFD, remove it
# from the setting. By default, no EXIF information is kept.
# Setting the whitelist to anything other than {} implies
# PRESERVE_EXIF_DATA is set to True
# To preserve ALL EXIF data, set EXIF_WHITELIST to {"*": "*"}
EXIF_WHITELIST = {'*': '*'}
# Some examples of EXIF_WHITELIST settings:
# Basic image information:
# EXIF_WHITELIST['0th'] = [
# "Orientation",
# "XResolution",
# "YResolution",
# ]
# If you want to keep GPS data in the images:
# EXIF_WHITELIST['GPS'] = ["*"]
# Embedded thumbnail information:
# EXIF_WHITELIST['1st'] = ["*"]
# If set to True, any ICC profile will be copied when an image is thumbnailed or
# resized.
# PRESERVE_ICC_PROFILES = False
# Folders containing images to be used in normal posts or pages.
# IMAGE_FOLDERS is a dictionary of the form {"source": "destination"},
# where "source" is the folder containing the images to be published, and
# "destination" is the folder under OUTPUT_PATH containing the images copied
# to the site. Thumbnail images will be created there as well.
# To reference the images in your posts, include a leading slash in the path.
# For example, if IMAGE_FOLDERS = {'images': 'images'}, write
#
# .. image:: /images/tesla.jpg
#
# See the Nikola Handbook for details (in the “Embedding Images” and
# “Thumbnails” sections)
# Images will be scaled down according to IMAGE_THUMBNAIL_SIZE and MAX_IMAGE_SIZE
# options, but will have to be referenced manually to be visible on the site
# (the thumbnail has ``.thumbnail`` added before the file extension by default,
# but a different naming template can be configured with IMAGE_THUMBNAIL_FORMAT).
IMAGE_FOLDERS = {'images': ''}
IMAGE_THUMBNAIL_SIZE = 580
IMAGE_THUMBNAIL_FORMAT = '{name}.thumbnail{ext}'
# #############################################################################
# HTML fragments and diverse things that are used by the templates
# #############################################################################
# Data about post-per-page indexes.
# INDEXES_PAGES defaults to ' old posts, page %d' or ' page %d' (translated),
# depending on the value of INDEXES_PAGES_MAIN.
#
# (translatable) If the following is empty, defaults to BLOG_TITLE:
# INDEXES_TITLE = ""
#
# (translatable) If the following is empty, defaults to ' [old posts,] page %d' (see above):
# INDEXES_PAGES = ""
#
# If the following is True, INDEXES_PAGES is also displayed on the main (the
# newest) index page (index.html):
INDEXES_PAGES_MAIN = True
#
# If the following is True, index-1.html has the oldest posts, index-2.html the
# second-oldest posts, etc., and index.html has the newest posts. This ensures
# that all posts on index-x.html will forever stay on that page, no matter how
# many new posts are added.
# If False, index-1.html has the second-newest posts, index-2.html the third-newest,
# and index-n.html the oldest posts. When this is active, old posts can be moved
# to other index pages when new posts are added.
# INDEXES_STATIC = True
#
# (translatable) If PRETTY_URLS is set to True, this setting will be used to create
# prettier URLs for index pages, such as page/2/index.html instead of index-2.html.
# Valid values for this settings are:
# * False,
# * a list or tuple, specifying the path to be generated,
# * a dictionary mapping languages to lists or tuples.
# Every list or tuple must consist of strings which are used to combine the path;
# for example:
# ['page', '{number}', '{index_file}']
# The replacements
# {number} --> (logical) page number;
# {old_number} --> the page number inserted into index-n.html before (zero for
# the main page);
# {index_file} --> value of option INDEX_FILE
# are made.
# Note that in case INDEXES_PAGES_MAIN is set to True, a redirection will be created
# for the full URL with the page number of the main page to the normal (shorter) main
# page URL.
# INDEXES_PRETTY_PAGE_URL = False
#
# If the following is true, a page range navigation will be inserted to indices.
# Please note that this will undo the effect of INDEXES_STATIC, as all index pages
# must be recreated whenever the number of pages changes.
# SHOW_INDEX_PAGE_NAVIGATION = False
# If the following is True, a meta name="generator" tag is added to pages. The
# generator tag is used to specify the software used to generate the page
# (it promotes Nikola).
# META_GENERATOR_TAG = True
# Color scheme to be used for code blocks. If your theme provides
# "assets/css/code.css" this is ignored. Leave empty to disable.
# Can be any of:
# algol, algol_nu, autumn, borland, bw, colorful, default, emacs, friendly,
# fruity, igor, lovelace, manni, monokai, murphy, native, paraiso-dark,
# paraiso-light, pastie, perldoc, rrt, tango, trac, vim, vs, xcode
# This list MAY be incomplete since pygments adds styles every now and then.
# Check with list(pygments.styles.get_all_styles()) in an interpreter.
# CODE_COLOR_SCHEME = 'default'
# FAVICONS contains (name, file, size) tuples.
# Used to create favicon link like this:
# <link rel="name" href="file" sizes="size"/>
FAVICONS = {
('icon', '/favicon.ico', '16x9'),
('icon', '/favicon_32.ico', '32x18'),
('icon', '/icon_128.png', '128x71'),
}
# Show teasers (instead of full posts) in indexes? Defaults to False.
INDEX_TEASERS = True
# HTML fragments with the Read more... links.
# The following tags exist and are replaced for you:
# {link} A link to the full post page.
# {read_more} The string “Read more” in the current language.
# {reading_time} An estimate of how long it will take to read the post.
# {remaining_reading_time} An estimate of how long it will take to read the post, sans the teaser.
# {min_remaining_read} The string “{remaining_reading_time} min remaining to read” in the current language.
# {paragraph_count} The amount of paragraphs in the post.
# {remaining_paragraph_count} The amount of paragraphs in the post, sans the teaser.
# {post_title} The title of the post.
# {{ A literal { (U+007B LEFT CURLY BRACKET)
# }} A literal } (U+007D RIGHT CURLY BRACKET)
# 'Read more...' for the index page, if INDEX_TEASERS is True (translatable)
INDEX_READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>'
# 'Read more...' for the feeds, if FEED_TEASERS is True (translatable)
FEED_READ_MORE_LINK = '<p><a href="{link}">{read_more}…</a> ({min_remaining_read})</p>'
# Append a URL query to the FEED_READ_MORE_LINK in Atom and RSS feeds. Advanced
# option used for traffic source tracking.
# Minimum example for use with Piwik: "pk_campaign=feed"
# The following tags exist and are replaced for you:
# {feedRelUri} A relative link to the feed.
# {feedFormat} The name of the syndication format.
# Example using replacement for use with Google Analytics:
# "utm_source={feedRelUri}&utm_medium=nikola_feed&utm_campaign={feedFormat}_feed"
FEED_LINKS_APPEND_QUERY = False
# A HTML fragment describing the license, for the sidebar.
# (translatable)
# LICENSE = ""
# I recommend using the Creative Commons' wizard:
# http://creativecommons.org/choose/
LICENSE = """
<a style="float: right;" rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/">
<img alt="Creative Commons License BY-SA"
style="border-width:0; margin-bottom:12px;"
src="/cc_by_sa-88x31.png"></a>"""
# A small copyright notice for the page footer (in HTML).
# (translatable)
CONTENT_FOOTER = '<small>© 2014-{date} <a href="https://elblogdehumitos.com/">{author}</a> - Powered by <a href="https://getnikola.com" rel="nofollow">Nikola</a> {license}</small>'
# Things that will be passed to CONTENT_FOOTER.format(). This is done
# for translatability, as dicts are not formattable. Nikola will
# intelligently format the setting properly.
# The setting takes a dict. The keys are languages. The values are
# tuples of tuples of positional arguments and dicts of keyword arguments
# to format(). For example, {'en': (('Hello'), {'target': 'World'})}
# results in CONTENT_FOOTER['en'].format('Hello', target='World').
# WARNING: If you do not use multiple languages with CONTENT_FOOTER, this
# still needs to be a dict of this format. (it can be empty if you
# do not need formatting)
# (translatable)
CONTENT_FOOTER_FORMATS = {
DEFAULT_LANG: (
(),
{
"email": BLOG_EMAIL,
"author": BLOG_AUTHOR,
"date": time.gmtime().tm_year,
"license": LICENSE
}
)
}
# A simple copyright tag for inclusion in RSS feeds that works just
# like CONTENT_FOOTER and CONTENT_FOOTER_FORMATS
RSS_COPYRIGHT = 'Contents © {date} <a href="mailto:{email}">{author}</a> {license}'
RSS_COPYRIGHT_PLAIN = 'Contents © {date} {author} {license}'
RSS_COPYRIGHT_FORMATS = CONTENT_FOOTER_FORMATS
# To use comments, you can choose between different third party comment
# systems. The following comment systems are supported by Nikola:
# disqus, facebook, intensedebate, isso, livefyre, muut
# You can leave this option blank to disable comments.
COMMENT_SYSTEM = 'disqus'
# And you also need to add your COMMENT_SYSTEM_ID which
# depends on what comment system you use. The default is
# "nikolademo" which is a test account for Disqus. More information
# is in the manual.
COMMENT_SYSTEM_ID = 'argentinaenpython'
# Create index.html for page folders?
# WARNING: if a page would conflict with the index file (usually
# caused by setting slug to `index`), the PAGE_INDEX
# will not be generated for that directory.
# PAGE_INDEX = False
# Enable comments on pages (i.e. not posts)?
# COMMENTS_IN_PAGES = False
# Enable comments on picture gallery pages?
# COMMENTS_IN_GALLERIES = False
# Use a thumbnail (defined by ".. previewimage:") in list of galleries
# for each gallery
GALLERIES_USE_THUMBNAIL = True
# Image to use as a thumbnail for those galleries that don't have one
# None: show a grey square
# '/url/to/file': show the image in that url
GALLERIES_DEFAULT_THUMBNAIL = None
# HTML title for /GALLERY_FOLDERS/index.html (translatable)
GALLERY_INDEX_TITLE = 'Galería'
# What file should be used for directory indexes?
# Defaults to index.html
# Common other alternatives: default.html for IIS, index.php
# INDEX_FILE = "index.html"
# If a link ends in /index.html, drop the index.html part.
# http://mysite/foo/bar/index.html => http://mysite/foo/bar/
# (Uses the INDEX_FILE setting, so if that is, say, default.html,
# it will instead /foo/default.html => /foo)
STRIP_INDEXES = True
# Should the sitemap list directories which only include other directories
# and no files.
# Default to True
# If this is False
# e.g. /2012 includes only /01, /02, /03, /04, ...: don't add it to the sitemap
# if /2012 includes any files (including index.html)... add it to the sitemap
# SITEMAP_INCLUDE_FILELESS_DIRS = True
# List of files relative to the server root (!) that will be asked to be excluded
# from indexing and other robotic spidering. * is supported. Will only be effective
# if SITE_URL points to server root. The list is used to exclude resources from
# /robots.txt and /sitemap.xml, and to inform search engines about /sitemapindex.xml.
ROBOTS_EXCLUSIONS = ["/archive.html", "/category/*.html"]
# Instead of putting files in <slug>.html, put them in <slug>/index.html.
# No web server configuration is required. Also enables STRIP_INDEXES.
# This can be disabled on a per-page/post basis by adding
# .. pretty_url: False
# to the metadata.
PRETTY_URLS = True
# If True, publish future dated posts right away instead of scheduling them.
# Defaults to False.
# FUTURE_IS_NOW = False
# If True, future dated posts are allowed in deployed output
# Only the individual posts are published/deployed; not in indexes/sitemap
# Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value.
# DEPLOY_FUTURE = False
# If False, draft posts will not be deployed
DEPLOY_DRAFTS = False
# Allows scheduling of posts using the rule specified here (new_post -s)
# Specify an iCal Recurrence Rule: http://www.kanzaki.com/docs/ical/rrule.html
# SCHEDULE_RULE = ''
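# A hypothetical weekly rule (every Monday at 09:00) could look like:
# SCHEDULE_RULE = 'RRULE:FREQ=WEEKLY;BYDAY=MO;BYHOUR=9;BYMINUTE=0'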
# If True, use the scheduling rule to all posts (not pages!) by default
# SCHEDULE_ALL = False
# Do you want a add a Mathjax config file?
# MATHJAX_CONFIG = ""
# If you want support for the $.$ syntax (which may conflict with running
# text!), just use this config:
# MATHJAX_CONFIG = """
# <script type="text/x-mathjax-config">
# MathJax.Hub.Config({
# tex2jax: {
# inlineMath: [ ['$','$'], ["\\\(","\\\)"] ],
# displayMath: [ ['$$','$$'], ["\\\[","\\\]"] ],
# processEscapes: true
# },
# displayAlign: 'center', // Change this to 'left' if you want left-aligned equations.
# "HTML-CSS": {
# styles: {'.MathJax_Display': {"margin": 0}}
# }
# });
# </script>
# """
# Want to use KaTeX instead of MathJax? While KaTeX may not support every
# feature yet, it's faster and the output looks better.
# USE_KATEX = False
# KaTeX auto-render settings. If you want support for the $.$ syntax (which may
# conflict with running text!), just use this config:
# KATEX_AUTO_RENDER = """
# delimiters: [
# {left: "$$", right: "$$", display: true},
# {left: "\\\\[", right: "\\\\]", display: true},
# {left: "\\\\begin{equation*}", right: "\\\\end{equation*}", display: true},
# {left: "$", right: "$", display: false},
# {left: "\\\\(", right: "\\\\)", display: false}
# ]
# """
# Do you want to customize the nbconversion of your IPython notebook?
# IPYNB_CONFIG = {}
# With the following example configuration you can use a custom jinja template
# called `toggle.tpl` which has to be located in your site/blog main folder:
# IPYNB_CONFIG = {'Exporter':{'template_file': 'toggle'}}
# What Markdown extensions to enable?
# You will also get gist, nikola and podcast because those are
# done in the code, hope you don't mind ;-)
# Note: most Nikola-specific extensions are done via the Nikola plugin system,
# with the MarkdownExtension class and should not be added here.
# The default is ['fenced_code', 'codehilite']
MARKDOWN_EXTENSIONS = ['markdown.extensions.fenced_code', 'markdown.extensions.codehilite', 'markdown.extensions.extra']
# Options to be passed to markdown extensions (See https://python-markdown.github.io/reference/)
# Default is {} (no config at all)
# MARKDOWN_EXTENSION_CONFIGS = {}
# Extra options to pass to the pandoc command.
# By default it's empty; it is a list of strings, for example
# ['-F', 'pandoc-citeproc', '--bibliography=/Users/foo/references.bib']
# Pandoc does not demote headers by default. To enable this, you can use, for example
# ['--base-header-level=2']
# PANDOC_OPTIONS = []
# Social buttons. This is sample code for AddThis (which was the default for a
# long time). Insert anything you want here, or even make it empty (which is
# the default right now)
# (translatable)
# SOCIAL_BUTTONS_CODE = """
# <!-- Social buttons -->
# <div id="addthisbox" class="addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style">
# <a class="addthis_button_more">Share</a>
# <ul><li><a class="addthis_button_facebook"></a>
# <li><a class="addthis_button_google_plusone_share"></a>
# <li><a class="addthis_button_linkedin"></a>
# <li><a class="addthis_button_twitter"></a>
# </ul>
# </div>
# <script src="https://s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798"></script>
# <!-- End of social buttons -->
# """
# Show link to source for the posts?
SHOW_SOURCELINK = False
# Copy the source files for your pages?
# Setting it to False implies SHOW_SOURCELINK = False
# COPY_SOURCES = True
# Modify the number of posts per index page
# Defaults to 10
# INDEX_DISPLAY_POST_COUNT = 10
# By default, Nikola generates RSS files for the website and for tags, and
# links to it. Set this to False to disable everything RSS-related.
GENERATE_RSS = False
# By default, Nikola does not generate Atom files for indexes or link to
# them. Generate Atom for tags by setting TAG_PAGES_ARE_INDEXES to True.
# Atom feeds are built based on INDEX_DISPLAY_POST_COUNT and not FEED_LENGTH
# Switch between plain-text summaries and full HTML content using the
# FEED_TEASER option. FEED_LINKS_APPEND_QUERY is also respected. Atom feeds
# are generated even for old indexes and have pagination link relations
# between each other. Old Atom feeds with no changes are marked as archived.
# GENERATE_ATOM = False
# Extension for Atom feed files
# ATOM_EXTENSION = ".atom"
# Only include teasers in Atom and RSS feeds. Disabling this includes the full
# content. Defaults to True.
# FEED_TEASERS = True
# Strip HTML from Atom and RSS feed summaries and content. Defaults to False.
# FEED_PLAIN = False
# Number of posts in Atom and RSS feeds.
# FEED_LENGTH = 10
# Include preview image as a <figure><img></figure> at the top of the entry.
# Requires FEED_PLAIN = False. If the preview image is found in the content,
# it will not be included again. The image will be included as-is; aim to optimize
# the image source for Feedly, Apple News, Flipboard, and other popular clients.
# FEED_PREVIEWIMAGE = True
# RSS_LINK is a HTML fragment to link the RSS or Atom feeds. If set to None,
# the base.tmpl will use the feed Nikola generates. However, you may want to
# change it for a FeedBurner feed or something else.
# RSS_LINK = None
# A search form to search this site, for the sidebar. You can use a Google
# custom search (https://www.google.com/cse/)
# Or a DuckDuckGo search: https://duckduckgo.com/search_box.html
# Default is no search form.
# (translatable)
# SEARCH_FORM = ""
#
# This search form works for any site and looks good in the "site" theme where
# it appears on the navigation bar:
#
# SEARCH_FORM = """
# <!-- DuckDuckGo custom search -->
# <form method="get" id="search" action="https://duckduckgo.com/"
# class="navbar-form pull-left">
# <input type="hidden" name="sites" value="%s">
# <input type="hidden" name="k8" value="#444444">
# <input type="hidden" name="k9" value="#D51920">
# <input type="hidden" name="kt" value="h">
# <input type="text" name="q" maxlength="255"
# placeholder="Search…" class="span2" style="margin-top: 4px;">
# <input type="submit" value="DuckDuckGo Search" style="visibility: hidden;">
# </form>
# <!-- End of custom search -->
# """ % SITE_URL
#
# If you prefer a Google search form, here's an example that should just work:
# SEARCH_FORM = """
# <!-- Google custom search -->
# <form method="get" action="https://www.google.com/search" class="navbar-form navbar-right" role="search">
# <div class="form-group">
# <input type="text" name="q" class="form-control" placeholder="Search">
# </div>
# <button type="submit" class="btn btn-primary">
# <span class="glyphicon glyphicon-search"></span>
# </button>
# <input type="hidden" name="sitesearch" value="%s">
# </form>
# <!-- End of custom search -->
# """ % SITE_URL
# Use content distribution networks for jQuery, twitter-bootstrap css and js,
# and html5shiv (for older versions of Internet Explorer)
# If this is True, jQuery and html5shiv are served from the Google CDN and
# Bootstrap is served from BootstrapCDN (provided by MaxCDN)
# Set this to False if you want to host your site without requiring access to
# external resources.
# USE_CDN = False
# Check for USE_CDN compatibility.
# If you are using custom themes, have configured the CSS properly and are
# receiving warnings about incompatibility but believe they are incorrect, you
# can set this to False.
# USE_CDN_WARNING = True
# Extra things you want in the pages HEAD tag. This will be added right
# before </head>
# (translatable)
EXTRA_HEAD_DATA = """
<link rel="stylesheet" href="/assets/css/font-awesome.min.css">
"""
# Google Analytics or whatever else you use. Added to the bottom of <body>
# in the default template (base.tmpl).
# (translatable)
BODY_END = """
<!-- Piwik -->
<script type="text/javascript">
var _paq = _paq || [];
_paq.push(['trackPageView']);
_paq.push(['enableLinkTracking']);
(function() {
var u="//elblogdehumitos.com/piwik/";
_paq.push(['setTrackerUrl', u+'piwik.php']);
_paq.push(['setSiteId', 3]);
var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
g.type='text/javascript'; g.async=true; g.defer=true; g.src=u+'piwik.js'; s.parentNode.insertBefore(g,s);
})();
</script>
<noscript><p><img src="//elblogdehumitos.com/piwik/piwik.php?idsite=3" style="border:0;" alt="" /></p></noscript>
<!-- End Piwik Code -->
<script type="application/javascript">
// de-obfuscate emails
window.onload = function() {
var e = document.getElementsByClassName('reference external');
for (i=0; i < e.length; i++) {
if (e[i].href.indexOf("mailto:") == 0) {
e[i].href = e[i].href.replace("%C3%B0", "@").replace("%C3%B8", ".");
e[i].text = e[i].text.replace(/ð/, "@").replace(/ø/, ".");
}
}
};
</script>
"""
# The possibility to extract metadata from the filename by using a
# regular expression.
# To make it work you need to name parts of your regular expression.
# The following names will be used to extract metadata:
# - title
# - slug
# - date
# - tags
# - link
# - description
#
# An example re is the following:
# '.*\/(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.rst'
# (Note the '.*\/' in the beginning -- matches source paths relative to conf.py)
# FILE_METADATA_REGEXP = None
# Should titles fetched from file metadata be unslugified (made prettier?)
FILE_METADATA_UNSLUGIFY_TITLES = True
# If enabled, extract metadata from docinfo fields in reST documents
# USE_REST_DOCINFO_METADATA = False
# If enabled, hide docinfo fields in reST document output
# HIDE_REST_DOCINFO = False
# Map metadata from other formats to Nikola names.
# Supported formats: yaml, toml, rest_docinfo, markdown_metadata
# METADATA_MAPPING = {}
#
# Example for Pelican compatibility:
# METADATA_MAPPING = {
# "rest_docinfo": {"summary": "description", "modified": "updated"},
# "markdown_metadata": {"summary": "description", "modified": "updated"}
# }
# Other examples: https://getnikola.com/handbook.html#mapping-metadata-from-other-formats
# Map metadata between types/values. (Runs after METADATA_MAPPING.)
# Supported formats: nikola, yaml, toml, rest_docinfo, markdown_metadata
# The value on the right should be a dict of callables.
# METADATA_VALUE_MAPPING = {}
# Examples:
# METADATA_VALUE_MAPPING = {
# "yaml": {"keywords": lambda value: ', '.join(value)}, # yaml: 'keywords' list -> str
# "nikola": {
# "widgets": lambda value: value.split(', '), # nikola: 'widgets' comma-separated string -> list
# "tags": str.lower # nikola: force lowercase 'tags' (input would be string)
# }
# }
# Additional metadata that is added to a post when creating a new_post
# ADDITIONAL_METADATA = {}
# Nikola supports Open Graph Protocol data for enhancing link sharing and
# discoverability of your site on Facebook, Google+, and other services.
# Open Graph is enabled by default.
# USE_OPEN_GRAPH = True
# Nikola supports Twitter Card summaries, but they are disabled by default.
# They make it possible for you to attach media to Tweets that link
# to your content.
#
# IMPORTANT:
# Please note, that you need to opt-in for using Twitter Cards!
# To do this please visit https://cards-dev.twitter.com/validator
#
# Uncomment and modify the following lines to match your accounts.
# Images displayed come from the `previewimage` meta tag.
# You can specify the card type by using the `card` parameter in TWITTER_CARD.
# TWITTER_CARD = {
# # 'use_twitter_cards': True, # enable Twitter Cards
# # 'card': 'summary', # Card type, you can also use 'summary_large_image',
# # see https://dev.twitter.com/cards/types
# # 'site': '@website', # twitter nick for the website
# # 'creator': '@username', # Username for the content creator / author.
# }
# If webassets is installed, bundle JS and CSS into single files to make
# site loading faster in an HTTP/1.1 environment; this is not recommended
# for HTTP/2.0 when caching is used. Defaults to True.
# USE_BUNDLES = True
# Plugins you don't want to use. Be careful :-)
# DISABLED_PLUGINS = ["render_galleries"]
# Special settings to disable only parts of the indexes plugin (to allow RSS
# but no blog indexes, or to allow blog indexes and Atom but no site-wide RSS).
# Use with care.
# DISABLE_INDEXES = False
# DISABLE_MAIN_RSS_FEED = False
# Add the absolute paths to directories containing plugins to use them.
# For example, the `plugins` directory of your clone of the Nikola plugins
# repository.
# EXTRA_PLUGINS_DIRS = []
# Add the absolute paths to directories containing themes to use them.
# For example, the `v7` directory of your clone of the Nikola themes
# repository.
# EXTRA_THEMES_DIRS = []
# List of regular expressions, links matching them will always be considered
# valid by "nikola check -l"
# LINK_CHECK_WHITELIST = []
# If set to True, enable optional hyphenation in your posts (requires pyphen).
# Enabling hyphenation has been shown to break math support in some cases;
# use with caution.
# HYPHENATE = False
# The <hN> tags in HTML generated by certain compilers (reST/Markdown)
# will be demoted by that much (1 → h1 will become h2 and so on)
# This was a hidden feature of the Markdown and reST compilers in the
# past. Useful especially if your post titles are in <h1> tags too, for
# example.
# (defaults to 1.)
# DEMOTE_HEADERS = 1
# Docutils, by default, will perform a transform in your documents
# extracting unique titles at the top of your document and turning
# them into metadata. This surprises a lot of people, and setting
# this option to True will prevent it.
# NO_DOCUTILS_TITLE_TRANSFORM = False
# If you don’t like slugified file names ([a-z0-9] and a literal dash)
# and would prefer to use all the characters your file system allows,
# set this to False.
# USE WITH CARE! This is also not guaranteed to be perfect, and may
# sometimes crash Nikola, your web server, or eat your cat.
# USE_SLUGIFY = True
# Templates will use those filters, along with the defaults.
# Consult your engine's documentation on filters if you need help defining
# those.
# TEMPLATE_FILTERS = {}
# Put in global_context things you want available on all your templates.
# It can be anything, data, functions, modules, etc.
GLOBAL_CONTEXT = {}
# Add functions here and they will be called with template
# GLOBAL_CONTEXT as parameter when the template is about to be
# rendered
GLOBAL_CONTEXT_FILLER = []
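# A hedged illustration (not part of the original configuration): both settings
# accept arbitrary Python objects. The names "site_motto" and "add_build_date"
# below are assumed for the example only, and the exact filler call signature
# may differ between Nikola versions, so it is kept variadic here.
# import datetime
#
# GLOBAL_CONTEXT = {'site_motto': 'Python en Argentina'}
#
# def add_build_date(context, *args):
#     context['build_date'] = datetime.datetime.now().isoformat()
#
# GLOBAL_CONTEXT_FILLER = [add_build_date]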
# Default options for fotorama_directive plugin
# To see all the options go to: http://fotorama.io
FOTORAMA_OPTIONS = {
'nav': 'thumbs',
'ratio': '16/9',
'keyboard': 'true',
'thumbwidth': 150,
'thumbheight': 150,
'allowfullscreen': 'native',
}
# Default options for nanogallery_directive plugin
# To see all the options go to: http://nanogallery.brisbois.fr/
NANOGALLERY_OPTIONS = {
'theme': 'clean',
'maxitemsperline': 4,
'thumbnailgutterwidth': 10,
'thumbnailgutterheight': 10,
'locationhash': False,
'colorscheme': 'lightBackground',
'thumbnailwidth': 250,
'thumbnailheight': 'auto',
'thumbnailhovereffect': 'imageScale150',
'thumbnaillabel': {
'display': 'false',
},
}
|
humitos/argentinaenpython.com
|
web/conf.py
|
Python
|
gpl-2.0
| 62,554
|
[
"VisIt"
] |
16c9a0ec8fb3b9a35be22c8922dc7711d4dd1eff7cd5bfb2f2755d602c567039
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
class GeneticCodeError(Exception):
"""Base class exception used by the GeneticCode class"""
pass
class GeneticCodeInitError(ValueError, GeneticCodeError):
"""Exception raised by the GeneticCode class upon a bad initialization"""
pass
class InvalidCodonError(KeyError, GeneticCodeError):
"""Exception raised by the GeneticCode class if __getitem__ fails"""
pass
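# Illustrative only (not part of the original module): the class hierarchy lets
# callers catch either a specific error or the shared GeneticCodeError base;
# the codon lookup below is a hypothetical example.
#
# try:
#     residue = genetic_code['XYZ']   # hypothetical GeneticCode lookup
# except InvalidCodonError:           # also catchable as KeyError / GeneticCodeError
#     residue = None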
|
Achuth17/scikit-bio
|
skbio/sequence/_exception.py
|
Python
|
bsd-3-clause
| 810
|
[
"scikit-bio"
] |
4eaf2edad5529b66812aefeed1d6e46b1e104bca925b94147c7652effbe8552f
|
""" Module for similarity boosed QSAR project AZ + TUM
author: Tobias Girschick; tobias.girschick@in.tum.de
TUM - I12 (wwwkramer.in.tum.de/girschic)
dependencies:
"""
import os,sys
import orange
from cinfony import rdk
from rdkit import DataStructs
from rdkit.Chem.Fingerprints import FingerprintMols
from rdkit.Chem.AtomPairs import Pairs
import AZOrangeConfig as AZOC
from AZutilities import dataUtilities
# import userDefined Utilities if it exists
if os.path.isfile(os.path.join( os.environ["AZORANGEHOME"], "azorange","AZutilities","extraUtilities.py")):
from AZutilities import extraUtilities
methods = { "RDKit Topological" :'rdk_topo_fps',
"RDKit MACCS keys" :'rdk_MACCS_keys',
"RDKit Circular Connectivity" :'rdk_morgan_fps',
"RDKit Circular Feature" :'rdk_morgan_features_fps',
"RDKit Atom Pairs" :'rdk_atompair_fps'
#"AZO-pharmacophore fps" :'azo_pharmacophore_fps'
}
def getSimDescriptors(InActives, InData, methods, active_ids = None, pharmacophore_file = None, callBack = None):
""" calculates similarity descriptors for a training set (orange object) using the
given similarity methods against the given actives
Possible method strings in methods are the names of the sim_* methods below,
e.g. rdk_topo_fps for sim_rdk_topo_fps
        callBack function, if defined, will be called on each step sending the percentage done (0-100):
e.g. callBack(25)
        the callBack function shall return True or False, which will indicate to this method if the process is to be continued or not.
            e.g. if callBack(25) == False it indicates the caller wants to stop the process of calculating descriptors
"""
    # Pre-process input Data to standardize the SMILES
SMILESattr = getSMILESAttr(InData)
if not SMILESattr:
return None
#TODO: Create a method in dataUtilities to standardize the attribute smilesName in place having the attr origSmiles as ID
if "AZutilities.extraUtilities" in sys.modules and hasattr(extraUtilities, "StandardizeSMILES"):
# Call a method for standardizing the SMILES in Data.
# The method is expected to change the attribute defined as smiAttr in data object
cleanedData = True
# Process InData
tmpDomain = orange.Domain([orange.StringVariable("OrigSMI_ID")]+[attr for attr in InData.domain])
data = orange.ExampleTable(tmpDomain,InData)
# Fill the OrigSMI_ID
for ex in data:
ex["OrigSMI_ID"] = ex[SMILESattr]
extraUtilities.StandardizeSMILES(data, smiAttr = SMILESattr, cName="OrigSMI_ID")
# Process Input actives
activesDomain = orange.Domain([orange.StringVariable("OrigSMI_ID"), orange.StringVariable("SMILES")],0)
activesData = orange.ExampleTable(activesDomain)
for act in InActives:
activesData.append([act,act])
extraUtilities.StandardizeSMILES(activesData, smiAttr = "SMILES", cName="OrigSMI_ID")
#print activesData.domain
actives = []
for ex in activesData:
actives.append(str(ex["SMILES"].value))
else:
data = InData
actives = InActives
cleanedData = False
# adjust the header
atts = []
for m in methods:
count = 1
for a in actives:
attname = m + '(active_'+ str(count)+ ')'
atts.append(orange.FloatVariable(attname))
count += 1
newdomain = orange.Domain(data.domain.attributes + atts, data.domain.classVar)
newdata = orange.ExampleTable(newdomain, data)
att_idx = 0
# if callBack is defined, it will be called with the percentage done, i.e. 0-100
if active_ids:
nTotalSteps = len(newdata) * ( (len(methods)-1) * len(actives) + len(active_ids) )
else:
nTotalSteps = len(methods) * len(actives) * len(newdata)
stepsDone = 0
# fill up the data
for m in methods:
if m == 'rdk_topo_fps':
count = 1
for a in actives:
attname = m + '(active_'+ str(count)+ ')'
for j in range(len(newdata)):
instance = newdata[j]
tmp = orange.Value(atts[att_idx], orng_sim_rdk_topo_fps(a, instance))
instance[atts[att_idx]] = tmp
if callBack:
stepsDone += 1
if not callBack((100*stepsDone)/nTotalSteps): return None
att_idx += 1
elif m == 'rdk_MACCS_keys':
count = 1
for a in actives:
attname = m + '(active_'+ str(count)+ ')'
for j in range(len(newdata)):
instance = newdata[j]
tmp = orange.Value(atts[att_idx], orng_sim_rdk_MACCS_keys(a, instance))
instance[atts[att_idx]] = tmp
if callBack:
stepsDone += 1
if not callBack((100*stepsDone)/nTotalSteps): return None
att_idx += 1
elif m == 'rdk_morgan_fps':
count = 1
for a in actives:
attname = m + '(active_'+ str(count)+ ')'
for j in range(len(newdata)):
instance = newdata[j]
tmp = orange.Value(atts[att_idx], orng_sim_rdk_morgan_fps(a, instance))
instance[atts[att_idx]] = tmp
if callBack:
stepsDone += 1
if not callBack((100*stepsDone)/nTotalSteps): return None
att_idx += 1
elif m == 'rdk_morgan_features_fps':
count = 1
for a in actives:
attname = m + '(active_'+ str(count)+ ')'
for j in range(len(newdata)):
instance = newdata[j]
tmp = orange.Value(atts[att_idx], orng_sim_rdk_morgan_features_fps(a, instance))
instance[atts[att_idx]] = tmp
if callBack:
stepsDone += 1
if not callBack((100*stepsDone)/nTotalSteps): return None
att_idx += 1
elif m == 'rdk_atompair_fps':
count = 1
for a in actives:
attname = m + '(active_'+ str(count)+ ')'
for j in range(len(newdata)):
instance = newdata[j]
tmp = orange.Value(atts[att_idx], orng_sim_rdk_atompair_fps(a, instance))
instance[atts[att_idx]] = tmp
if callBack:
stepsDone += 1
if not callBack((100*stepsDone)/nTotalSteps): return None
att_idx += 1
elif m == 'azo_pharmacophore_fps':
count = 1
for a in active_ids:
attname = m + '(active_'+ str(count)+ ')'
for j in range(len(newdata)):
instance = newdata[j]
tmp = orange.Value(atts[att_idx], azo_pharmacophore_az_inhouse(a, instance, pharmacophore_file))
instance[atts[att_idx]] = tmp
if callBack:
stepsDone += 1
if not callBack((100*stepsDone)/nTotalSteps): return None
att_idx += 1
if cleanedData:
#Remove the fixed SMILES and revert to the Original SMILES
newdata = dataUtilities.attributeDeselectionData(newdata,[SMILESattr])
newdata.domain["OrigSMI_ID"].name = SMILESattr
return newdata
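# Illustrative usage sketch (not part of the original module). The input file
# name, the SMILES strings and the choice of two fingerprint methods are
# assumptions made only for this example; the callBack contract follows the
# docstring of getSimDescriptors (return False to abort the calculation).
#
# def reportProgress(percentDone):
#     print "similarity descriptors: %i%% done" % percentDone
#     return True    # returning False would stop the descriptor calculation
#
# actives = ["c1ccccc1O", "CCN(CC)CC"]              # assumed active SMILES
# data = orange.ExampleTable("trainData.tab")       # assumed training data
# newData = getSimDescriptors(actives, data,
#                             ["rdk_topo_fps", "rdk_MACCS_keys"],
#                             callBack=reportProgress)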
def azo_pharmacophore_az_inhouse(active_id, train_instance, pharmacophore_file):
""" calculate the pharmacophore fingerprint similarity using the AZ inhouse calculated pharmacophore fp
(the fps are read from a text file for first implementation convenience)
input are the smiles string and a orange data instance
        returned is a similarity value
"""
cidName = getCIDAttr(train_instance)
if not cidName: return None
train_id = str(int(train_instance[cidName].value))
#print "act"
fp_A = getPharmacophoreFP(active_id, pharmacophore_file)
#print "train " + str(train_id)
fp_T = getPharmacophoreFP(train_id, pharmacophore_file)
    if (fp_A == None or fp_T == None):
        print "Couldn't calc both FPs"
        sim = None
    else:
        sim = getContinuousTanimoto(fp_A,fp_T)
    return sim
def getPharmacophoreFP(mol_id, pharmacophore_file):
""" extracts the pharmacophore fingerprint in orange fingerprint format from
the AZ in-house pharmacophore text file via a mol id match
"""
pf = open(pharmacophore_file, 'r')
fp_vals = {}
for line in pf:
splitlist = str(line.strip()).split(" ")
# remove CID, smiles string and bit count
cid = splitlist.pop(0)
splitlist.pop(0)
if (cid.strip() == mol_id.strip()):
#print "mol found"
splitlist.pop(0)
for bit in range(len(splitlist)):
if bit % 2 == 0.0:
fp_vals[splitlist[bit]] = splitlist[bit+1]
break
#print fp_vals
pf.close()
return fp_vals
def getContinuousTanimoto(fp_A, fp_B):
""" calculate the Tanimoto coefficient for countinuous valued fingerprints
according to:
sim(a,b) = sum_i^N x_a*x_b / sum x_a^2 + sum x_b^2 - sum x_a*x_b
fp_A and fp_B are dictionaries with key = bit number and value = bit value
if the bit is set to 0 no key value pair is assumed to be set
"""
sum_b = 0.0
sum_a = 0.0
sum_c = 0.0
for bit,value in fp_A.iteritems():
sum_a = sum_a + int(value)**2
for bit_b,value_b in fp_B.iteritems():
sum_b = sum_b + int(value_b)**2
if (bit_b in fp_A):
sum_c = sum_c + (int(value_b) * int(fp_A[bit_b]))
sim = sum_c / (sum_a + sum_b - sum_c)
#print "A: " + str(sum_a) + " B: " + str(sum_b) + " C: " + str(sum_c) + " SIM: " + str(sim)
return sim
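# Worked example (illustration only, not in the original module): for the two
# sparse count fingerprints below, sum_a = 2**2 + 1**2 = 5, sum_b = 2**2 + 3**2
# = 13, and the shared bit '7' contributes sum_c = 2*2 = 4, so the continuous
# Tanimoto similarity is 4 / (5 + 13 - 4) ~= 0.286.
#
# fpA = {'7': '2', '12': '1'}
# fpB = {'7': '2', '33': '3'}
# sim = getContinuousTanimoto(fpA, fpB)    # -> 0.2857...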
def orng_sim_rdk_topo_fps(smile_active, train_instance):
""" calculate the fingerprint similarity using the RDK topological fingerprints
(The fingerprinting algorithm used is similar to that used in the Daylight fingerprinter)
input are a smiles string and a orange data instance
returned is a similarity value
"""
smilesName = getSMILESAttr(train_instance)
if not smilesName: return None
smile_train = str(train_instance[smilesName].value)
molAct = getMolFromSmiles(smile_active)
molTrain = getMolFromSmiles(smile_train)
if not molAct: return None
if not molTrain: return None
fp_A = FingerprintMols.FingerprintMol(molAct)
fp_T = FingerprintMols.FingerprintMol(molTrain)
sim = DataStructs.FingerprintSimilarity(fp_A,fp_T)
return sim
def orng_sim_rdk_MACCS_keys(smile_active, train_instance):
""" calculate the fingerprint similarity using the RDK MACCS keys
(SMARTS-based implementation of the 166 public MACCS keys)
input are a smiles string and a orange data instance
        returned is a similarity value
"""
smilesName = getSMILESAttr(train_instance)
if not smilesName: return None
smile_train = str(train_instance[smilesName].value)
molAct = getMolFromSmiles(smile_active)
molTrain = getMolFromSmiles(smile_train)
if not molAct: return None
if not molTrain: return None
fp_A = rdk.Chem.MACCSkeys.GenMACCSKeys(molAct)
fp_T = rdk.Chem.MACCSkeys.GenMACCSKeys(molTrain)
sim = DataStructs.FingerprintSimilarity(fp_A,fp_T)
return sim
def orng_sim_rdk_morgan_fps(smile_active, train_instance):
""" calculate the fingerprint similarity using the RDK morgan fingerprints
(circular fingerprints, ECFP, connectivity-based invariant)
input are a smiles string and a orange data instance
        returned is a similarity value
"""
smilesName = getSMILESAttr(train_instance)
if not smilesName: return None
smile_train = str(train_instance[smilesName].value)
molAct = getMolFromSmiles(smile_active)
molTrain = getMolFromSmiles(smile_train)
if not molAct: return None
if not molTrain: return None
fp_A = rdk.AllChem.GetMorganFingerprint(molAct,2)
fp_T = rdk.AllChem.GetMorganFingerprint(molTrain,2)
sim = DataStructs.DiceSimilarity(fp_A,fp_T)
return sim
def orng_sim_rdk_morgan_features_fps(smile_active, train_instance):
""" calculate the fingerprint similarity using the RDK morgan fingerprints
(circular fingerprints, FCFP, feature-based invariant)
input are a smiles string and a orange data instance
        returned is a similarity value
"""
smilesName = getSMILESAttr(train_instance)
if not smilesName: return None
smile_train = str(train_instance[smilesName].value)
molAct = getMolFromSmiles(smile_active)
molTrain = getMolFromSmiles(smile_train)
if not molAct: return None
if not molTrain: return None
fp_A = rdk.AllChem.GetMorganFingerprint(molAct,2,useFeatures=True)
fp_T = rdk.AllChem.GetMorganFingerprint(molTrain,2,useFeatures=True)
sim = DataStructs.DiceSimilarity(fp_A,fp_T)
return sim
def orng_sim_rdk_atompair_fps(smile_active, train_instance):
""" calculate the fingerprint similarity using the RDK atom pair fingerprints
input are a smiles string and a orange data instance
        returned is a similarity value
"""
smilesName = getSMILESAttr(train_instance)
if not smilesName: return None
smile_train = str(train_instance[smilesName].value)
molAct = getMolFromSmiles(smile_active)
molTrain = getMolFromSmiles(smile_train)
if not molAct: return None
if not molTrain: return None
fp_A = Pairs.GetAtomPairFingerprint(molAct)
fp_T = Pairs.GetAtomPairFingerprint(molTrain)
sim = DataStructs.DiceSimilarity(fp_A,fp_T)
return sim
def get_similarity_matrix(actives, trainset, methods):
""" calculates similarity descriptors for a training set (list of smiles) using the
given similarity methods against the given actives
Possible method strings in methods are the names of the sim_* methods below,
e.g. rdk_topo_fps for sim_rdk_topo_fps
"""
sim_matrix = []
for m in methods:
if m == 'rdk_topo_fps':
for a in actives:
sim_matrix.append(sim_rdk_topo_fps(a, trainset))
elif m == 'rdk_MACCS_keys':
for a in actives:
sim_matrix.append(sim_rdk_MACCS_keys(a, trainset))
elif m == 'rdk_morgan_fps':
for a in actives:
sim_matrix.append(sim_rdk_morgan_fps(a, trainset))
elif m == 'rdk_atompair_fps':
for a in actives:
sim_matrix.append(sim_rdk_atompair_fps(a, trainset))
return sim_matrix
def sim_rdk_atompair_fps(smiA, smisT):
    """ calculate the fingerprint similarity using the RDK atom pair fingerprints
input are a smiles string and a list of smiles strings
returned is a list of similarities
"""
fp_A = Pairs.GetAtomPairFingerprint(getMolFromSmiles(smiA))
fps_T = [Pairs.GetAtomPairFingerprint(getMolFromSmiles(y)) for y in smisT]
sim_vector = []
for t in fps_T:
sim_vector.append(DataStructs.DiceSimilarity(fp_A,t))
return sim_vector
def sim_rdk_topo_fps(smiA, smisT):
""" calculate the fingerprint similarity using the RDK topological fingerprints
(The fingerprinting algorithm used is similar to that used in the Daylight fingerprinter)
input are a smiles string and a list of smiles strings
returned is a list of similarities
"""
fp_A = FingerprintMols.FingerprintMol(getMolFromSmiles(smiA))
fps_T = [FingerprintMols.FingerprintMol(getMolFromSmiles(y)) for y in smisT]
sim_vector = []
for t in fps_T:
sim_vector.append(DataStructs.FingerprintSimilarity(fp_A,t))
return sim_vector
def sim_rdk_MACCS_keys(smiA, smisT):
""" calculate the fingerprint similarity using the RDK MACCS keys
(SMARTS-based implementation of the 166 public MACCS keys)
input are a smiles string and a list of smiles strings
returned is a list of similarities
"""
fp_A = rdk.Chem.MACCSkeys.GenMACCSKeys(getMolFromSmiles(smiA))
fps_T = [rdk.Chem.MACCSkeys.GenMACCSKeys(getMolFromSmiles(y)) for y in smisT]
sim_vector = []
for t in fps_T:
sim_vector.append(DataStructs.FingerprintSimilarity(fp_A,t))
return sim_vector
def sim_rdk_morgan_fps(smiA, smisT):
""" calculate the fingerprint similarity using the RDK morgan fingerprints
(circular fingerprints)
input are a smiles string and a list of smiles strings
returned is a list of similarities
"""
fp_A = rdk.AllChem.GetMorganFingerprint(getMolFromSmiles(smiA),2)
fps_T = [rdk.AllChem.GetMorganFingerprint(getMolFromSmiles(y),2) for y in smisT]
sim_vector = []
for t in fps_T:
sim_vector.append(DataStructs.DiceSimilarity(fp_A,t))
return sim_vector
def getCIDAttr(data):
cidName = None
# "PUBCHEM_CID"
for attr in [a.name for a in data.domain] + [a.name for a in data.domain.getmetas().values()]:
if attr in ['"PUBCHEM_CID"',"PUBCHEM_CID", "CID", '"CID"']:
cidName = attr
if not cidName:
print "Warning: The data set does not contain any known compound identifier"
print "No pharmacophoric descriptors added!"
return None
else:
return cidName
def getSMILESAttr(data):
# Check that the data contains a SMILES attribute
smilesName = dataUtilities.getSMILESAttr(data)
if not smilesName:
print "Warning: The data set does not contain any known smiles attribute!"
print "No similarity descriptors added!"
return None
else:
return smilesName
def getMolFromSmiles(SMILES):
""" Create Chem-Mol from SMILES being more forgiven with SMILES standards"""
# TODO: Should be used a clean tool to standerdize the SMILES
# 1) Try the usual way by setting sanitize flag
chemMol = rdk.Chem.MolFromSmiles(SMILES,True)
# 2) Try to unset the sanitize flag. It often helps
if not chemMol:
chemMol = rdk.Chem.MolFromSmiles(SMILES,False)
return chemMol
|
JonnaStalring/AZOrange
|
azorange/AZutilities/SimBoostedQSAR.py
|
Python
|
lgpl-3.0
| 22,063
|
[
"RDKit"
] |
0c26a7f71f71cba8899c7c9f1c511c160d7e372ab7e8891da98495610a4fd335
|
# Copyright 2015 ClusterHQ. See LICENSE file for details.
'''
Python wrappers for **libzfs_core** library.
*libzfs_core* is intended to be a stable, committed interface for programmatic
administration of ZFS.
This wrapper provides one-to-one wrappers for libzfs_core API functions,
but the signatures and types are more natural to Python.
nvlists are wrapped as dictionaries or lists depending on their usage.
Some parameters have default values depending on typical use for
increased convenience.
Output parameters are not used and return values are directly returned.
Enumerations and bit flags become strings and lists of strings in Python.
Errors are reported as exceptions rather than integer errno-style
error codes. The wrapper takes care to provide one-to-many mapping
of the error codes to the exceptions by interpreting a context
in which the error code is produced.
To submit an issue or contribute to development of this package
please visit its `GitHub repository <https://github.com/ClusterHQ/pyzfs>`_.
.. data:: MAXNAMELEN
Maximum length of any ZFS name.
'''
from ._constants import (
MAXNAMELEN,
)
from ._libzfs_core import (
lzc_create,
lzc_clone,
lzc_rollback,
lzc_snapshot,
lzc_snap,
lzc_destroy_snaps,
lzc_bookmark,
lzc_get_bookmarks,
lzc_destroy_bookmarks,
lzc_snaprange_space,
lzc_hold,
lzc_release,
lzc_get_holds,
lzc_send,
lzc_send_space,
lzc_receive,
lzc_recv,
lzc_exists,
is_supported,
lzc_promote,
lzc_rename,
lzc_destroy,
lzc_inherit_prop,
lzc_set_prop,
lzc_get_props,
lzc_list_children,
lzc_list_snaps,
)
__all__ = [
'ctypes',
'exceptions',
'MAXNAMELEN',
'lzc_create',
'lzc_clone',
'lzc_rollback',
'lzc_snapshot',
'lzc_snap',
'lzc_destroy_snaps',
'lzc_bookmark',
'lzc_get_bookmarks',
'lzc_destroy_bookmarks',
'lzc_snaprange_space',
'lzc_hold',
'lzc_release',
'lzc_get_holds',
'lzc_send',
'lzc_send_space',
'lzc_receive',
'lzc_recv',
'lzc_exists',
'is_supported',
'lzc_promote',
'lzc_rename',
'lzc_destroy',
'lzc_inherit_prop',
'lzc_set_prop',
'lzc_get_props',
'lzc_list_children',
'lzc_list_snaps',
]
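# Illustrative usage sketch (not part of the original package). The pool,
# filesystem and snapshot names are assumptions made only for this example;
# argument types (e.g. byte vs. text strings) should be checked against the
# individual function docstrings.
#
# import libzfs_core as lzc
#
# if not lzc.lzc_exists(b'tank/home'):
#     lzc.lzc_create(b'tank/home')                  # create the filesystem
# lzc.lzc_snapshot([b'tank/home@backup-2015'])      # atomic snapshot(s)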
# vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4
|
ClusterHQ/pyzfs
|
libzfs_core/__init__.py
|
Python
|
apache-2.0
| 2,329
|
[
"VisIt"
] |
3d7c3da7ee8f55ef2f2a0682e991457d4248fbb614752be7ecf6ca5fc32854bc
|
# -*- coding: utf-8 -*-
"""Create pRF time courses models."""
# Part of py_pRF_mapping library
# Copyright (C) 2016 Ingo Marquardt
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import multiprocessing as mp
import h5py
from pyprf.analysis.model_creation_timecourses_par import prf_par
def crt_prf_tcmdl(aryPixConv, strPathMdl, tplVslSpcSze=(200, 200), varNumX=40,
varNumY=40, varExtXmin=-5.19, varExtXmax=5.19,
varExtYmin=-5.19, varExtYmax=5.19, varPrfStdMin=0.1,
varPrfStdMax=7.0, varNumPrfSizes=40, varPar=10):
"""
Create pRF time courses models.
Parameters
----------
aryPixConv : np.array
4D numpy array containing the pixel-wise, HRF-convolved design matrix,
with the following structure: `aryPixConv[aryPixConv[x-pixels,
y-pixels, conditions, volumes]`.
strPathMdl : str
Filepath of pRF time course models (including file name, but without
file extension). If `strPathMdl` is not `None`, model time courses are
saved to disk in hdf5 format during model creation in order to avoid
out of memory problems.
tplVslSpcSze : tuple
Pixel size of visual space model in which the pRF models are created
(x- and y-dimension).
varNumX : int
Number of x-positions in the visual space to model.
varNumY : int
Number of y-positions in the visual space to model.
varExtXmin : float
Extent of visual space from centre of the screen in negative
x-direction (i.e. from the fixation point to the left end of the
screen) in degrees of visual angle.
varExtXmax : float
Extent of visual space from centre of the screen in positive
x-direction (i.e. from the fixation point to the right end of the
screen) in degrees of visual angle.
varExtYmin : float
Extent of visual space from centre of the screen in negative
y-direction (i.e. from the fixation point to the lower end of the
screen) in degrees of visual angle.
varExtYmax : float
Extent of visual space from centre of the screen in positive
y-direction (i.e. from the fixation point to the upper end of the
screen) in degrees of visual angle.
    varPrfStdMin : float
        Minimum pRF model size (standard deviation of 2D Gaussian) in degrees
        of visual angle.
    varPrfStdMax : float
Maximum pRF model size (standard deviation of 2D Gaussian) in degrees
of visual angle.
varNumPrfSizes : int
Number of pRF sizes to model.
varPar : int
Number of processes to run in parallel (multiprocessing).
Returns
-------
aryPrfTc5D : np.array
5D numpy array with pRF time course models, with following dimensions:
`aryPrfTc5D[x-position, y-position, SD, condition, volume]`.
Notes
-----
This function creates the pRF time course models, from which the best-
fitting model for each voxel will be selected.
"""
# Number of conditions:
varNumCon = aryPixConv.shape[2]
# Number of volumes:
varNumVol = aryPixConv.shape[3]
# Only fit pRF models if dimensions of pRF time course models are
# correct.
strErrMsg = ('Aspect ratio of visual space models does not agree with'
+ ' specified number of pRFs to model.')
lgcAssert = ((float(tplVslSpcSze[0]) / float(varNumX))
== (float(tplVslSpcSze[1]) / float(varNumY)))
assert lgcAssert, strErrMsg
# Calculate the scaling factor from degrees of visual angle to pixels in
# the upsampled visual space separately for the x- and the y-directions
# (the two should be the same).
varDgr2PixUpX = float(tplVslSpcSze[0]) / float(varExtXmax - varExtXmin)
varDgr2PixUpY = float(tplVslSpcSze[1]) / float(varExtYmax - varExtYmin)
# The factor relating pixels in the upsampled visual space to degrees of
# visual angle should be roughly the same (allowing for some rounding error
# if the visual stimulus was not square).
strErrMsg = ('The ratio of X and Y dimensions in stimulus space (in '
+ 'degrees of visual angle) and the ratio of X and Y '
+ 'dimensions in the upsampled visual space do not agree.')
lgcAssert = (np.absolute((varDgr2PixUpX - varDgr2PixUpY)) < 0.5)
assert lgcAssert, strErrMsg
    # Vector with the x-indices of the positions in the super-sampled visual
# space at which to create pRF models.
vecX = np.linspace(0,
(tplVslSpcSze[0] - 1),
varNumX,
endpoint=True,
dtype=np.float32)
    # Vector with the y-indices of the positions in the super-sampled visual
# space at which to create pRF models.
vecY = np.linspace(0,
(tplVslSpcSze[1] - 1),
varNumY,
endpoint=True,
dtype=np.float32)
# Vector with pRF sizes to be modelled (still in degree of visual angle):
vecPrfSd = np.linspace(varPrfStdMin,
varPrfStdMax,
varNumPrfSizes,
endpoint=True,
dtype=np.float32)
# We multiply the vector with the pRF sizes to be modelled with the scaling
# factor (for the x-dimensions - as we have just found out, the scaling
# factors for the x- and y-direction are identical, except for rounding
    # error). Now the vector with the pRF sizes to be modelled can directly
# be used for the creation of Gaussian pRF models in upsampled visual
# space.
vecPrfSd = np.multiply(vecPrfSd, varDgr2PixUpX, dtype=np.float32)
# Number of pRF models to be created (i.e. number of possible combinations
# of x-position, y-position, and standard deviation):
varNumMdls = varNumX * varNumY * varNumPrfSizes
# Array for the x-position, y-position, and standard deviations for which
# pRF model time courses are going to be created, where the columns
# correspond to: (0) an index starting from zero, (1) the x-position, (2)
# the y-position, and (3) the standard deviation. The parameters are in
# units of the upsampled visual space.
aryMdlParams = np.zeros((varNumMdls, 4), dtype=np.float32)
# Counter for parameter array:
varCntMdlPrms = 0
# In hdf5-mode (i.e. parameter space too large for RAM), we need an array
# for sorting the hdf5 files.
if not(strPathMdl is None):
# Array for sorting pRF time courses into large hdf5 file, shape:
# arySort[models, 3], where the three columns correspond to indices of
# (1) x position, (2) y position, (3) pRF size (SD). This array can be
# used to look up model parameters based on model index (i.e. positions
# and size of n-th model). Whereas `aryMdlParams` contains the actual
# parameters (e.g. x-position in coordinates of visual space model),
# `arySort` contains the indices for the pRF time course array (e.g.
# model 1234 has index idxX in pRF model time course array).
arySort = np.zeros((varNumMdls, 3), dtype=np.uint32)
# Put all combinations of x-position, y-position, and standard deviations
# into the array:
# Loop through x-positions:
for idxX in range(varNumX):
# Loop through y-positions:
for idxY in range(varNumY):
# Loop through standard deviations (of Gaussian pRF models):
for idxSd in range(varNumPrfSizes):
# Place index and parameters in array:
aryMdlParams[varCntMdlPrms, 0] = float(varCntMdlPrms)
aryMdlParams[varCntMdlPrms, 1] = vecX[idxX]
aryMdlParams[varCntMdlPrms, 2] = vecY[idxY]
aryMdlParams[varCntMdlPrms, 3] = vecPrfSd[idxSd]
# Put position & size indices into array for hdf5 lookup.
if not(strPathMdl is None):
arySort[varCntMdlPrms, 0] = idxX
arySort[varCntMdlPrms, 1] = idxY
arySort[varCntMdlPrms, 2] = idxSd
# Increment parameter index:
varCntMdlPrms = varCntMdlPrms + 1
# The long array with all the combinations of model parameters is put into
# separate chunks for parallelisation, using a list of arrays.
lstMdlParams = [None] * varPar
    # Vector with the indices at which the functional data will be separated
# in order to be chunked up for the parallel processes:
vecIdxChnks = np.linspace(0,
varNumMdls,
num=varPar,
endpoint=False)
vecIdxChnks = np.hstack((vecIdxChnks, varNumMdls))
# Put model parameters into chunks:
for idxChnk in range(0, varPar):
# Index of first combination of model parameters to be included in
# current chunk:
varTmpChnkSrt = int(vecIdxChnks[idxChnk])
# Index of last combination of model parameters to be included in
# current chunk:
varTmpChnkEnd = int(vecIdxChnks[(idxChnk+1)])
# Put voxel array into list:
lstMdlParams[idxChnk] = aryMdlParams[varTmpChnkSrt:varTmpChnkEnd, :]
# Empty list for results from parallel processes (for pRF model time course
# results):
lstOut = [None] * varPar
# Empty list for processes:
lstPrcs = [None] * varPar
# Create a queue to put the results in:
queOut = mp.Queue()
# Make sure datatype of pixeltimecourses is float32:
aryPixConv = aryPixConv.astype(np.float32)
# Create processes:
for idxPrc in range(varPar):
lstPrcs[idxPrc] = mp.Process(target=prf_par,
args=(idxPrc,
lstMdlParams[idxPrc],
tplVslSpcSze,
aryPixConv,
strPathMdl,
queOut)
)
# Daemon (kills processes when exiting):
lstPrcs[idxPrc].Daemon = True
# Start processes:
for idxPrc in range(varPar):
lstPrcs[idxPrc].start()
# Collect results from queue:
for idxPrc in range(varPar):
lstOut[idxPrc] = queOut.get(True)
# Join processes:
for idxPrc in range(varPar):
lstPrcs[idxPrc].join()
# lstOut:
# idxPrc : int
# Process ID.
# vecMdlIdx : np.array
    #     1D numpy array with model indices (for sorting of models after the
    #     parallel function). Shape: vecMdlIdx[varNumMdls].
# aryPrfTc : np.array or None
# 3D numpy array with pRF model time courses, shape:
# aryPrfTc[varNumMdls, varNumCon, varNumVol]. `None` in case of
# large parameter space (pRF time courses are saved to hdf5 file
# instead).
# Combine model time courses from parallel processes.
lstMdlIdx = [None] * varPar
lstPrfTc = [None] * varPar
    # Get vectors with model indices (vecMdlIdx) and pRF model time courses
# from parallel output list.
for idxPrc in range(varPar):
varPrcId = lstOut[idxPrc][0]
lstMdlIdx[varPrcId] = lstOut[idxPrc][1]
lstPrfTc[varPrcId] = lstOut[idxPrc][2]
# In case of small parameter space, sort pRF time courses and return them
    # to the parent function.
if (strPathMdl is None):
# List to array, concatenating along model-index-dimension:
vecMdlIdx = np.concatenate(lstMdlIdx, axis=0)
aryPrfTc = np.concatenate(lstPrfTc, axis=0)
# Clean up:
del(aryMdlParams)
del(lstMdlParams)
del(lstPrfTc)
del(lstMdlIdx)
del(lstOut)
        # Sort output along the first column (which contains the indices), so
        # that the output is in the same order as the list of combinations of
# model parameters which we created before the parallelisation:
aryPrfTc = aryPrfTc[np.argsort(vecMdlIdx, axis=0), :, :]
# Array representing the low-resolution visual space, of the form
# aryPrfTc[x-position, y-position, pRF-size, varNumCon, varNumVol],
# which will hold the pRF model time courses.
aryPrfTc5D = np.zeros([varNumX,
varNumY,
varNumPrfSizes,
varNumCon,
varNumVol],
dtype=np.float32)
# We use the same loop structure for organising the pRF model time
# courses that we used for creating the parameter array. Counter:
varCntMdlPrms = 0
# Put all combinations of x-position, y-position, and standard
# deviations into the array:
# Loop through x-positions:
for idxX in range(varNumX):
# Loop through y-positions:
for idxY in range(varNumY):
# Loop through standard deviations (of Gaussian pRF models):
for idxSd in range(varNumPrfSizes):
# Put the pRF model time course into its correct position
# in the 5D array:
aryPrfTc5D[idxX, idxY, idxSd, :, :] = \
aryPrfTc[varCntMdlPrms, :, :]
# Increment parameter index:
varCntMdlPrms = varCntMdlPrms + 1
else:
print('------Sort pRF model time courses in hdf5 file.')
# In case of a large parameter space, create large hdf5 file and place
# pRF time courses from parallel processes therein.
# Path of hdf5 file:
strPthHdf5 = (strPathMdl + '.hdf5')
# Create hdf5 file:
fleHdf5 = h5py.File(strPthHdf5, 'w')
# Create dataset within hdf5 file (same shape as `aryPrfTc5D`, and
# containing the same data as `aryPrfTc5D`).
dtsPrfTc = fleHdf5.create_dataset('pRF_time_courses',
(varNumX,
varNumY,
varNumPrfSizes,
varNumCon,
varNumVol),
dtype=np.float32)
# Loop through processes:
for idxPrc in range(varPar):
# Path of hdf5 file with chunk of results (from parallel child
# process):
strPthHdf5Par = (strPathMdl + '_' + str(idxPrc) + '.hdf5')
# Read file:
fleHdf5Par = h5py.File(strPthHdf5Par, 'r')
# Access dataset in current hdf5 file:
dtsPrfTcPar = fleHdf5Par['pRF_time_courses']
# Vector with model indices for current data chunk (for sorting of
# pRF time course models). Shape: vecMdlIdx[varNumMdls].
vecMdlIdxPar = lstMdlIdx[idxPrc]
# Number of models in the current chunk:
varNumMdlPar = vecMdlIdxPar.shape[0]
# Indices need to be integer:
vecMdlIdxPar = np.around(vecMdlIdxPar).astype(np.int32)
# Loop through models, and place the respective timecourse in the
# final hdf5 file.
for idxMdl in range(varNumMdlPar):
# Model index (in the range of all models, from all processes)
# of the current model:
varIdxMdlTmp = vecMdlIdxPar[idxMdl]
# Get model indices (wrt pRF model time course array).
idxX = arySort[varIdxMdlTmp, 0]
idxY = arySort[varIdxMdlTmp, 1]
idxSd = arySort[varIdxMdlTmp, 2]
# Get data from chunk hdf5 file (from parallel child process)
# and place them at the correct position in the final hdf5 file
# (whole model space).
dtsPrfTc[idxX, idxY, idxSd, :, :] = dtsPrfTcPar[idxMdl, :, :]
# Close file:
fleHdf5Par.close()
# Close file:
fleHdf5.close()
# Dummy pRF object:
aryPrfTc5D = None
# Return
return aryPrfTc5D
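# Minimal illustration (not part of the original module; the actual model
# creation happens in model_creation_timecourses_par.prf_par): each pRF model
# is a 2D Gaussian over the upsampled visual space, defined by a centre
# (varPosX, varPosY) and a size (standard deviation) varSd, all in pixels of
# tplVslSpcSze; the model time course is then the Gaussian-weighted sum of the
# pixel-wise design matrix.
#
# def crt_2d_gauss(varSizeX, varSizeY, varPosX, varPosY, varSd):
#     """Sketch of a 2D Gaussian pRF model on a (varSizeX, varSizeY) grid."""
#     aryX, aryY = np.meshgrid(np.arange(varSizeX, dtype=np.float32),
#                              np.arange(varSizeY, dtype=np.float32),
#                              indexing='ij')
#     return np.exp(-((aryX - varPosX) ** 2 + (aryY - varPosY) ** 2)
#                   / (2.0 * varSd ** 2))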
|
ingo-m/py_pRF_mapping
|
pyprf/analysis/model_creation_timecourses.py
|
Python
|
gpl-3.0
| 16,966
|
[
"Gaussian"
] |
e6f51cf6e269740dcdc243ce9654b6eae58f7e9558852c04268f34248a5e5e95
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
class GaussianNBClassifier:
def __init__(self):
"""
This is the constructor responsible for initializing the classifier
"""
self.outputHeader = "#gnb"
self.clf = None
def buildModel(self):
"""
This builds the model of the Gaussian NB classifier
"""
self.clf = GaussianNB()
def trainGaussianNB(self,X, Y):
"""
Training the Gaussian NB Classifier
"""
self.clf.fit(X, Y)
def validateGaussianNB(self,X, Y):
"""
Validate the Gaussian NB Classifier
"""
YPred = self.clf.predict(X)
print accuracy_score(Y, YPred)
def testGaussianNB(self,X, Y):
"""
Test the Gaussian NB Classifier
"""
YPred = self.clf.predict(X)
print accuracy_score(Y, YPred)
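# Illustrative usage sketch (not part of the original module); the toy data
# below is assumed purely for this example.
#
# if __name__ == "__main__":
#     X = [[0.0, 1.1], [0.2, 0.9], [5.1, 4.8], [4.9, 5.2]]
#     Y = [0, 0, 1, 1]
#     gnb = GaussianNBClassifier()
#     gnb.buildModel()              # instantiate the sklearn GaussianNB model
#     gnb.trainGaussianNB(X, Y)     # fit on the toy training data
#     gnb.testGaussianNB(X, Y)      # prints accuracy (1.0 on this separable toy set)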
|
USCDataScience/NN-fileTypeDetection
|
classifiers/gaussianNB.py
|
Python
|
apache-2.0
| 1,570
|
[
"Gaussian"
] |
bc373d3ea92fab0197f992da28254ba141325ba97c2580802e6d634c0d97e522
|