| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| stringlengths 12-1.05M | stringlengths 5-86 | stringlengths 4-191 | stringclasses 1 value | stringclasses 15 values | int32 12-1.05M | listlengths 1-23 | stringlengths 64-64 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import warnings
from shutil import which
import numpy as np
from monty.serialization import loadfn
from pymatgen.analysis.magnetism import (
CollinearMagneticStructureAnalyzer,
MagneticStructureEnumerator,
Ordering,
magnetic_deformation,
)
from pymatgen.core import Element, Lattice, Species, Structure
from pymatgen.io.cif import CifParser
from pymatgen.util.testing import PymatgenTest
enum_cmd = which("enum.x") or which("multienum.x")
makestr_cmd = which("makestr.x") or which("makeStr.x") or which("makeStr.py")
enumlib_present = enum_cmd and makestr_cmd
class CollinearMagneticStructureAnalyzerTest(unittest.TestCase):
def setUp(self):
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "Fe.cif"))
self.Fe = parser.get_structures()[0]
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "LiFePO4.cif"))
self.LiFePO4 = parser.get_structures()[0]
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "Fe3O4.cif"))
self.Fe3O4 = parser.get_structures()[0]
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "magnetic.ncl.example.GdB4.mcif"))
self.GdB4 = parser.get_structures()[0]
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "magnetic.example.NiO.mcif"))
self.NiO_expt = parser.get_structures()[0]
latt = Lattice.cubic(4.17)
species = ["Ni", "O"]
coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
self.NiO = Structure.from_spacegroup(225, latt, species, coords)
latt = Lattice([[2.085, 2.085, 0.0], [0.0, -2.085, -2.085], [-2.085, 2.085, -4.17]])
species = ["Ni", "Ni", "O", "O"]
coords = [[0.5, 0, 0.5], [0, 0, 0], [0.25, 0.5, 0.25], [0.75, 0.5, 0.75]]
self.NiO_AFM_111 = Structure(latt, species, coords, site_properties={"magmom": [-5, 5, 0, 0]})
latt = Lattice([[2.085, 2.085, 0], [0, 0, -4.17], [-2.085, 2.085, 0]])
species = ["Ni", "Ni", "O", "O"]
coords = [[0.5, 0.5, 0.5], [0, 0, 0], [0, 0.5, 0], [0.5, 0, 0.5]]
self.NiO_AFM_001 = Structure(latt, species, coords, site_properties={"magmom": [-5, 5, 0, 0]})
latt = Lattice([[2.085, 2.085, 0], [0, 0, -4.17], [-2.085, 2.085, 0]])
species = ["Ni", "Ni", "O", "O"]
coords = [[0.5, 0.5, 0.5], [0, 0, 0], [0, 0.5, 0], [0.5, 0, 0.5]]
self.NiO_AFM_001_opposite = Structure(latt, species, coords, site_properties={"magmom": [5, -5, 0, 0]})
latt = Lattice([[2.085, 2.085, 0], [0, 0, -4.17], [-2.085, 2.085, 0]])
species = ["Ni", "Ni", "O", "O"]
coords = [[0.5, 0.5, 0.5], [0, 0, 0], [0, 0.5, 0], [0.5, 0, 0.5]]
self.NiO_unphysical = Structure(latt, species, coords, site_properties={"magmom": [-3, 0, 0, 0]})
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_get_representations(self):
# tests to convert between storing magnetic moment information
# on site_properties or on Species 'spin' property
# test we store magnetic moments on site properties
self.Fe.add_site_property("magmom", [5])
msa = CollinearMagneticStructureAnalyzer(self.Fe)
self.assertEqual(msa.structure.site_properties["magmom"][0], 5)
# and that we can retrieve a spin representation
Fe_spin = msa.get_structure_with_spin()
self.assertFalse("magmom" in Fe_spin.site_properties)
self.assertEqual(Fe_spin[0].specie.spin, 5)
# test we can remove magnetic moment information
Fe_nonmag = msa.get_nonmagnetic_structure()
self.assertFalse("magmom" in Fe_nonmag.site_properties)
# test with disorder on magnetic site
self.Fe[0] = {
Species("Fe", oxidation_state=0, properties={"spin": 5}): 0.5,
"Ni": 0.5,
}
self.assertRaises(NotImplementedError, CollinearMagneticStructureAnalyzer, self.Fe)
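# For reference, the round-trip exercised above looks like this in ordinary
# usage (an illustrative sketch mirroring the assertions, not an extra test):
#   structure.add_site_property("magmom", [5])
#   msa = CollinearMagneticStructureAnalyzer(structure)
#   spin_struct = msa.get_structure_with_spin()      # magmom moved to Species spin
#   nonmag_struct = msa.get_nonmagnetic_structure()  # magnetic info stripped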
def test_matches(self):
self.assertTrue(self.NiO.matches(self.NiO_AFM_111))
self.assertTrue(self.NiO.matches(self.NiO_AFM_001))
# MSA adds magmoms to Structure, so not equal
msa = CollinearMagneticStructureAnalyzer(self.NiO, overwrite_magmom_mode="replace_all")
self.assertFalse(msa.matches_ordering(self.NiO))
self.assertFalse(msa.matches_ordering(self.NiO_AFM_111))
self.assertFalse(msa.matches_ordering(self.NiO_AFM_001))
msa = CollinearMagneticStructureAnalyzer(self.NiO_AFM_001, overwrite_magmom_mode="respect_sign")
self.assertFalse(msa.matches_ordering(self.NiO))
self.assertFalse(msa.matches_ordering(self.NiO_AFM_111))
self.assertTrue(msa.matches_ordering(self.NiO_AFM_001))
self.assertTrue(msa.matches_ordering(self.NiO_AFM_001_opposite))
msa = CollinearMagneticStructureAnalyzer(self.NiO_AFM_111, overwrite_magmom_mode="respect_sign")
self.assertFalse(msa.matches_ordering(self.NiO))
self.assertTrue(msa.matches_ordering(self.NiO_AFM_111))
self.assertFalse(msa.matches_ordering(self.NiO_AFM_001))
self.assertFalse(msa.matches_ordering(self.NiO_AFM_001_opposite))
def test_modes(self):
mode = "none"
msa = CollinearMagneticStructureAnalyzer(self.NiO, overwrite_magmom_mode=mode)
magmoms = msa.structure.site_properties["magmom"]
self.assertEqual(magmoms, [0, 0])
mode = "respect_sign"
msa = CollinearMagneticStructureAnalyzer(self.NiO_unphysical, overwrite_magmom_mode=mode)
magmoms = msa.structure.site_properties["magmom"]
self.assertEqual(magmoms, [-5, 0, 0, 0])
mode = "respect_zeros"
msa = CollinearMagneticStructureAnalyzer(self.NiO_unphysical, overwrite_magmom_mode=mode)
magmoms = msa.structure.site_properties["magmom"]
self.assertEqual(magmoms, [5, 0, 0, 0])
mode = "replace_all"
msa = CollinearMagneticStructureAnalyzer(self.NiO_unphysical, overwrite_magmom_mode=mode, make_primitive=False)
magmoms = msa.structure.site_properties["magmom"]
self.assertEqual(magmoms, [5, 5, 0, 0])
mode = "replace_all_if_undefined"
msa = CollinearMagneticStructureAnalyzer(self.NiO, overwrite_magmom_mode=mode)
magmoms = msa.structure.site_properties["magmom"]
self.assertEqual(magmoms, [5, 0])
mode = "normalize"
msa = CollinearMagneticStructureAnalyzer(msa.structure, overwrite_magmom_mode="normalize")
magmoms = msa.structure.site_properties["magmom"]
self.assertEqual(magmoms, [1, 0])
def test_net_positive(self):
msa = CollinearMagneticStructureAnalyzer(self.NiO_unphysical)
magmoms = msa.structure.site_properties["magmom"]
self.assertEqual(magmoms, [3, 0, 0, 0])
def test_get_ferromagnetic_structure(self):
msa = CollinearMagneticStructureAnalyzer(self.NiO, overwrite_magmom_mode="replace_all_if_undefined")
s1 = msa.get_ferromagnetic_structure()
s1_magmoms = [float(m) for m in s1.site_properties["magmom"]]
s1_magmoms_ref = [5.0, 0.0]
self.assertListEqual(s1_magmoms, s1_magmoms_ref)
_ = CollinearMagneticStructureAnalyzer(self.NiO_AFM_111, overwrite_magmom_mode="replace_all_if_undefined")
s2 = msa.get_ferromagnetic_structure(make_primitive=False)
s2_magmoms = [float(m) for m in s2.site_properties["magmom"]]
s2_magmoms_ref = [5.0, 0.0]
self.assertListEqual(s2_magmoms, s2_magmoms_ref)
s2_prim = msa.get_ferromagnetic_structure(make_primitive=True)
self.assertTrue(CollinearMagneticStructureAnalyzer(s1).matches_ordering(s2_prim))
def test_magnetic_properties(self):
msa = CollinearMagneticStructureAnalyzer(self.GdB4)
self.assertFalse(msa.is_collinear)
msa = CollinearMagneticStructureAnalyzer(self.Fe)
self.assertFalse(msa.is_magnetic)
self.Fe.add_site_property("magmom", [5])
msa = CollinearMagneticStructureAnalyzer(self.Fe)
self.assertTrue(msa.is_magnetic)
self.assertTrue(msa.is_collinear)
self.assertEqual(msa.ordering, Ordering.FM)
msa = CollinearMagneticStructureAnalyzer(
self.NiO,
make_primitive=False,
overwrite_magmom_mode="replace_all_if_undefined",
)
self.assertEqual(msa.number_of_magnetic_sites, 4)
self.assertEqual(msa.number_of_unique_magnetic_sites(), 1)
self.assertEqual(msa.types_of_magnetic_species, (Element.Ni,))
self.assertEqual(msa.get_exchange_group_info(), ("Fm-3m", 225))
def test_str(self):
msa = CollinearMagneticStructureAnalyzer(self.NiO_AFM_001)
ref_msa_str = """Structure Summary
Lattice
abc : 2.948635277547903 4.17 2.948635277547903
angles : 90.0 90.0 90.0
volume : 36.2558565
A : 2.085 2.085 0.0
B : 0.0 0.0 -4.17
C : -2.085 2.085 0.0
Magmoms Sites
+5.00 PeriodicSite: Ni (0.0000, 0.0000, 0.0000) [0.0000, 0.0000, 0.0000]
PeriodicSite: O (0.0000, 0.0000, -2.0850) [0.0000, 0.5000, 0.0000]
PeriodicSite: O (0.0000, 2.0850, 0.0000) [0.5000, 0.0000, 0.5000]
-5.00 PeriodicSite: Ni (0.0000, 2.0850, -2.0850) [0.5000, 0.5000, 0.5000]"""
# just compare lines from 'Magmoms Sites',
# since lattice param string can vary based on machine precision
self.assertEqual(
"\n".join(str(msa).split("\n")[-5:-1]),
"\n".join(ref_msa_str.split("\n")[-5:-1]),
)
def test_round_magmoms(self):
struct = self.NiO_AFM_001.copy()
struct.add_site_property("magmom", [-5.0143, -5.02, 0.147, 0.146])
msa = CollinearMagneticStructureAnalyzer(struct, round_magmoms=0.001, make_primitive=False)
self.assertTrue(np.allclose(msa.magmoms, [5.0171, 5.0171, -0.1465, -0.1465]))
self.assertAlmostEqual(msa.magnetic_species_and_magmoms["Ni"], 5.0171)
self.assertAlmostEqual(msa.magnetic_species_and_magmoms["O"], 0.1465)
struct.add_site_property("magmom", [-5.0143, 4.5, 0.147, 0.146])
msa = CollinearMagneticStructureAnalyzer(struct, round_magmoms=0.001, make_primitive=False)
self.assertTrue(np.allclose(msa.magmoms, [5.0143, -4.5, -0.1465, -0.1465]))
self.assertAlmostEqual(msa.magnetic_species_and_magmoms["Ni"][0], 4.5)
self.assertAlmostEqual(msa.magnetic_species_and_magmoms["Ni"][1], 5.0143)
self.assertAlmostEqual(msa.magnetic_species_and_magmoms["O"], 0.1465)
class MagneticStructureEnumeratorTest(unittest.TestCase):
@unittest.skipIf(not enumlib_present, "enumlib not present")
def test_ordering_enumeration(self):
# simple afm
structure = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "magnetic_orderings/LaMnO3.json"))
enumerator = MagneticStructureEnumerator(structure)
self.assertEqual(enumerator.input_origin, "afm")
# ferrimagnetic (Cr produces net spin)
structure = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "magnetic_orderings/Cr2NiO4.json"))
enumerator = MagneticStructureEnumerator(structure)
print(enumerator.input_origin)
self.assertEqual(enumerator.input_origin, "ferri_by_Cr")
# antiferromagnetic on single magnetic site
structure = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "magnetic_orderings/Cr2WO6.json"))
enumerator = MagneticStructureEnumerator(structure)
self.assertEqual(enumerator.input_origin, "afm_by_Cr")
# afm requiring large cell size
# (enable for further development of workflow, too slow for CI)
# structure = Structure.from_file(os.path.join(ref_dir, "CuO.json"))
# enumerator = MagneticStructureEnumerator(structure, default_magmoms={'Cu': 1.73},
# transformation_kwargs={'max_cell_size': 4})
# self.assertEqual(enumerator.input_origin, "afm")
# antiferromagnetic by structural motif
structure = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "magnetic_orderings/Ca3Co2O6.json"))
enumerator = MagneticStructureEnumerator(
structure,
strategies=("antiferromagnetic_by_motif",),
# this example just misses default cut-off, so do not truncate
truncate_by_symmetry=False,
transformation_kwargs={"max_cell_size": 2},
)
self.assertEqual(enumerator.input_origin, "afm_by_motif_2a")
class MagneticDeformationTest(unittest.TestCase):
def test_magnetic_deformation(self):
test_structs = loadfn(os.path.join(PymatgenTest.TEST_FILES_DIR, "magnetic_deformation.json"))
mag_def = magnetic_deformation(test_structs[0], test_structs[1])
self.assertEqual(mag_def.type, "NM-FM")
self.assertAlmostEqual(mag_def.deformation, 5.0130859485170971)
if __name__ == "__main__":
unittest.main()
| repo_name: materialsproject/pymatgen | path: pymatgen/analysis/magnetism/tests/test_analyzer.py | language: Python | license: mit | size: 13,067 | keyword: ["pymatgen"] | text_hash: 01e724333154f94367cc17913c1c9d247fb70f0b1a09f620cdd2b182598c64c2 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that involve a full pass over the dataset.
This module contains functions that are used in the preprocessing function, to
define a full pass operation such as computing the sum, min, max or unique
values of a tensor over the entire dataset. This is implemented by a reduction
operation in the Beam implementation.
From the user's point of view, an analyzer appears as a regular TensorFlow
function, i.e. it accepts and returns tensors. However it is represented in
the graph as an `Analyzer` which is not a TensorFlow op, but a placeholder for
the computation that takes place outside of TensorFlow.
"""
import functools
import os
import pickle
import re
from typing import Any, Callable, Collection, List, Optional, Tuple, Union
from absl import logging
import numpy as np
import pyarrow as pa
import tensorflow as tf
from tensorflow_transform import analyzer_nodes
from tensorflow_transform import annotators
from tensorflow_transform import common
from tensorflow_transform import common_types
from tensorflow_transform import gaussianization
from tensorflow_transform import nodes
from tensorflow_transform import schema_inference
from tensorflow_transform import tf_utils
from tfx_bsl import sketches
# TODO(https://issues.apache.org/jira/browse/SPARK-22674): Switch to
# `collections.namedtuple` or `typing.NamedTuple` once the Spark issue is
# resolved.
from tfx_bsl.types import tfx_namedtuple
from typing_extensions import Literal
from google.protobuf import descriptor_pb2
__all__ = [
'count_per_key',
'covariance',
'histogram',
'max',
'mean',
'min',
'pca',
'quantiles',
'size',
'sum',
'tukey_location',
'tukey_scale',
'tukey_h_params',
'var',
'vocabulary',
]
# This module defines max and min functions that override the builtins.
builtin_max = max
builtin_min = min
DEFAULT_VOCABULARY_FILE_FORMAT: Literal['text'] = 'text'
ALLOWED_VOCABULARY_FILE_FORMATS = ('text', 'tfrecord_gzip')
VOCAB_FILENAME_PREFIX = 'vocab_'
VOCAB_FREQUENCY_FILENAME_PREFIX = 'vocab_frequency_'
# Experimentally estimated value of top_k after which the exact `tft.vocabulary`
# implementation becomes more efficient than
# `tft.experimental.approximate_vocabulary`.
LARGE_VOCAB_TOP_K = 200_000
# Matches empty strings and strings with \n or \r (including strings with \n or
# \r that contain invalid UTF-8 characters). This has to follow the re2 syntax:
# https://github.com/google/re2/wiki/Syntax.
_EMPTY_STRING_OR_NEWLINE_CHARS_REGEX = r'^$|\C*[\n\r]\C*'
# For some input types, widen the output type of sum analyzer to avoid overflow.
_SUM_OUTPUT_DTYPE_MAP = {
tf.float16: tf.float32,
tf.float32: tf.float32,
tf.float64: tf.float64,
tf.int8: tf.int64,
tf.int16: tf.int64,
tf.int32: tf.int64,
tf.int64: tf.int64,
tf.uint8: tf.uint64,
tf.uint16: tf.uint64,
tf.uint32: tf.uint64,
tf.uint64: tf.uint64,
}
_FLOAT_OUTPUT_DTYPE_MAP = {
tf.float16: tf.float16,
tf.float32: tf.float32,
tf.float64: tf.float64,
tf.int8: tf.float32,
tf.int16: tf.float32,
tf.int32: tf.float32,
tf.int64: tf.float32,
tf.uint8: tf.float32,
tf.uint16: tf.float32,
tf.uint32: tf.float32,
tf.uint64: tf.float32,
}
def apply_cacheable_combine_operation(
combiner: analyzer_nodes.Combiner,
*tensor_inputs: common_types.TensorType) -> Tuple[nodes.ValueNode, ...]:
"""Applies combine operation nodes over the whole dataset.
Applied nodes are subject to analyzer cache optimization.
Args:
combiner: Combiner to be applied.
*tensor_inputs: Tensors representing inputs to the combiner.
Returns:
A tuple of ValueNodes representing outputs of the combiner.
"""
input_values_node = analyzer_nodes.get_input_tensors_value_nodes(
tensor_inputs)
accumulate_outputs_value_nodes = nodes.apply_multi_output_operation(
analyzer_nodes.CacheableCombineAccumulate,
input_values_node,
combiner=combiner)
merge_outputs_value_nodes = nodes.apply_multi_output_operation(
analyzer_nodes.CacheableCombineMerge,
*accumulate_outputs_value_nodes,
combiner=combiner)
return nodes.apply_multi_output_operation(
analyzer_nodes.ExtractCombineMergeOutputs,
*merge_outputs_value_nodes,
output_tensor_info_list=combiner.output_tensor_infos())
def _apply_cacheable_combiner(
combiner: analyzer_nodes.Combiner,
*tensor_inputs: common_types.TensorType) -> Tuple[tf.Tensor, ...]:
"""Applies the combiner over the whole dataset possibly utilizing cache.
Similar to above but returns a tuple of output tensors.
Args:
combiner: Combiner to be applied.
*tensor_inputs: Tensors representing inputs to the combiner.
Returns:
A tuple of tensors representing outputs of the combiner.
"""
outputs_value_nodes = apply_cacheable_combine_operation(
combiner, *tensor_inputs)
return tuple(map(analyzer_nodes.wrap_as_tensor, outputs_value_nodes)) # pytype: disable=bad-return-type
def _apply_cacheable_combiner_per_key(
combiner: analyzer_nodes.Combiner,
*tensor_inputs: common_types.TensorType) -> Tuple[tf.Tensor, ...]:
"""Similar to _apply_cacheable_combiner but this is computed per key."""
input_values_node = analyzer_nodes.get_input_tensors_value_nodes(
tensor_inputs)
accumulate_outputs_value_nodes = nodes.apply_multi_output_operation(
analyzer_nodes.CacheableCombinePerKeyAccumulate,
input_values_node,
combiner=combiner)
merge_output_value_node = nodes.apply_operation(
analyzer_nodes.CacheableCombinePerKeyMerge,
*accumulate_outputs_value_nodes,
combiner=combiner)
output_value_nodes = nodes.apply_multi_output_operation(
analyzer_nodes.CacheableCombinePerKeyFormatKeys,
merge_output_value_node,
combiner=combiner)
return tuple(map(analyzer_nodes.wrap_as_tensor, output_value_nodes))
def _apply_cacheable_combiner_per_key_large(
combiner: analyzer_nodes.Combiner, key_vocabulary_filename: str,
*tensor_inputs: common_types.TensorType
) -> Union[tf.Tensor, common_types.Asset]:
"""Similar to above but saves the combined result to a file."""
input_values_node = analyzer_nodes.get_input_tensors_value_nodes(
tensor_inputs)
accumulate_outputs_value_node = nodes.apply_operation(
analyzer_nodes.CacheableCombinePerKeyAccumulate,
input_values_node,
combiner=combiner)
merge_output_value_node = nodes.apply_operation(
analyzer_nodes.CacheableCombinePerKeyMerge,
accumulate_outputs_value_node,
combiner=combiner)
keys_and_values_node = nodes.apply_operation(
analyzer_nodes.CacheableCombinePerKeyFormatLarge,
merge_output_value_node)
# `store_frequency` is True by default because we want to write some values
# alongside the key "vocabulary". Without doing so it would be equivalent to
# vanilla vocabulary analyzer. `fingerprint_shuffle` is not as important but
# signifies that the values are not required to be ordered here.
key_vocabulary_filename_node = nodes.apply_operation(
analyzer_nodes.VocabularyOrderAndWrite,
keys_and_values_node,
vocab_filename=key_vocabulary_filename,
store_frequency=True,
fingerprint_shuffle=True,
# TODO(b/62379925): Use tfrecord.
file_format='text')
return analyzer_nodes.wrap_as_tensor(key_vocabulary_filename_node)
class NumPyCombiner(analyzer_nodes.Combiner):
"""Combines the PCollection only on the 0th dimension using nparray.
Attributes:
fn: The numpy function representing the reduction to be done.
default_accumulator_value: The default value each accumulator entry is
initialized to.
output_dtypes: The numpy dtype to cast each output to.
output_shapes: List of tuples representing the shapes of the outputs or
Nones if the shapes are not fully defined.
"""
def __init__(self, fn, default_accumulator_value, output_dtypes,
output_shapes):
self._fn = fn
self._default_accumulator_value = default_accumulator_value
self._default_sub_accumulator = np.array(default_accumulator_value)
self._output_dtypes = output_dtypes
if not all(
isinstance(shape, (tuple, type(None))) for shape in output_shapes):
raise TypeError('Expected all tuples or Nones, but got %r' %
output_shapes)
self._output_shapes = output_shapes
if np.isnan(default_accumulator_value):
# This case is needed because np.nan != np.nan.
self._is_default_sub_accumulator = self._equals_to_scalar_nan
else:
self._is_default_sub_accumulator = self._equals_to_default_sub_accumulator
def _equals_to_scalar_nan(self, array):
return not array.shape and np.isnan(array)
def _equals_to_default_sub_accumulator(self, array):
# Note that `np.array_equal` below does at most per-element comparison of
# 0-dim arrays since `_default_sub_accumulator` is a 0-dim array, and
# `np.array_equal` exits early on a shape mismatch.
return np.array_equal(array, self._default_sub_accumulator)
def _is_default_sub_accumulator(self, array):
raise NotImplementedError('Implementation should be set in __init__.')
def create_accumulator(self):
return [
self._create_sub_accumulator(shape)
for shape in self._output_shapes
]
def _create_sub_accumulator(self, shape):
# Returns a default subaccumulator of the given shape if it's fully defined
# and a 0-dim default array otherwise.
if shape is None:
return self._default_sub_accumulator
else:
return np.full(shape, self._default_accumulator_value)
def add_input(self, accumulator, batch_values):
# TODO(b/112414577): Go back to accepting only a single input.
# See comment in _numeric_combine.
# If the first subaccumulator is default, then the accumulator is default
# and can be discarded.
if self._is_default_sub_accumulator(accumulator[0]):
return batch_values
else:
return [
self._fn((sub_accumulator, batch_value), axis=0)
for sub_accumulator, batch_value in zip(accumulator, batch_values)
]
def merge_accumulators(self, accumulators):
# TODO(b/422923883): Operate in place on accumulators[0] or batch values
# internally for vectorization benefits after AccumulateFn is in use.
# If the first subaccumulator is default, then the accumulator is default
# and can be discarded.
non_default_accumulators = [
accumulator for accumulator in accumulators
if not self._is_default_sub_accumulator(accumulator[0])
]
if non_default_accumulators:
return [
# numpy's sum, min, max, etc functions operate on array-like objects,
# but not arbitrary iterables. Convert the provided sub_accumulators
# into a list.
self._fn(list(sub_accumulators), axis=0)
for sub_accumulators in zip(*non_default_accumulators)
]
else:
return self.create_accumulator()
def extract_output(self, accumulator):
# For each output, cast that output to the specified type. Note there
# will be one output for each input tensor to the analyzer.
return [
sub_accumulator.astype(output_dtype) for sub_accumulator, output_dtype
in zip(accumulator, self._output_dtypes)
]
def output_tensor_infos(self):
return [
analyzer_nodes.TensorInfo(tf.as_dtype(dtype), shape, None)
for dtype, shape in zip(self._output_dtypes, self._output_shapes)
]
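# Illustrative sketch (added for exposition; not referenced by the library) of
# the combiner lifecycle for a scalar running maximum. The dtype, shape and
# batch values below are invented for the example.
def _example_numpy_combiner_running_max():
  combiner = NumPyCombiner(
      fn=np.max,
      default_accumulator_value=-np.inf,
      output_dtypes=[np.float32],
      output_shapes=[()])
  acc = combiner.create_accumulator()             # [array(-inf)], the default
  acc = combiner.add_input(acc, [np.array(3.0)])  # default accumulator replaced
  acc = combiner.add_input(acc, [np.array(7.0)])  # np.max over (3.0, 7.0) -> 7.0
  return combiner.extract_output(acc)             # [7.0] cast to float32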
def _get_output_shape_from_input(x):
if isinstance(x, tf.SparseTensor):
return x.get_shape().as_list()[1:]
# When reducing over batch dimensions, with known shape, the result will be
# the same shape as the input, but without the batch.
if x.shape.rank is not None:
return x.shape.as_list()[1:]
return (None,)
# TODO(b/112414577): Go back to accepting only a single input.
# Currently we accept multiple inputs so that we can implement min and max
# with a single combiner. Once this is done, add a return pytype as well.
def _numeric_combine(inputs: List[tf.Tensor],
fn: Callable[[np.ndarray], np.ndarray],
default_accumulator_value: Union[float, int],
reduce_instance_dims: bool = True,
output_dtypes: Optional[List[tf.DType]] = None,
key: Optional[tf.Tensor] = None,
key_vocabulary_filename: Optional[str] = None):
"""Apply a reduction, defined by a numpy function to multiple inputs.
Args:
inputs: A list of tensors, which will be independently reduced.
fn: A function to reduce tensors across instances/batches, to get a single
output.
default_accumulator_value: The default scalar value that each accumulator
entry is initialized to. Must be properly processed by the reduction
function.
reduce_instance_dims: By default collapses the batch and instance dimensions
to arrive at a single scalar output. If False, only collapses the batch
dimension and outputs a vector of the same shape as the input.
output_dtypes: (Optional) A list of dtypes of the output tensors. If None,
the output tensor has the same type as the input one.
key: (Optional) Apply the same operation, but on a per-key basis.
key_vocabulary_filename: (Optional) The file name for the key-output mapping
file. If None and a key is provided, this combiner assumes the keys fit in
memory and will not store the result in a file. If empty string, a file
name will be chosen based on the current scope. If not an empty string,
should be unique within a given preprocessing function.
Returns:
Either:
(A) A list of Tensors with the same length as `inputs`, representing the
input Tensors that have been reduced by `fn` across instances and
batches (if key_vocabulary_filename is None).
(B) A Tensor with the filename where the key-value mapping is stored (if
key_vocabulary_filename is not None).
"""
for x in inputs:
if not isinstance(x, tf.Tensor):
raise TypeError('Expected a Tensor, but got %r' % x)
if not np.isscalar(default_accumulator_value):
raise TypeError('Expected a scalar, but got %r' % default_accumulator_value)
if output_dtypes is None:
output_dtypes = [x.dtype for x in inputs]
if reduce_instance_dims:
# If reducing over all dimensions, result is scalar.
output_shapes = [() for _ in inputs]
else:
# Reducing over batch dimensions.
output_shapes = [
(tuple(x.get_shape()) if x.get_shape().is_fully_defined() else None)
for x in inputs
]
combiner = NumPyCombiner(fn, default_accumulator_value,
[dtype.as_numpy_dtype for dtype in output_dtypes],
output_shapes)
if key is None:
return _apply_cacheable_combiner(combiner, *inputs)
if key_vocabulary_filename is None:
return _apply_cacheable_combiner_per_key(combiner, key, *inputs)
return _apply_cacheable_combiner_per_key_large(
combiner, _maybe_get_per_key_vocab_filename(key_vocabulary_filename),
key, *inputs)
@common.log_api_use(common.ANALYZER_COLLECTION)
def min( # pylint: disable=redefined-builtin
x: common_types.TensorType,
reduce_instance_dims: bool = True,
name: Optional[str] = None) -> tf.Tensor:
"""Computes the minimum of the values of a `Tensor` over the whole dataset.
In the case of a `CompositeTensor`, missing values are filled in for the
reduction: NaN for floating-point dtypes and the dtype's max for other dtypes.
Args:
x: A `Tensor` or `CompositeTensor`.
reduce_instance_dims: By default collapses the batch and instance dimensions
to arrive at a single scalar output. If False, only collapses the batch
dimension and outputs a `Tensor` of the same shape as the input.
name: (Optional) A name for this operation.
Returns:
A `Tensor` with the same type as `x`.
Raises:
TypeError: If the type of `x` is not supported.
"""
with tf.compat.v1.name_scope(name, 'min'):
return _min_and_max(x, reduce_instance_dims, name)[0]
@common.log_api_use(common.ANALYZER_COLLECTION)
def max( # pylint: disable=redefined-builtin
x: common_types.TensorType,
reduce_instance_dims: bool = True,
name: Optional[str] = None) -> tf.Tensor:
"""Computes the maximum of the values of a `Tensor` over the whole dataset.
In the case of a `CompositeTensor`, missing values are filled in for the
reduction: NaN for floating-point dtypes and the dtype's min for other dtypes.
Args:
x: A `Tensor` or `CompositeTensor`.
reduce_instance_dims: By default collapses the batch and instance dimensions
to arrive at a single scalar output. If False, only collapses the batch
dimension and outputs a vector of the same shape as the input.
name: (Optional) A name for this operation.
Returns:
A `Tensor`. Has the same type as `x`.
Raises:
TypeError: If the type of `x` is not supported.
"""
with tf.compat.v1.name_scope(name, 'max'):
return _min_and_max(x, reduce_instance_dims, name)[1]
def _min_and_max(x: common_types.TensorType,
reduce_instance_dims: bool = True,
name: Optional[str] = None) -> Tuple[tf.Tensor, tf.Tensor]:
"""Computes the min and max of the values of a `Tensor` or `CompositeTensor`.
In the case of a `CompositeTensor`, missing values are filled in for the
reduction: NaN for floating-point dtypes and the dtype's min for other dtypes.
Args:
x: A `Tensor` or `CompositeTensor`.
reduce_instance_dims: By default collapses the batch and instance dimensions
to arrive at a single scalar output. If False, only collapses the batch
dimension and outputs a vector of the same shape as the input.
name: (Optional) A name for this operation.
Returns:
Two `Tensor`s. Both have the same type as `x`.
Raises:
TypeError: If the type of `x` is not supported.
"""
with tf.compat.v1.name_scope(name, 'min_and_max'):
output_dtype = x.dtype
if (not reduce_instance_dims and isinstance(x, tf.SparseTensor) and
x.dtype.is_floating):
combine_fn = np.nanmax
default_accumulator_value = (np.nan if x.dtype.is_floating else
-output_dtype.max)
elif not reduce_instance_dims and isinstance(x, tf.RaggedTensor):
raise NotImplementedError(
'Elementwise min_and_max does not support RaggedTensors.')
else:
combine_fn = np.max
default_accumulator_value = (-np.inf if x.dtype.is_floating else
-output_dtype.max)
x_batch_minus_min, x_batch_max = tf_utils.reduce_batch_minus_min_and_max(
x, reduce_instance_dims)
minus_x_min, x_max = _numeric_combine( # pylint: disable=unbalanced-tuple-unpacking
inputs=[x_batch_minus_min, x_batch_max],
fn=combine_fn,
default_accumulator_value=default_accumulator_value,
reduce_instance_dims=reduce_instance_dims)
return tf.cast(0 - minus_x_min, output_dtype), tf.cast(x_max, output_dtype)
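# Note on the single-combiner trick above: the dataset minimum is recovered as
# min(x) == -max(-x), so one max-style combiner produces both extremes. For
# example, with x = [3, -1, 4]: max(-x) = 1 and 0 - 1 == -1 == min(x), which is
# why the batch reduction yields `x_batch_minus_min` and the result is negated
# on the way out.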
def _min_and_max_per_key(
x: common_types.TensorType,
key: common_types.TensorType,
reduce_instance_dims: bool = True,
key_vocabulary_filename: Optional[str] = None,
name: Optional[str] = None
) -> Union[Tuple[tf.Tensor, tf.Tensor, tf.Tensor], tf.Tensor]:
"""Computes the min and max of the values of a `Tensor` or `CompositeTensor`.
In the case of a `CompositeTensor`, missing values are filled in for the
reduction: NaN for floating-point dtypes and the dtype's min for other dtypes.
This function operates under the assumption that the size of the key set
is small enough to fit in memory. Anything above a certain size is not
guaranteed to be handled properly, but support for larger key sets may be
available in a future version.
Args:
x: A `Tensor` or `CompositeTensor`.
key: A Tensor or `CompositeTensor` of dtype tf.string. If `x` is a
`CompositeTensor`, `key` must exactly match `x` in everything except
values.
reduce_instance_dims: By default collapses the batch and instance dimensions
to arrive at a single scalar output. If False, only collapses the batch
dimension and outputs a vector of the same shape as the input. The False
case is not currently supported for _min_and_max_per_key.
key_vocabulary_filename: (Optional) The file name for the key-output mapping
file. If None and a key is provided, this combiner assumes the keys fit in
memory and will not store the result in a file. If empty string, a file
name will be chosen based on the current scope. If not an empty string,
should be unique within a given preprocessing function.
name: (Optional) A name for this operation.
Returns:
Either:
(A) Three `Tensor`s. The first is the key vocab of type tf.string, and the
second two have same type as `x` (if key_vocabulary_filename is None).
(B) The filename where the key-value mapping is stored (if
key_vocabulary_filename is not None).
Raises:
TypeError: If the type of `x` is not supported.
"""
if key is None:
raise ValueError('A key is required for _min_and_max_per_key')
if not reduce_instance_dims:
raise NotImplementedError('Per-key elementwise reduction not supported')
with tf.compat.v1.name_scope(name, 'min_and_max_per_key'):
output_dtype = x.dtype
if (not reduce_instance_dims and
isinstance(x,
(tf.SparseTensor, tf.RaggedTensor)) and x.dtype.is_floating):
combine_fn = np.nanmax
default_accumulator_value = (np.nan if x.dtype.is_floating else
-output_dtype.max)
else:
combine_fn = np.max
default_accumulator_value = (-np.inf if x.dtype.is_floating else
-output_dtype.max)
key_vocab, x_batch_minus_min, x_batch_max = (
tf_utils.reduce_batch_minus_min_and_max_per_key(x, key))
key_values = _numeric_combine( # pylint: disable=unbalanced-tuple-unpacking
inputs=[x_batch_minus_min, x_batch_max],
fn=combine_fn,
default_accumulator_value=default_accumulator_value,
reduce_instance_dims=reduce_instance_dims,
key=key_vocab,
key_vocabulary_filename=key_vocabulary_filename)
if key_vocabulary_filename is not None:
return key_values
key, minus_x_min, x_max = key_values
return (
key,
tf.cast(0 - minus_x_min, output_dtype),
tf.cast(x_max, output_dtype))
def _sum_combine_fn_and_dtype(
input_dtype: tf.DType
) -> Tuple[tf.DType, Callable[[np.ndarray], np.ndarray]]:
output_dtype = _SUM_OUTPUT_DTYPE_MAP.get(input_dtype)
if output_dtype is None:
raise TypeError('Tensor type %r is not supported' % input_dtype)
return output_dtype, functools.partial(
np.sum, dtype=output_dtype.as_numpy_dtype)
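# Example of the widening behavior (values read off _SUM_OUTPUT_DTYPE_MAP):
#   output_dtype, fn = _sum_combine_fn_and_dtype(tf.int32)
#   # output_dtype is tf.int64 and fn is np.sum bound to dtype=np.int64,
#   # so int32 inputs are accumulated in 64 bits to avoid overflow.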
@common.log_api_use(common.ANALYZER_COLLECTION)
def sum( # pylint: disable=redefined-builtin
x: common_types.TensorType,
reduce_instance_dims: bool = True,
name: Optional[str] = None) -> tf.Tensor:
"""Computes the sum of the values of a `Tensor` over the whole dataset.
Args:
x: A `Tensor` or `CompositeTensor`. Its type must be floating point
(float{16|32|64}), integral (int{8|16|32|64}), or
unsigned integral (uint{8|16}).
reduce_instance_dims: By default collapses the batch and instance dimensions
to arrive at a single scalar output. If False, only collapses the batch
dimension and outputs a vector of the same shape as the input.
name: (Optional) A name for this operation.
Returns:
A `Tensor` containing the sum. If `x` is float32 or float64, the sum will
have the same type as `x`. If `x` is float16, the output is cast to float32.
If `x` is integral, the output is cast to [u]int64. If `x` is sparse and
reduce_instance_dims is False, 0 is returned where a column has no values
across batches.
Raises:
TypeError: If the type of `x` is not supported.
"""
with tf.compat.v1.name_scope(name, 'sum'):
if reduce_instance_dims:
x = tf.reduce_sum(input_tensor=tf_utils.get_values(x))
elif isinstance(x, tf.SparseTensor):
if x.dtype == tf.uint8 or x.dtype == tf.uint16:
x = tf.cast(x, tf.int64)
elif x.dtype == tf.uint32 or x.dtype == tf.uint64:
raise TypeError('Data type %r is not supported' % x.dtype)
x = tf.sparse.reduce_sum(x, axis=0)
elif isinstance(x, tf.RaggedTensor):
raise NotImplementedError(
'Elementwise sum does not support RaggedTensors.')
else:
x = tf.reduce_sum(input_tensor=x, axis=0)
output_dtype, sum_fn = _sum_combine_fn_and_dtype(x.dtype)
return _numeric_combine(
inputs=[x],
fn=sum_fn,
default_accumulator_value=0,
reduce_instance_dims=reduce_instance_dims,
output_dtypes=[output_dtype])[0]
def remove_leftmost_boundary(boundaries: tf.Tensor) -> tf.Tensor:
"""Removes the leftmost boundary from [1, None]-shaped `Tensor` of buckets."""
return boundaries[:, 1:]
@common.log_api_use(common.ANALYZER_COLLECTION)
def histogram(x: common_types.TensorType,
boundaries: Optional[Union[tf.Tensor, int]] = None,
categorical: Optional[bool] = False,
name: Optional[str] = None) -> Tuple[tf.Tensor, tf.Tensor]:
"""Computes a histogram over x, given the bin boundaries or bin count.
Ex (1):
counts, boundaries = histogram([0, 1, 0, 1, 0, 3, 0, 1], range(5))
counts: [4, 3, 0, 1, 0]
boundaries: [0, 1, 2, 3, 4]
Ex (2):
Can be used to compute class weights.
counts, classes = histogram([0, 1, 0, 1, 0, 3, 0, 1], categorical=True)
probabilities = counts / tf.reduce_sum(counts)
class_weights = {c.numpy(): 1.0 / p.numpy()
for c, p in zip(classes, probabilities)}
Args:
x: A `Tensor` or `CompositeTensor`.
boundaries: (Optional) A `Tensor` or `int` used to build the histogram;
ignored if `categorical` is True. If possible, provide boundaries as
multiple sorted values. Defaults to 10 intervals over the 0-1 range, or
find the min/max if an int is provided (not recommended because
multi-phase analysis is inefficient).
categorical: (Optional) A `bool` that treats `x` as discrete values if true.
name: (Optional) A name for this operation.
Returns:
counts: The histogram, as counts per bin.
boundaries: A `Tensor` used to build the histogram representing boundaries.
"""
with tf.compat.v1.name_scope(name, 'histogram'):
x = tf.reshape(tf_utils.get_values(x), [-1])
if categorical:
x_dtype = x.dtype
x = x if x_dtype == tf.string else tf.strings.as_string(x)
elements, counts = count_per_key(x)
if x_dtype != elements.dtype:
elements = tf.strings.to_number(elements, tf.int64)
return counts, elements
if boundaries is None:
boundaries = tf.range(11, dtype=tf.float32) / 10.0
elif isinstance(boundaries, int) or (isinstance(boundaries, tf.Tensor) and
boundaries.get_shape().ndims == 0):
min_value, max_value = _min_and_max(x, True)
boundaries = tf.linspace(
tf.cast(min_value, tf.float32), tf.cast(max_value, tf.float32),
tf.cast(boundaries, tf.int64))
# Shift the boundaries slightly to account for floating point errors,
# and due to the fact that the rightmost boundary is essentially ignored.
boundaries = tf.expand_dims(tf.cast(boundaries, tf.float32), 0) - 0.0001
bucket_indices = tf_utils.assign_buckets(
tf.cast(x, tf.float32), remove_leftmost_boundary(boundaries))
bucket_vocab, counts = count_per_key(tf.strings.as_string(bucket_indices))
counts = tf_utils.reorder_histogram(bucket_vocab, counts,
tf.size(boundaries) - 1)
return counts, boundaries
@common.log_api_use(common.ANALYZER_COLLECTION)
def size(x: common_types.TensorType,
reduce_instance_dims: bool = True,
name: Optional[str] = None) -> tf.Tensor:
"""Computes the total size of instances in a `Tensor` over the whole dataset.
Args:
x: A `Tensor` or `CompositeTensor`.
reduce_instance_dims: By default collapses the batch and instance dimensions
to arrive at a single scalar output. If False, only collapses the batch
dimension and outputs a vector of the same shape as the input.
name: (Optional) A name for this operation.
Returns:
A `Tensor` of type int64.
"""
with tf.compat.v1.name_scope(name, 'size'):
# Note: Calling `sum` defined in this module, not the builtin.
if isinstance(x, tf.SparseTensor):
ones_like_x = tf.SparseTensor(
indices=x.indices,
values=tf.ones_like(x.values, tf.int64),
dense_shape=x.dense_shape)
else:
ones_like_x = tf.ones_like(x, dtype=tf.int64)
return sum(ones_like_x, reduce_instance_dims)
@common.log_api_use(common.ANALYZER_COLLECTION)
def count_per_key(key: common_types.TensorType,
key_vocabulary_filename: Optional[str] = None,
name: Optional[str] = None):
"""Computes the count of each element of a `Tensor`.
Args:
key: A Tensor or `CompositeTensor` of dtype tf.string or tf.int.
key_vocabulary_filename: (Optional) The file name for the key-output mapping
file. If None and a key is provided, this combiner assumes the keys fit in
memory and will not store the result in a file. If empty string, a file
name will be chosen based on the current scope. If not an empty string,
should be unique within a given preprocessing function.
name: (Optional) A name for this operation.
Returns:
Either:
(A) Two `Tensor`s: one the key vocab with dtype of input;
the other the count for each key, dtype tf.int64. (if
key_vocabulary_filename is None).
(B) The filename where the key-value mapping is stored (if
key_vocabulary_filename is not None).
Raises:
TypeError: If the type of `key` is not supported.
"""
with tf.compat.v1.name_scope(name, 'count_per_key'):
key_dtype = key.dtype
batch_keys, batch_counts = tf_utils.reduce_batch_count_per_key(key)
output_dtype, sum_fn = _sum_combine_fn_and_dtype(tf.int64)
numeric_combine_result = _numeric_combine(
inputs=[batch_counts],
fn=sum_fn,
default_accumulator_value=0,
reduce_instance_dims=True,
output_dtypes=[output_dtype],
key=batch_keys,
key_vocabulary_filename=key_vocabulary_filename)
if key_vocabulary_filename is not None:
return numeric_combine_result
keys, counts = numeric_combine_result
if key_dtype is not tf.string:
keys = tf.strings.to_number(keys, key_dtype)
return keys, counts
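# Illustrative example (not a doctest): if the batches of a dataset together
# contain the keys ["a", "b", "a", "a"], count_per_key returns a key vocab
# holding "a" and "b" and the matching counts 3 and 1, both as full-dataset
# constants (the ordering of the keys is not significant here).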
@common.log_api_use(common.ANALYZER_COLLECTION)
def mean(x: common_types.TensorType,
reduce_instance_dims: bool = True,
name: Optional[str] = None,
output_dtype: Optional[tf.DType] = None) -> tf.Tensor:
"""Computes the mean of the values of a `Tensor` over the whole dataset.
Args:
x: A `Tensor` or `CompositeTensor`. Its type must be floating point
(float{16|32|64}), or integral ([u]int{8|16|32|64}).
reduce_instance_dims: By default collapses the batch and instance dimensions
to arrive at a single scalar output. If False, only collapses the batch
dimension and outputs a vector of the same shape as the input.
name: (Optional) A name for this operation.
output_dtype: (Optional) If not None, casts the output tensor to this type.
Returns:
A `Tensor` containing the mean. If `x` is floating point, the mean will have
the same type as `x`. If `x` is integral, the output is cast to float32.
NaNs and infinite input values are ignored.
Raises:
TypeError: If the type of `x` is not supported.
"""
with tf.compat.v1.name_scope(name, 'mean'):
return _mean_and_var(x, reduce_instance_dims, output_dtype)[0]
@common.log_api_use(common.ANALYZER_COLLECTION)
def var(x: common_types.TensorType,
reduce_instance_dims: bool = True,
name: Optional[str] = None,
output_dtype: Optional[tf.DType] = None) -> tf.Tensor:
"""Computes the variance of the values of a `Tensor` over the whole dataset.
Uses the biased variance (0 delta degrees of freedom), as given by
sum((x - mean(x))**2) / length(x).
Args:
x: `Tensor` or `CompositeTensor`. Its type must be floating point
(float{16|32|64}), or integral ([u]int{8|16|32|64}).
reduce_instance_dims: By default collapses the batch and instance dimensions
to arrive at a single scalar output. If False, only collapses the batch
dimension and outputs a vector of the same shape as the input.
name: (Optional) A name for this operation.
output_dtype: (Optional) If not None, casts the output tensor to this type.
Returns:
A `Tensor` containing the variance. If `x` is floating point, the variance
will have the same type as `x`. If `x` is integral, the output is cast to
float32. NaNs and infinite input values are ignored.
Raises:
TypeError: If the type of `x` is not supported.
"""
with tf.compat.v1.name_scope(name, 'var'):
return _mean_and_var(x, reduce_instance_dims, output_dtype)[1]
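# Quick numeric check of the "biased variance" convention documented above:
# for x = [1.0, 2.0, 3.0], mean(x) = 2.0 and
# sum((x - mean(x))**2) / len(x) = (1 + 0 + 1) / 3 = 2/3,
# i.e. the ddof=0 value np.var(x) returns, not the sample variance 1.0.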
def _mean_and_var(x: common_types.TensorType,
reduce_instance_dims: bool = True,
output_dtype: Optional[tf.DType] = None):
"""More efficient combined `mean` and `var`. See `var`."""
if output_dtype is None:
output_dtype = _FLOAT_OUTPUT_DTYPE_MAP.get(x.dtype)
if output_dtype is None:
raise TypeError('Tensor type %r is not supported' % x.dtype)
if not reduce_instance_dims and isinstance(x, tf.RaggedTensor):
raise NotImplementedError(
'Elementwise mean_and_var does not support RaggedTensors.')
with tf.compat.v1.name_scope('mean_and_var'):
x = tf.cast(x, output_dtype)
x_count, x_mean, x_variance = (
tf_utils.reduce_batch_count_mean_and_var(x, reduce_instance_dims))
combine_inputs = _WeightedMeanAndVarAccumulator(
count=x_count,
mean=x_mean,
variance=x_variance,
weight=tf.zeros([], tf.float32))
output_shape = ()
if not reduce_instance_dims:
# We need to use tf.expand_dims to artificially add a batch dimension.
output_shape = _get_output_shape_from_input(
tf.expand_dims(x_count, axis=0))
x_mean, x_var = _apply_cacheable_combiner(
WeightedMeanAndVarCombiner(output_dtype.as_numpy_dtype, output_shape),
*combine_inputs)
return x_mean, x_var
@common.log_api_use(common.ANALYZER_COLLECTION)
def tukey_location(x: common_types.TensorType,
reduce_instance_dims: Optional[bool] = True,
output_dtype: Optional[tf.DType] = None,
name: Optional[str] = None) -> tf.Tensor:
"""Computes the location of the values of a `Tensor` over the whole dataset.
This computes the location of x, assuming a Tukey HH distribution, i.e.
(x - tukey_location) / tukey_scale is a Tukey HH distribution with parameters
tukey_h_params. See the following publication for the definition of the Tukey
HH distribution:
Todd C. Headrick, and Mohan D. Pant. "Characterizing Tukey h and
hh-Distributions through L-Moments and the L-Correlation," ISRN Applied
Mathematics, vol. 2012, 2012. doi:10.5402/2012/980153
Args:
x: A `Tensor` or `CompositeTensor`. Its type must be floating point
(float{16|32|64}), or integral ([u]int{8|16|32|64}).
reduce_instance_dims: By default collapses the batch and instance dimensions
to arrive at a single scalar output. If False, only collapses the batch
dimension and outputs a vector of the same shape as the input.
output_dtype: (Optional) If not None, casts the output tensor to this type.
name: (Optional) A name for this operation.
Returns:
A `Tensor` containing the location. If `x` is floating point, the location
will have the same type as `x`. If `x` is integral, the output is cast to
float32.
Raises:
TypeError: If the type of `x` is not supported.
"""
with tf.compat.v1.name_scope(name, 'tukey_location'):
return _tukey_parameters(x, reduce_instance_dims, output_dtype)[0]
@common.log_api_use(common.ANALYZER_COLLECTION)
def tukey_scale(x: common_types.TensorType,
reduce_instance_dims: Optional[bool] = True,
output_dtype: Optional[tf.DType] = None,
name: Optional[str] = None) -> tf.Tensor:
"""Computes the scale of the values of a `Tensor` over the whole dataset.
This computes the scale of x, assuming a Tukey HH distribution, i.e.
(x - tukey_location) / tukey_scale is a Tukey HH distribution with parameters
tukey_h_params. See the following publication for the definition of the Tukey
HH distribution:
Todd C. Headrick, and Mohan D. Pant. "Characterizing Tukey h and
hh-Distributions through L-Moments and the L-Correlation," ISRN Applied
Mathematics, vol. 2012, 2012. doi:10.5402/2012/980153
Args:
x: A `Tensor` or `CompositeTensor`. Its type must be floating point
(float{16|32|64}), or integral ([u]int{8|16|32|64}).
reduce_instance_dims: By default collapses the batch and instance dimensions
to arrive at a single scalar output. If False, only collapses the batch
dimension and outputs a vector of the same shape as the input.
output_dtype: (Optional) If not None, casts the output tensor to this type.
name: (Optional) A name for this operation.
Returns:
A `Tensor` containing the scale. If `x` is floating point, the scale
will have the same type as `x`. If `x` is integral, the output is cast to
float32.
Raises:
TypeError: If the type of `x` is not supported.
"""
with tf.compat.v1.name_scope(name, 'tukey_scale'):
return _tukey_parameters(x, reduce_instance_dims, output_dtype)[1]
@common.log_api_use(common.ANALYZER_COLLECTION)
def tukey_h_params(x: common_types.TensorType,
reduce_instance_dims: bool = True,
output_dtype: Optional[tf.DType] = None,
name: Optional[str] = None) -> Tuple[tf.Tensor, tf.Tensor]:
"""Computes the h parameters of the values of a `Tensor` over the dataset.
This computes the parameters (hl, hr) of the samples, assuming a Tukey HH
distribution, i.e. (x - tukey_location) / tukey_scale is a Tukey HH
distribution with parameters hl (left parameter) and hr (right parameter).
See the following publication for the definition of the Tukey HH distribution:
Todd C. Headrick, and Mohan D. Pant. "Characterizing Tukey h and
hh-Distributions through L-Moments and the L-Correlation," ISRN Applied
Mathematics, vol. 2012, 2012. doi:10.5402/2012/980153
Args:
x: A `Tensor` or `CompositeTensor`. Its type must be floating point
(float{16|32|64}), or integral ([u]int{8|16|32|64}).
reduce_instance_dims: By default collapses the batch and instance dimensions
to arrive at a single scalar output. If False, only collapses the batch
dimension and outputs a vector of the same shape as the input.
output_dtype: (Optional) If not None, casts the output tensor to this type.
name: (Optional) A name for this operation.
Returns:
The tuple (hl, hr) containing two `Tensor` instances with the hl and hr
parameters. If `x` is floating point, each parameter will have the same type
as `x`. If `x` is integral, the output is cast to float32.
Raises:
TypeError: If the type of `x` is not supported.
"""
with tf.compat.v1.name_scope(name, 'tukey_h_params'):
return _tukey_parameters(x, reduce_instance_dims, output_dtype)[2:]
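# Background note (paraphrasing the Tukey h construction from the reference
# above; stated here as an assumption rather than taken from this module): a
# standard normal Z is mapped to a Tukey h variable via Z * exp(h * Z**2 / 2),
# and the "hh" variant uses a left parameter hl for Z < 0 and a right
# parameter hr for Z >= 0. That is why _tukey_parameters below returns the
# four values (location, scale, hl, hr).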
def _tukey_parameters(
x: common_types.TensorType,
reduce_instance_dims: bool = True,
output_dtype: Optional[tf.DType] = None
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
"""Efficient computation of L-moments."""
if output_dtype is None:
output_dtype = _FLOAT_OUTPUT_DTYPE_MAP.get(x.dtype)
if output_dtype is None:
raise TypeError('Tensor type %r is not supported' % x.dtype)
with tf.compat.v1.name_scope('tukey_parameters'):
x = tf.cast(x, output_dtype)
(count_l1, l1, count_l2, l2, count_l3, l3, count_l4, l4) = (
tf_utils.reduce_batch_count_l_moments(x, reduce_instance_dims))
combine_inputs = _LMomentsAccumulator(
count_l1=count_l1,
count_l2=count_l2,
count_l3=count_l3,
count_l4=count_l4,
l1=l1,
l2=l2,
l3=l3,
l4=l4)
output_shape = ()
if not reduce_instance_dims:
output_shape = _get_output_shape_from_input(x)
x_loc, x_scale, hl_param, hr_param = _apply_cacheable_combiner(
_LMomentsCombiner(output_dtype.as_numpy_dtype, output_shape),
*combine_inputs)
return x_loc, x_scale, hl_param, hr_param
def _mean_and_var_per_key(
x: common_types.TensorType,
key: common_types.TensorType,
reduce_instance_dims: bool = True,
output_dtype: Optional[tf.DType] = None,
key_vocabulary_filename: Optional[str] = None
) -> Union[Tuple[tf.Tensor, tf.Tensor, tf.Tensor], tf.Tensor,
common_types.Asset]:
"""`mean_and_var` by group, specified by key.
Args:
x: A `Tensor` or `CompositeTensor`.
key: A Tensor or `CompositeTensor` of dtype tf.string. If `x` is
a `CompositeTensor`, `key` must exactly match `x` in everything except
values.
reduce_instance_dims: (Optional) By default collapses the batch and instance
dimensions to arrive at a single scalar output. The False case is not
currently supported for _mean_and_var_per_key.
output_dtype: (Optional) Desired output dtype, otherwise inferred.
key_vocabulary_filename: (Optional) The file name for the key-output mapping
file. If None and a key is provided, this combiner assumes the keys fit in
memory and will not store the result in a file. If empty string, a file
name will be chosen based on the current scope. If not an empty string,
should be unique within a given preprocessing function.
Returns:
Either:
(A) Three `Tensor`s. The first is the key vocab of type tf.string, and the
second two have same type as `x` (if key_vocabulary_filename is None).
(B) The filename where the key-value mapping is stored (if
key_vocabulary_filename is not None).
NaNs and infinite input values are ignored.
"""
if output_dtype is None:
output_dtype = _FLOAT_OUTPUT_DTYPE_MAP.get(x.dtype)
if output_dtype is None:
raise TypeError('Tensor type %r is not supported' % x.dtype)
if key is None:
raise ValueError('A non-None key is required for _mean_and_var_per_key')
if not reduce_instance_dims:
raise NotImplementedError('Per-key elementwise reduction not supported')
with tf.compat.v1.name_scope('mean_and_var_per_key'):
x = tf.cast(x, output_dtype)
key_vocab, key_counts, key_means, key_variances = (
tf_utils.reduce_batch_count_mean_and_var_per_key(
x, key, reduce_instance_dims=reduce_instance_dims))
output_shape = ()
combine_inputs = _WeightedMeanAndVarAccumulator(
count=key_counts,
mean=key_means,
variance=key_variances,
weight=tf.zeros_like(key_means, tf.float32))
combiner = WeightedMeanAndVarCombiner(output_dtype.as_numpy_dtype,
output_shape)
if key_vocabulary_filename is not None:
key_vocabulary_filename = _maybe_get_per_key_vocab_filename(
key_vocabulary_filename)
return _apply_cacheable_combiner_per_key_large(
combiner, key_vocabulary_filename, key_vocab, *combine_inputs)
key, key_mean, key_var = _apply_cacheable_combiner_per_key(
combiner, key_vocab, *combine_inputs)
return key, key_mean, key_var
class _WeightedMeanAndVarAccumulator(
tfx_namedtuple.namedtuple('WeightedMeanAndVarAccumulator',
['count', 'mean', 'variance', 'weight'])):
"""Container for WeightedMeanAndVarCombiner intermediate values."""
@classmethod
def make_nan_to_num(cls,
counts,
means,
variances,
weights,
compute_variance=False,
compute_weighted=True):
"""Util function to replace NaN with 0 and inf with large finite numbers."""
if compute_variance:
variances = np.nan_to_num(variances, copy=True)
if compute_weighted:
weights = np.nan_to_num(weights, copy=True)
return cls(
np.array(counts), np.nan_to_num(means, copy=True), variances, weights)
class WeightedMeanAndVarCombiner(analyzer_nodes.Combiner):
"""Combines a PCollection of accumulators to compute mean and variance."""
accumulator_class = _WeightedMeanAndVarAccumulator
def __init__(self,
output_numpy_dtype,
output_shape: Optional[Collection[Optional[int]]] = None,
compute_variance: bool = True,
compute_weighted: bool = False):
"""Init method for WeightedMeanAndVarCombiner.
Args:
output_numpy_dtype: A numpy dtype that the outputs are cast to.
output_shape: The shape of the resulting Tensors.
compute_variance: A bool indicating whether or not a variance should be
calculated and returned.
compute_weighted: A bool indicating whether or not weights are provided
and all calculations should be weighted.
"""
self._output_numpy_dtype = output_numpy_dtype
self._output_shape = output_shape
self._compute_variance = compute_variance
self._compute_weighted = compute_weighted
if self._compute_variance and self._compute_weighted:
raise ValueError(
'WeightedMeanAndVarCombiner does not yet support weighted variance')
if self._output_shape is None:
raise ValueError('An output_shape must be provided.')
def create_accumulator(self) -> _WeightedMeanAndVarAccumulator:
"""Create an accumulator with all zero entries."""
# TODO(b/131325061): Determine whether counts/weights should always be
# scalars or if we want to continue supporting multi-dimensional arrays.
initial_count, initial_weight = np.array(0), np.array(0.)
# If we know the exact shape, initialize accumulator values with zeros of
# the exact shape. For unknown dimensions, initialize with a 1D 0 array.
output_shape = [dim if dim is not None else 0 for dim in self._output_shape]
initial_mean, initial_var = np.zeros(output_shape), np.zeros(output_shape)
return _WeightedMeanAndVarAccumulator(initial_count, initial_mean,
initial_var, initial_weight)
def add_input(
self, accumulator: _WeightedMeanAndVarAccumulator,
batch_values: _WeightedMeanAndVarAccumulator
) -> _WeightedMeanAndVarAccumulator:
"""Composes an accumulator from batch_values and calls merge_accumulators.
Args:
accumulator: The `_WeightedMeanAndVarAccumulator` computed so far.
batch_values: A `_WeightedMeanAndVarAccumulator` for the current batch.
Returns:
A `_WeightedMeanAndVarAccumulator` which is accumulator and batch_values
combined.
"""
new_accumulator = _WeightedMeanAndVarAccumulator(*batch_values)
return self._combine_mean_and_var_accumulators(accumulator, new_accumulator)
def merge_accumulators(
self, accumulators: List[_WeightedMeanAndVarAccumulator]
) -> _WeightedMeanAndVarAccumulator:
"""Merges several `_WeightedMeanAndVarAccumulator`s to a single accumulator.
Args:
accumulators: A list of `_WeightedMeanAndVarAccumulator`s.
Returns:
The sole merged `_WeightedMeanAndVarAccumulator`.
"""
accumulators = iter(accumulators)
result = next(accumulators)
for accumulator in accumulators:
result = self._combine_mean_and_var_accumulators(result, accumulator)
return result
def extract_output(
self, accumulator: _WeightedMeanAndVarAccumulator
) -> Union[Tuple[float, float], _WeightedMeanAndVarAccumulator]:
"""Converts an accumulator into the output accumulator or (mean, var) tuple.
Args:
accumulator: the final `_WeightedMeanAndVarAccumulator` value.
Returns:
A _WeightedMeanAndVarAccumulator or a 2-tuple composed of (mean, var).
"""
if self._compute_variance and not self._compute_weighted:
return (self._output_numpy_dtype(accumulator.mean),
self._output_numpy_dtype(accumulator.variance))
else:
return _WeightedMeanAndVarAccumulator(
np.int64(accumulator.count),
self._output_numpy_dtype(accumulator.mean),
self._output_numpy_dtype(accumulator.variance),
self._output_numpy_dtype(accumulator.weight))
def output_tensor_infos(self) -> List[analyzer_nodes.TensorInfo]:
# The output is (mean, var).
if self._compute_variance and not self._compute_weighted:
return [
analyzer_nodes.TensorInfo(
tf.as_dtype(self._output_numpy_dtype), self._output_shape, None)
] * 2
else:
return [
analyzer_nodes.TensorInfo(
tf.as_dtype(np.int64), self._output_shape, None),
analyzer_nodes.TensorInfo(
tf.as_dtype(self._output_numpy_dtype), self._output_shape, None),
analyzer_nodes.TensorInfo(
tf.as_dtype(self._output_numpy_dtype), self._output_shape, None),
analyzer_nodes.TensorInfo(
tf.as_dtype(self._output_numpy_dtype), self._output_shape, None)
]
def _combine_mean_and_var_accumulators(
self, a: _WeightedMeanAndVarAccumulator,
b: _WeightedMeanAndVarAccumulator) -> _WeightedMeanAndVarAccumulator:
"""Combines two mean and var accumulators.
Args:
a: A _WeightedMeanAndVarAccumulator.
b: A _WeightedMeanAndVarAccumulator.
Returns:
A _WeightedMeanAndVarAccumulator computed as the combination of a and b.
"""
# NaNs get preserved through division by a.count + b.count.
a = _WeightedMeanAndVarAccumulator.make_nan_to_num(
*a,
compute_variance=self._compute_variance,
compute_weighted=self._compute_weighted)
b = _WeightedMeanAndVarAccumulator.make_nan_to_num(
*b,
compute_variance=self._compute_variance,
compute_weighted=self._compute_weighted)
# a.count >= b.count following this logic.
if np.sum(a.count) < np.sum(b.count):
a, b = b, a
if np.sum(a.count) == 0:
return b
a_count, b_count = _pad_arrays_to_match(a.count, b.count)
a_mean, b_mean = _pad_arrays_to_match(a.mean, b.mean)
if self._compute_variance:
a_variance, b_variance = _pad_arrays_to_match(a.variance, b.variance)
if self._compute_weighted:
a_weight, b_weight = _pad_arrays_to_match(a.weight, b.weight)
combined_total = a_count + b_count
# Mean and variance update formulas which are more numerically stable when
# a and b vary in magnitude.
if self._compute_weighted:
combined_weights_mean = (
a_weight + (b_count / combined_total) * (b_weight - a_weight))
combined_mean = a_mean + (b_count * b_weight /
(combined_total * combined_weights_mean)) * (
b_mean - a_mean)
else:
combined_weights_mean = np.ones(shape=combined_total.shape)
combined_mean = a_mean + (b_count / combined_total * (b_mean - a_mean))
if self._compute_variance:
# TODO(zoyahav): Add an option for weighted variance if needed.
assert not self._compute_weighted
combined_variance = (
a_variance + (b_count / combined_total) * (b_variance - a_variance +
((b_mean - combined_mean) *
(b_mean - a_mean))))
else:
combined_variance = np.zeros(combined_mean.shape)
return _WeightedMeanAndVarAccumulator(combined_total, combined_mean,
combined_variance,
combined_weights_mean)
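# Illustrative numpy-only sketch (not part of this module) of the chunked,
# numerically stable mean/variance update used above for the unweighted case.
# The helper below is hypothetical and exists purely for demonstration.
def _demo_combine_mean_and_var():
  import numpy as np
  rng = np.random.RandomState(0)
  x = rng.randn(1000)
  count, mean, var = 0.0, 0.0, 0.0
  for chunk in np.array_split(x, 4):
    b_count, b_mean, b_var = chunk.size, chunk.mean(), chunk.var()
    total = count + b_count
    new_mean = mean + (b_count / total) * (b_mean - mean)
    var = var + (b_count / total) * (
        b_var - var + (b_mean - new_mean) * (b_mean - mean))
    count, mean = total, new_mean
  # The streamed statistics match the full-batch statistics.
  np.testing.assert_allclose(mean, x.mean())
  np.testing.assert_allclose(var, x.var())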
# TODO(b/165020671): Optimize padding to save up to 15% computing resource.
def _pad_arrays_to_match(a, b):
"""Pad the ndarray values to match dimensions as needed.
If the dimensions of the ndarrays values differ, we pad the smaller of the
two arrays with zeros to be the same shape as the larger. In other words,
the missing accumulator indices are assumed to be zero, and combining
  a = [1, 2, 3] with b = [1, 2] is equivalent to combining with b = [1, 2, 0].
Args:
    a: ndarray to be matched in shape with b
    b: ndarray to be matched in shape with a
Returns:
a: a padded to same dimensions as b
b: b padded to same dimensions as a
"""
if a.shape == b.shape:
return a, b
padding_a, padding_b = [], []
for a_dim, b_dim in zip(a.shape, b.shape):
a_pad = b_pad = (0, 0)
delta = a_dim - b_dim
if delta > 0:
b_pad = (0, abs(delta))
elif delta < 0:
a_pad = (0, abs(delta))
padding_a.append(a_pad)
padding_b.append(b_pad)
if padding_a:
a = np.pad(a, padding_a, mode='constant')
if padding_b:
b = np.pad(b, padding_b, mode='constant')
return a, b
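# Illustrative check (not part of this module) of the padding semantics
# described above: combining [1, 2, 3] with [1, 2] behaves as if the shorter
# array were [1, 2, 0].
def _demo_pad_arrays_to_match():
  import numpy as np
  padded_a, padded_b = _pad_arrays_to_match(
      np.array([1, 2, 3]), np.array([1, 2]))
  np.testing.assert_array_equal(padded_a, [1, 2, 3])
  np.testing.assert_array_equal(padded_b, [1, 2, 0])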
class _LMomentsAccumulator(
tfx_namedtuple.namedtuple('LMomentsAccumulator', [
'count_l1', 'count_l2', 'count_l3', 'count_l4', 'l1', 'l2', 'l3', 'l4'
])):
"""Container for _LMomentsCombiner intermediate values."""
@classmethod
def make_nan_to_num(cls, count_l1, count_l2, count_l3, count_l4,
l1, l2, l3, l4):
return cls(
np.array(count_l1), np.array(count_l2), np.array(count_l3),
np.array(count_l4), np.nan_to_num(l1), np.nan_to_num(l2),
np.nan_to_num(l3), np.nan_to_num(l4))
def __reduce__(self):
return self.__class__, tuple(self)
class _LMomentsCombiner(analyzer_nodes.Combiner):
"""Combines a PCollection of accumulators to compute L-moments."""
accumulator_class = _LMomentsAccumulator
def __init__(self, output_numpy_dtype, output_shape):
"""Init method for _LMomentsCombiner.
Args:
output_numpy_dtype: A numpy dtype that the outputs are cast to.
output_shape: The shape of the resulting Tensors.
"""
self._output_numpy_dtype = output_numpy_dtype
self._output_shape = output_shape
def create_accumulator(self):
"""Create an accumulator with all zero entries."""
# If we know the exact shape, initialize accumulator values with zeros of
# the exact shape. For unknown dimensions, initialize with a 1D 0 array
# (this accumulator will be discarded by _combine_accumulators).
output_shape = () if None in self._output_shape else self._output_shape
initial_moment = np.zeros(output_shape, dtype=self._output_numpy_dtype)
initial_count = np.zeros(output_shape, dtype=self._output_numpy_dtype)
return _LMomentsAccumulator(
initial_count, initial_count, initial_count, initial_count,
initial_moment, initial_moment, initial_moment, initial_moment)
def add_input(self, accumulator, batch_values):
"""Composes an accumulator from batch_values and calls merge_accumulators.
Args:
accumulator: The `_LMomentsAccumulator` computed so far.
batch_values: A `_LMomentsAccumulator` for the current batch.
Returns:
A `_LMomentsAccumulator` which is accumulator and batch_values combined.
"""
new_accumulator = _LMomentsAccumulator(*batch_values)
return self._combine_accumulators(accumulator, new_accumulator)
def merge_accumulators(self, accumulators):
"""Merges several `_LMomentsAccumulator`s to a single accumulator.
Args:
accumulators: A list of `_LMomentsAccumulator`s.
Returns:
The sole merged `_LMomentsAccumulator`.
"""
accumulators = iter(accumulators)
result = next(accumulators)
for accumulator in accumulators:
result = self._combine_accumulators(result, accumulator)
return result
def extract_output(self, accumulator):
"""Converts an accumulator into the output (loc, scale, hl, hr) tuple.
Estimates the parameters of a Tukey HH distribution, given estimates of the
first four L-moments. The parameters are: location, scale, hl, and hr. If
x is the input sample, then (x - location) / scale is distributed according
to the Tukey HH distribution with parameters hl (left parameter) and hr
(right parameter).
Args:
accumulator: the final `_LMomentsAccumulator` value.
Returns:
A 4-tuple composed of (location, scale, hl, hr).
"""
# To compute kurtosis, we need positive scale and at least one quadruplet.
    # If this is not the case, L-skewness and L-kurtosis are set to zero, which
# gives hl=0, hr=0 and samples are treated as in the Gaussian case.
valid_scale = accumulator.l2 > 0.0
valid_kurtosis = np.logical_and(valid_scale, accumulator.count_l4 > 0.0)
l_skewness = np.true_divide(accumulator.l3, accumulator.l2,
where=valid_kurtosis,
out=np.zeros_like(accumulator.l3))
l_kurtosis = np.true_divide(accumulator.l4, accumulator.l2,
where=valid_kurtosis,
out=np.zeros_like(accumulator.l4))
l_skewness_and_kurtosis = np.stack((l_skewness, l_kurtosis), axis=0)
h_params = np.apply_along_axis(
gaussianization.compute_tukey_hh_params, 0, l_skewness_and_kurtosis)
hh_l_mean, hh_l_scale = gaussianization.tukey_hh_l_mean_and_scale(h_params)
scale = np.true_divide(accumulator.l2, hh_l_scale,
where=valid_scale, out=np.ones_like(accumulator.l2))
loc = accumulator.l1 - scale * hh_l_mean
hl = h_params[0, ...]
hr = h_params[1, ...]
return [self._output_numpy_dtype(x) for x in [loc, scale, hl, hr]]
def output_tensor_infos(self):
# The output is (loc, scale, hl, hr).
return [
analyzer_nodes.TensorInfo(
tf.as_dtype(self._output_numpy_dtype), self._output_shape, None)
] * 4
@property
def accumulator_coder(self):
# TODO(b/170510451): Re-enable caching for this Combiner.
return None
def _combine_accumulators(self, a, b):
"""Combines two accumulators.
Args:
a: A _LMomentsAccumulator.
b: A _LMomentsAccumulator.
Returns:
A _LMomentsAccumulator computed as the combination of a and b.
"""
# NaNs get preserved through division by a.count + b.count.
a = _LMomentsAccumulator.make_nan_to_num(*a)
b = _LMomentsAccumulator.make_nan_to_num(*b)
# If one accumulator is empty return the other.
if np.sum(a.count_l1) < np.sum(b.count_l1):
a, b = b, a
if np.sum(b.count_l1) == 0:
return a
a_count_l1, b_count_l1 = _pad_arrays_to_match(a.count_l1, b.count_l1)
a_l1, b_l1 = _pad_arrays_to_match(a.l1, b.l1)
a_count_l2, b_count_l2 = _pad_arrays_to_match(a.count_l2, b.count_l2)
a_l2, b_l2 = _pad_arrays_to_match(a.l2, b.l2)
a_count_l3, b_count_l3 = _pad_arrays_to_match(a.count_l3, b.count_l3)
a_l3, b_l3 = _pad_arrays_to_match(a.l3, b.l3)
a_count_l4, b_count_l4 = _pad_arrays_to_match(a.count_l4, b.count_l4)
a_l4, b_l4 = _pad_arrays_to_match(a.l4, b.l4)
combined_count_l1 = a_count_l1 + b_count_l1
combined_count_l2 = a_count_l2 + b_count_l2
combined_count_l3 = a_count_l3 + b_count_l3
combined_count_l4 = a_count_l4 + b_count_l4
combined_l1 = (a_l1 + np.true_divide(
b_count_l1, combined_count_l1, where=combined_count_l1 > 0,
out=np.zeros_like(a_l1)) * (b_l1 - a_l1))
combined_l2 = (a_l2 + np.true_divide(
b_count_l2, combined_count_l2, where=combined_count_l2 > 0,
out=np.zeros_like(a_l2)) * (b_l2 - a_l2))
combined_l3 = (a_l3 + np.true_divide(
b_count_l3, combined_count_l3, where=combined_count_l3 > 0,
out=np.zeros_like(a_l3)) * (b_l3 - a_l3))
combined_l4 = (a_l4 + np.true_divide(
b_count_l4, combined_count_l4, where=combined_count_l4 > 0,
out=np.zeros_like(a_l4)) * (b_l4 - a_l4))
return _LMomentsAccumulator(
combined_count_l1, combined_count_l2, combined_count_l3,
combined_count_l4, combined_l1, combined_l2, combined_l3, combined_l4)
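# Illustrative numpy check (not part of this module): each per-order update
# above is an incremental weighted average, i.e.
# a + (n_b / (n_a + n_b)) * (b - a) == (n_a * a + n_b * b) / (n_a + n_b).
def _demo_incremental_weighted_average():
  import numpy as np
  n_a, n_b = 30.0, 70.0
  a, b = 1.25, -0.5
  incremental = a + (n_b / (n_a + n_b)) * (b - a)
  np.testing.assert_allclose(incremental, (n_a * a + n_b * b) / (n_a + n_b))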
def sanitized_vocab_filename(filename=None, prefix=None):
"""Generates a sanitized filename either from the given filename or the scope.
If filename is specified, provide a sanitized version of the given filename.
Otherwise generate a filename from the current scope. Note that it is the
caller's responsibility to ensure that filenames are unique across calls within
a given preprocessing function.
Args:
    filename: A filename to sanitize; non-alpha characters are replaced with
      underscores and whitespace with hyphens.
prefix: Prefix to use for the name of the vocab file, if filename
is not given.
Returns:
A valid filename.
Raises:
    ValueError: If neither filename nor prefix is specified, or if both
      are specified.
"""
if filename is None and prefix is None:
raise ValueError('Both filename and prefix cannot be None.')
if filename is not None and prefix is not None:
raise ValueError('Only one of filename or prefix can be specified.')
if filename is None:
filename = prefix + tf.compat.v1.get_default_graph().get_name_scope()
# Replace non-alpha characters (excluding whitespaces) with '_'.
filename = re.sub(r'[^\w\s-]', '_', filename).strip()
# Replace whitespaces with '-'.
return re.sub(r'[-\s]+', '-', filename)
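# Illustrative example (not part of the module): with the two substitutions
# above, a name such as 'my vocab/file!' first becomes 'my vocab_file_'
# (characters outside [\w\s-] become '_') and is then returned as
# 'my-vocab_file_' (whitespace runs become '-').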
def _get_vocab_filename(vocab_filename, store_frequency):
"""Returns a sanitized vocabulary filename with appropriate prefix applied.
Args:
vocab_filename: The file name for the vocabulary file. If none, the
"vocabulary" scope name in the context of this graph will be used as the
file name.
store_frequency: A bool that is true when the vocabulary for which this
generates a filename stores term frequency. False otherwise.
Returns:
A valid filename.
"""
if vocab_filename is not None:
prefix = None
elif store_frequency:
prefix = VOCAB_FREQUENCY_FILENAME_PREFIX
else:
prefix = VOCAB_FILENAME_PREFIX
# Make the file name path safe.
return sanitized_vocab_filename(vocab_filename, prefix=prefix)
def _maybe_get_per_key_vocab_filename(key_vocabulary_filename):
if key_vocabulary_filename == '': # pylint: disable=g-explicit-bool-comparison
key_vocabulary_filename = _get_vocab_filename(vocab_filename=None,
store_frequency=False)
return key_vocabulary_filename
# TODO(b/116308354): frequency_threshold is misleading since this threshold can
# be applied to mutual information rather than frequency.
def _get_top_k_and_frequency_threshold(top_k, frequency_threshold):
"""Validate `top_k` and `frequency_threshold` values and convert to number."""
if top_k is not None:
top_k = int(top_k)
if top_k <= 0:
raise ValueError('top_k must be positive, but got: %r' % top_k)
if frequency_threshold is not None:
frequency_threshold = float(frequency_threshold)
if frequency_threshold < 0:
raise ValueError(
'frequency_threshold must be non-negative, but got: %r' %
frequency_threshold)
elif frequency_threshold <= 1:
# Note: this warning is misleading in the context where tokens are ranked
# based on mutual information rather than frequency.
tf.compat.v1.logging.warn(
'frequency_threshold %d <= 1 is a no-op, use None instead.',
frequency_threshold)
return top_k, frequency_threshold
class _VocabOrderingType:
"""Class for all vocab ordering types."""
# Orders vocabulary based on the simple frequency of the token
FREQUENCY = 1
# Orders vocabulary based on the weighted frequency of the token
WEIGHTED_FREQUENCY = 2
# Orders vocabulary based on the weighted mutual
# information of token with the label
WEIGHTED_MUTUAL_INFORMATION = 3
# Experimental
WEIGHTED_LABELS = 4
# Orders vocabulary based on the mutual information
# of token with the label and without weight.
MUTUAL_INFORMATION = 5
def register_vocab(sanitized_filename: str,
vocabulary_size: Optional[tf.Tensor] = None,
vocabulary_key: Optional[str] = None,
file_format: common_types
.VocabularyFileFormatType = DEFAULT_VOCABULARY_FILE_FORMAT):
"""Registers the specificed vocabulary within the asset map.
Args:
    sanitized_filename: The sanitized filename of the vocabulary.
vocabulary_size: The size of the vocabulary.
vocabulary_key: The key of the vocabulary to use.
file_format: The format of the vocabulary file (text or tfrecord_gzip).
"""
if vocabulary_key is None:
vocabulary_key = sanitized_filename
filename = ('{}.tfrecord.gz'.format(sanitized_filename)
if file_format == 'tfrecord_gzip' else sanitized_filename)
annotators.annotate_asset(vocabulary_key, filename)
if vocabulary_size is not None:
annotators.annotate_vocab_size(vocabulary_key, vocabulary_size)
def get_empy_vocabulary_dummy_value(
dtype: Union[tf.dtypes.DType, str]) -> Tuple[int, bytes]:
"""Returns a vocabulary entry to use in case of an empty vocabulary."""
# TODO(b/62272023) remove this workaround if/when fixed on tensorflow.
# If the vocabulary is empty add a dummy value with count one so
# the tensorflow index operations don't fail to initialize with empty
# tensors downstream.
dummy_value = (b'49d0cd50-04bb-48c0-bc6f-5b575dce351a'
if tf.dtypes.as_dtype(dtype) == tf.string else b'-1')
return (1, dummy_value)
# TODO(KesterTong): Once multiple outputs are supported, return indices too.
# TODO(b/117796748): Add coverage key feature input as alternative to `key_fn`.
# TODO(tensorflow/community) the experimental fingerprint_shuffle argument is a
# workaround for the inability to appropriately rebalance sharded variables on
# TF 1.0. The following TF 2.0 proposal should address this issue in the future
# https://github.com/tensorflow/community/blob/master/rfcs/20190116-embedding-partitioned-variable.md#goals
@common.log_api_use(common.ANALYZER_COLLECTION)
def vocabulary(
x: common_types.TensorType,
top_k: Optional[int] = None,
frequency_threshold: Optional[int] = None,
vocab_filename: Optional[str] = None,
store_frequency: Optional[bool] = False,
weights: Optional[tf.Tensor] = None,
labels: Optional[tf.Tensor] = None,
use_adjusted_mutual_info: bool = False,
min_diff_from_avg: Optional[int] = None,
coverage_top_k: Optional[int] = None,
coverage_frequency_threshold: Optional[int] = None,
key_fn: Optional[Callable[[Any], Any]] = None,
fingerprint_shuffle: Optional[bool] = False,
file_format: common_types
.VocabularyFileFormatType = DEFAULT_VOCABULARY_FILE_FORMAT,
name: Optional[str] = None) -> common_types.TemporaryAnalyzerOutputType:
r"""Computes the unique values of a `Tensor` over the whole dataset.
  Computes the unique values taken by `x`, which can be a `Tensor` or
`CompositeTensor` of any size. The unique values will be aggregated over all
dimensions of `x` and all instances.
In case `file_format` is 'text' and one of the tokens contains the '\n' or
'\r' characters or is empty it will be discarded.
If an integer `Tensor` is provided, its semantic type should be categorical
not a continuous/numeric, since computing a vocabulary over a continuous
feature is not appropriate.
The unique values are sorted by decreasing frequency and then reverse
lexicographical order (e.g. [('a', 5), ('c', 3), ('b', 3)]). This is true even
if `x` is numerical dtype (e.g. [('3', 5), ('2', 3), ('111', 3)]).
For large datasets it is highly recommended to either set frequency_threshold
or top_k to control the size of the output, and also the run time of this
operation.
When labels are provided, we filter the vocabulary based on the relationship
between the token's presence in a record and the label for that record, using
(possibly adjusted) Mutual Information. Note: If labels are provided, the x
  input must be a unique set of tokens per record, as the semantics of the mutual
information calculation depend on a multi-hot representation of the input.
Having unique input tokens per row is advisable but not required for a
frequency-based vocabulary.
WARNING: The following is experimental and is still being actively worked on.
Supply `key_fn` if you would like to generate a vocabulary with coverage over
specific keys.
A "coverage vocabulary" is the union of two vocabulary "arms". The "standard
arm" of the vocabulary is equivalent to the one generated by the same function
call with no coverage arguments. Adding coverage only appends additional
entries to the end of the standard vocabulary.
The "coverage arm" of the vocabulary is determined by taking the
`coverage_top_k` most frequent unique terms per key. A term's key is obtained
by applying `key_fn` to the term. Use `coverage_frequency_threshold` to lower
bound the frequency of entries in the coverage arm of the vocabulary.
Note this is currently implemented for the case where the key is contained
within each vocabulary entry (b/117796748).
Args:
x: A categorical/discrete input `Tensor` or `CompositeTensor` with dtype
tf.string or tf.int[8|16|32|64]. The inputs should generally be unique per
row (i.e. a bag of words/ngrams representation).
top_k: Limit the generated vocabulary to the first `top_k` elements. If set
to None, the full vocabulary is generated.
frequency_threshold: Limit the generated vocabulary only to elements whose
absolute frequency is >= to the supplied threshold. If set to None, the
full vocabulary is generated. Absolute frequency means the number of
occurrences of the element in the dataset, as opposed to the proportion of
instances that contain that element.
vocab_filename: The file name for the vocabulary file. If None, a file name
will be chosen based on the current scope. If not None, should be unique
      within a given preprocessing function. NOTE: To make your pipelines
      resilient to implementation details, set `vocab_filename` explicitly
      whenever a downstream component relies on the vocabulary file name.
store_frequency: If True, frequency of the words is stored in the vocabulary
file. In the case labels are provided, the mutual information is stored in
the file instead. Each line in the file will be of the form
'frequency word'. NOTE: if this is True then the computed vocabulary
cannot be used with `tft.apply_vocabulary` directly, since frequencies are
added to the beginning of each row of the vocabulary, which the mapper
will not ignore.
weights: (Optional) Weights `Tensor` for the vocabulary. It must have the
same shape as x.
labels: (Optional) Labels dense `Tensor` for the vocabulary. If provided,
the vocabulary is calculated based on mutual information with the label,
rather than frequency. The labels must have the same batch dimension as x.
If x is sparse, labels should be a 1D tensor reflecting row-wise labels.
If x is dense, labels can either be a 1D tensor of row-wise labels, or a
dense tensor of the identical shape as x (i.e. element-wise labels).
Labels should be a discrete integerized tensor (If the label is numeric,
it should first be bucketized; If the label is a string, an integer
vocabulary should first be applied). Note: `CompositeTensor` labels are
not yet supported (b/134931826). WARNING: When labels are provided, the
frequency_threshold argument functions as a mutual information
threshold, which is a float. TODO(b/116308354): Fix confusing naming.
use_adjusted_mutual_info: If true, and labels are provided, calculate
vocabulary using adjusted rather than raw mutual information.
min_diff_from_avg: MI (or AMI) of a feature x label will be adjusted to zero
whenever the difference between count and the expected (average) count is
lower than min_diff_from_average. This can be thought of as a regularizing
parameter that pushes small MI/AMI values to zero. If None, a default
parameter will be selected based on the size of the dataset (see
calculate_recommended_min_diff_from_avg).
coverage_top_k: (Optional), (Experimental) The minimum number of elements
per key to be included in the vocabulary.
coverage_frequency_threshold: (Optional), (Experimental) Limit the coverage
arm of the vocabulary only to elements whose absolute frequency is >= this
threshold for a given key.
key_fn: (Optional), (Experimental) A fn that takes in a single entry of `x`
and returns the corresponding key for coverage calculation. If this is
`None`, no coverage arm is added to the vocabulary.
fingerprint_shuffle: (Optional), (Experimental) Whether to sort the
vocabularies by fingerprint instead of counts. This is useful for load
balancing on the training parameter servers. Shuffle only happens while
writing the files, so all the filters above (top_k, frequency_threshold,
etc) will still take effect.
file_format: (Optional) A str. The format of the resulting vocabulary file.
Accepted formats are: 'tfrecord_gzip', 'text'. 'tfrecord_gzip' requires
tensorflow>=2.4. The default value is 'text'.
name: (Optional) A name for this operation.
Returns:
The path name for the vocabulary file containing the unique values of `x`.
Raises:
ValueError: If `top_k` or `frequency_threshold` is negative.
If `coverage_top_k` or `coverage_frequency_threshold` is negative.
If either `coverage_top_k` or `coverage_frequency_threshold` is specified
and `key_fn` is not.
    If `key_fn` is specified and neither `coverage_top_k` nor
      `coverage_frequency_threshold` is specified.
"""
top_k, frequency_threshold = _get_top_k_and_frequency_threshold(
top_k, frequency_threshold)
if (coverage_top_k or coverage_frequency_threshold) and not key_fn:
raise ValueError('You must specify `key_fn` if you specify `coverage_top_k'
' or `coverage_frequency_threshold` in `vocabulary`.')
if key_fn and not (coverage_top_k or coverage_frequency_threshold):
raise ValueError('You must specify `coverage_top_k` or '
'`coverage_frequency_threshold` if you specify `key_fn` in'
' `vocabulary`.')
if file_format not in ALLOWED_VOCABULARY_FILE_FORMATS:
raise ValueError(
'"{}" is not an accepted file_format. It should be one of: {}'.format(
file_format, ALLOWED_VOCABULARY_FILE_FORMATS))
coverage_top_k, coverage_frequency_threshold = (
_get_top_k_and_frequency_threshold(
coverage_top_k, coverage_frequency_threshold))
if x.dtype != tf.string and not x.dtype.is_integer:
raise ValueError('expected tf.string or integer but got %r' % x.dtype)
if labels is not None and not labels.dtype.is_integer:
raise ValueError('expected integer labels but got %r' % labels.dtype)
if (frequency_threshold is None and labels is None and key_fn is None and
not fingerprint_shuffle and top_k is not None and
top_k <= LARGE_VOCAB_TOP_K):
logging.info('If the number of unique tokens is smaller than the provided '
'top_k or approximation error is acceptable, consider using '
'tft.experimental.approximate_vocabulary for a potentially '
'more efficient implementation.')
with tf.compat.v1.name_scope(name, 'vocabulary'):
vocabulary_key = vocab_filename
vocab_filename = _get_vocab_filename(vocab_filename, store_frequency)
informativeness_threshold = float('-inf')
coverage_informativeness_threshold = float('-inf')
if labels is not None:
if weights is not None:
vocab_ordering_type = _VocabOrderingType.WEIGHTED_MUTUAL_INFORMATION
else:
vocab_ordering_type = _VocabOrderingType.MUTUAL_INFORMATION
# Correct for the overloaded `frequency_threshold` API.
if frequency_threshold is not None:
informativeness_threshold = frequency_threshold
frequency_threshold = 0.0
if coverage_frequency_threshold is not None:
coverage_informativeness_threshold = coverage_frequency_threshold
coverage_frequency_threshold = 0.0
elif weights is not None:
vocab_ordering_type = _VocabOrderingType.WEIGHTED_FREQUENCY
else:
vocab_ordering_type = _VocabOrderingType.FREQUENCY
analyzer_inputs = _get_vocabulary_analyzer_inputs(
vocab_ordering_type=vocab_ordering_type,
x=x,
file_format=file_format,
labels=labels,
weights=weights)
return _vocabulary_analyzer_nodes(
analyzer_inputs=analyzer_inputs,
input_dtype=x.dtype.name,
vocab_ordering_type=vocab_ordering_type,
vocab_filename=vocab_filename,
top_k=top_k,
frequency_threshold=frequency_threshold or 0,
informativeness_threshold=informativeness_threshold,
use_adjusted_mutual_info=use_adjusted_mutual_info,
min_diff_from_avg=min_diff_from_avg,
fingerprint_shuffle=fingerprint_shuffle,
store_frequency=store_frequency,
key_fn=key_fn,
coverage_top_k=coverage_top_k,
coverage_frequency_threshold=coverage_frequency_threshold or 0,
coverage_informativeness_threshold=coverage_informativeness_threshold,
file_format=file_format,
vocabulary_key=vocabulary_key)
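# Illustrative usage sketch (not part of this module). The feature name
# 'terms' and the pairing with `tft.apply_vocabulary` below are assumptions
# about a typical preprocessing_fn, not requirements of this API.
def _example_vocabulary_usage(inputs):
  import tensorflow_transform as tft  # pylint: disable=g-import-not-at-top
  terms = inputs['terms']
  # Analysis phase: write a vocabulary file of the 10,000 most frequent terms.
  vocab_path = tft.vocabulary(terms, top_k=10000, vocab_filename='terms')
  # Transform phase: map each term to its index in that vocabulary
  # (out-of-vocabulary terms map to the default value, -1).
  return {'terms_ids': tft.apply_vocabulary(terms, vocab_path)}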
def _get_vocabulary_analyzer_inputs(
vocab_ordering_type: int,
x: common_types.TensorType,
file_format: common_types.VocabularyFileFormatType,
labels: Optional[tf.Tensor] = None,
weights: Optional[tf.Tensor] = None):
"""Helper for constructing analyzer inputs from tensors.
Args:
vocab_ordering_type: VocabOrderingType specifying how to select vocabulary.
x: Tensor to compute vocabulary over.
file_format: The format of the resulting vocabulary file. Accepted formats
are 'tfrecord_gzip', 'text'. 'tfrecord_gzip' requires tensorflow>=2.4.
labels: Optional tensor of integerized labels.
weights: Optional tensor of weights.
Returns:
A list of batch-reduced tensors to feed to vocabulary analysis.
"""
filter_regex = get_vocab_newline_characters_regex(x.dtype, file_format)
if vocab_ordering_type == _VocabOrderingType.WEIGHTED_MUTUAL_INFORMATION:
labels = tf.reshape(labels, [-1])
reduced_batch = tf_utils.reduce_batch_weighted_cooccurrences(
x, labels, weights, filter_regex=filter_regex)
return [
reduced_batch.unique_x, reduced_batch.summed_weights_per_x,
reduced_batch.summed_positive_per_x_and_y, reduced_batch.counts_per_x
]
elif vocab_ordering_type == _VocabOrderingType.MUTUAL_INFORMATION:
labels = tf.reshape(labels, [-1])
reduced_batch = tf_utils.reduce_batch_weighted_cooccurrences(
x, labels, weights, filter_regex=filter_regex)
return [
reduced_batch.unique_x, reduced_batch.summed_positive_per_x_and_y,
reduced_batch.counts_per_x
]
elif vocab_ordering_type == _VocabOrderingType.WEIGHTED_FREQUENCY:
reduced_batch = tf_utils.reduce_batch_weighted_counts(
x, weights, filter_regex=filter_regex)
assert reduced_batch.summed_positive_per_x_and_y is None
assert reduced_batch.counts_per_x is None
return [reduced_batch.unique_x, reduced_batch.summed_weights_per_x]
else:
reduced_batch = tf_utils.reduce_batch_weighted_counts(
x, filter_regex=filter_regex)
assert reduced_batch.summed_weights_per_x is None
assert reduced_batch.summed_positive_per_x_and_y is None
assert reduced_batch.counts_per_x is None
return [reduced_batch.unique_x]
def get_vocab_newline_characters_regex(
input_dtype: tf.dtypes.DType,
file_format: common_types.VocabularyFileFormatType) -> Optional[str]:
if input_dtype == tf.string and file_format == 'text':
return _EMPTY_STRING_OR_NEWLINE_CHARS_REGEX
else:
return None
def _vocabulary_analyzer_nodes(
analyzer_inputs: Collection[tf.Tensor],
input_dtype: tf.dtypes.DType,
vocab_ordering_type: int,
vocab_filename: str,
top_k: Optional[int] = None,
frequency_threshold: int = 0,
informativeness_threshold: float = float('-inf'),
use_adjusted_mutual_info: bool = False,
min_diff_from_avg: Optional[int] = None,
fingerprint_shuffle: bool = False,
store_frequency: bool = False,
key_fn: Optional[Callable[[Any], Any]] = None,
coverage_top_k: Optional[int] = None,
coverage_frequency_threshold: float = 0.0,
coverage_informativeness_threshold: float = float('-inf'),
file_format: common_types
.VocabularyFileFormatType = DEFAULT_VOCABULARY_FILE_FORMAT,
vocabulary_key: Optional[str] = None
) -> common_types.TemporaryAnalyzerOutputType:
"""Internal helper for analyzing vocab. See `vocabulary` doc string."""
if (file_format == 'tfrecord_gzip' and
not tf_utils.is_vocabulary_tfrecord_supported()):
raise ValueError(
'Vocabulary file_format "tfrecord_gzip" not yet supported for '
f'{tf.version.VERSION}.')
input_values_node = analyzer_nodes.get_input_tensors_value_nodes(
analyzer_inputs)
accumulate_output_value_node = nodes.apply_operation(
analyzer_nodes.VocabularyAccumulate,
input_values_node,
vocab_ordering_type=vocab_ordering_type,
input_dtype=input_dtype)
merge_output_value_node = nodes.apply_operation(
analyzer_nodes.VocabularyMerge,
accumulate_output_value_node,
use_adjusted_mutual_info=use_adjusted_mutual_info,
min_diff_from_avg=min_diff_from_avg,
vocab_ordering_type=vocab_ordering_type)
filtered_value_node = nodes.apply_operation(
analyzer_nodes.VocabularyPrune,
merge_output_value_node,
coverage_top_k=coverage_top_k,
coverage_frequency_threshold=coverage_frequency_threshold,
coverage_informativeness_threshold=coverage_informativeness_threshold,
key_fn=key_fn,
top_k=top_k,
frequency_threshold=frequency_threshold,
informativeness_threshold=informativeness_threshold,
input_dtype=input_dtype)
vocab_filename_node = nodes.apply_operation(
analyzer_nodes.VocabularyOrderAndWrite,
filtered_value_node,
vocab_filename=vocab_filename,
store_frequency=store_frequency,
fingerprint_shuffle=fingerprint_shuffle,
input_dtype=input_dtype,
file_format=file_format,
# LINT.IfChange(input_is_sorted)
input_is_sorted=(top_k is not None and key_fn is None and
not fingerprint_shuffle)
# LINT.ThenChange(beam/analyzer_impls.py:top_k_impl)
)
scope = tf.compat.v1.get_default_graph().get_name_scope()
unfiltered_vocab_size_node = nodes.apply_operation(
analyzer_nodes.VocabularyCount,
merge_output_value_node,
label=f'VocabularyCountUnfiltered[{scope}]')
unfiltered_vocab_size = analyzer_nodes.bind_future_as_tensor(
unfiltered_vocab_size_node,
analyzer_nodes.TensorInfo(tf.int64, [], None),
name=f'{vocab_filename}_unpruned_vocab_size')
filtered_vocab_size_node = nodes.apply_operation(
analyzer_nodes.VocabularyCount,
filtered_value_node,
label=f'VocabularyCountFiltered[{scope}]')
filtered_vocab_size = analyzer_nodes.bind_future_as_tensor(
filtered_vocab_size_node,
analyzer_nodes.TensorInfo(tf.int64, [], None),
name=f'{vocab_filename}_pruned_vocab_size')
_maybe_annotate_vocab_metadata(vocab_filename, unfiltered_vocab_size,
filtered_vocab_size)
register_vocab(
vocab_filename,
vocabulary_size=filtered_vocab_size,
vocabulary_key=vocabulary_key,
file_format=file_format)
return analyzer_nodes.wrap_as_tensor(vocab_filename_node)
def calculate_recommended_min_diff_from_avg(dataset_size: int) -> int:
"""Calculates a recommended min_diff_from_avg argument to tft.vocabulary.
Computes a default min_diff_from_average parameter based on the size of the
dataset. The MI (or AMI) of a token x label will be pushed to zero whenever
the difference between the observed and the expected (average) cooccurrence
with the label is < min_diff_from_average. This can be thought of as a
regularization parameter for mutual information based vocabularies.
Args:
    dataset_size: The number of records in the dataset. The bigger the dataset,
the higher the min_diff_from_average will be.
Returns:
    An integer that is recommended to use as the min_diff_from_avg parameter of
`vocabulary`.
"""
# The minimum and maximum min_diff_from_avg parameter to use.
min_value, max_value = 2, 25
# Heuristics for a "small" and "large" dataset. The selected parameter will
# be between min_value and max_value depending on where the dataset_size falls
# relative to these values.
small_dataset_size, large_dataset_size = 10000, 1000000
return int(
builtin_min(
max_value,
builtin_max(min_value, (dataset_size - small_dataset_size) /
(large_dataset_size - small_dataset_size) *
(max_value - min_value) + min_value)))
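# Worked example (illustrative, not part of the module): the interpolation
# above is linear in dataset_size between the "small" and "large" anchors and
# clipped to [2, 25]; e.g. 5,000 records -> 2, 505,000 records -> 13
# (0.5 * 23 + 2 = 13.5, truncated), 2,000,000 records -> 25.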
# Code related to this class is performance sensitive, so (micro-)benchmarks
# should be run when it is updated.
class QuantilesCombiner(analyzer_nodes.Combiner):
"""Computes quantiles on the PCollection.
This implementation is based on go/squawd.
For additional details on the algorithm, such as streaming and summary,
see also http://web.cs.ucla.edu/~weiwang/paper/SSDBM07_2.pdf
"""
def __init__(self,
num_quantiles,
epsilon,
bucket_numpy_dtype,
has_weights=False,
output_shape=None,
include_max_and_min=False,
feature_shape=None):
self._num_quantiles = num_quantiles
self._epsilon = epsilon
# Expected upper bound on the total number of input elements per feature.
# Theoretical error bound is guaranteed to be <= epsilon as long as the
# number of input elements is <= max_num_values.
self._max_num_values = 1 << 32
self._bucket_numpy_dtype = bucket_numpy_dtype
self._has_weights = has_weights
self._include_max_and_min = include_max_and_min
num_outputs = (num_quantiles +
1) if include_max_and_min else (num_quantiles - 1)
if feature_shape is None:
feature_shape = []
elif isinstance(feature_shape, int):
feature_shape = [feature_shape]
if output_shape is None:
self._output_shape = list(feature_shape) + [num_outputs]
else:
self._output_shape = output_shape
self._num_features = np.prod(feature_shape, dtype=np.int64).item()
def create_accumulator(self):
return sketches.QuantilesSketch(self._epsilon, self._max_num_values,
self._num_features)
def add_input(self, accumulator, next_input):
# Flattened input array will be split on inputs for each feature.
# C-contiguous order of flattened array is required.
flat_values = pa.array(np.ravel(next_input[0]))
if self._has_weights:
flat_weights = pa.array(np.ravel(next_input[1]))
accumulator.AddValues(flat_values, flat_weights)
else:
accumulator.AddValues(flat_values)
return accumulator
def merge_accumulators(self, accumulators):
accumulators = iter(accumulators)
result = next(accumulators)
for accumulator in accumulators:
result.Merge(accumulator)
return result
def compact(self, accumulator):
accumulator.Compact()
return accumulator
def extract_output(self, accumulator):
result = accumulator.GetQuantiles(self._num_quantiles).to_pylist()
if not result:
return [np.zeros(self._output_shape, self._bucket_numpy_dtype)]
result = np.array(result, self._bucket_numpy_dtype)
# Trim elementwise results if max and min should be excluded.
if not self._include_max_and_min:
result = result[:, 1:-1]
return [np.reshape(result, self._output_shape)]
def output_tensor_infos(self):
return [
analyzer_nodes.TensorInfo(
tf.as_dtype(self._bucket_numpy_dtype), self._output_shape, None)
]
@property
def accumulator_coder(self):
return _QuantilesSketchCacheCoder()
class _QuantilesSketchCacheCoder(analyzer_nodes.CacheCoder):
"""Cache coder for the quantiles accumulator."""
def encode_cache(self, accumulator):
# TODO(b/174549940): Consider exposing and calling
# `QuantilesSketch::Serialize` directly.
# TODO(b/37788560): Should we be "intelligently" choosing the 'protocol'
# argument for 'dumps'?
return pickle.dumps(accumulator)
def decode_cache(self, encoded_accumulator):
return pickle.loads(encoded_accumulator)
@common.log_api_use(common.ANALYZER_COLLECTION)
def quantiles(x: tf.Tensor,
num_buckets: int,
epsilon: float,
weights: Optional[tf.Tensor] = None,
reduce_instance_dims: bool = True,
name: Optional[str] = None) -> tf.Tensor:
"""Computes the quantile boundaries of a `Tensor` over the whole dataset.
Quantile boundaries are computed using approximate quantiles,
and error tolerance is specified using `epsilon`. The boundaries divide the
input tensor into approximately equal `num_buckets` parts.
See go/squawd for details, and how to control the error due to approximation.
NaN input values and values with NaN weights are ignored.
Args:
x: An input `Tensor`.
num_buckets: Values in the `x` are divided into approximately equal-sized
buckets, where the number of buckets is `num_buckets`. The number of
returned quantiles is `num_buckets` - 1.
epsilon: Error tolerance, typically a small fraction close to zero (e.g.
      0.01). Higher values of epsilon increase the quantile approximation error,
      and hence result in more unequal buckets, but can improve performance and
      reduce resource consumption. Some measured results on memory consumption:
For epsilon = 0.001, the amount of memory for each buffer to hold the
summary for 1 trillion input values is ~25000 bytes. If epsilon is
relaxed to 0.01, the buffer size drops to ~2000 bytes for the same input
size. The buffer size also determines the amount of work in the
different stages of the beam pipeline, in general, larger epsilon
results in fewer and smaller stages, and less time. For more performance
trade-offs see also http://web.cs.ucla.edu/~weiwang/paper/SSDBM07_2.pdf
weights: (Optional) Weights tensor for the quantiles. Tensor must have the
same batch size as x.
reduce_instance_dims: By default collapses the batch and instance dimensions
to arrive at a single output vector. If False, only collapses the batch
dimension and outputs a vector of the same shape as the input.
name: (Optional) A name for this operation.
Returns:
The bucket boundaries represented as a list, with num_bucket-1 elements,
unless reduce_instance_dims is False, which results in a Tensor of
shape x.shape + [num_bucket-1].
See code below for discussion on the type of bucket boundaries.
"""
# Quantile ops convert input values to double under the hood. Keep bucket
# boundaries as float for all numeric types.
bucket_dtype = tf.float32
with tf.compat.v1.name_scope(name, 'quantiles'):
if weights is None:
analyzer_inputs = [x]
has_weights = False
else:
analyzer_inputs = [x, weights]
has_weights = True
feature_shape = [] if reduce_instance_dims else x.get_shape().as_list()[1:]
output_shape = (feature_shape if feature_shape else [1]) + [num_buckets - 1]
combiner = QuantilesCombiner(
num_buckets,
epsilon,
bucket_dtype.as_numpy_dtype,
has_weights=has_weights,
output_shape=output_shape,
feature_shape=feature_shape)
(quantile_boundaries,) = _apply_cacheable_combiner(combiner,
*analyzer_inputs)
return quantile_boundaries
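# Illustrative usage sketch (not part of this module). The feature name
# 'income' and the use of `tft.apply_buckets` are assumptions about a typical
# preprocessing_fn, not requirements of this API.
def _example_quantiles_usage(inputs):
  import tensorflow_transform as tft  # pylint: disable=g-import-not-at-top
  income = inputs['income']
  # Nine boundaries splitting 'income' into ten approximately equal-sized
  # buckets, with an approximation error tolerance of 1%.
  boundaries = tft.quantiles(income, num_buckets=10, epsilon=0.01)
  return {'income_bucket': tft.apply_buckets(income, boundaries)}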
def _quantiles_per_key(
x: tf.Tensor,
key: tf.Tensor,
num_buckets: int,
epsilon: float,
weights: Optional[tf.Tensor] = None,
name: Optional[str] = None
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, int]:
"""Like quantiles but per-key.
For private use in tf.Transform implementation only.
Args:
x: An input `Tensor`.
    key: An input `Tensor` with rank 1 and size same as the first dimension of
`x`. All values of `x` will be aggregated according to the corresponding
value of `key`.
num_buckets: See `quantiles`.
epsilon: See `quantiles`.
weights: See `quantiles`.
name: (Optional) A name for this operation.
Returns:
    A 5-tuple of (key_vocab, boundaries, scale_factor, shift, num_buckets).
The returned boundaries is a 1-d Tensor of size:
((num_buckets - 2) * num_keys) + 1
And the returned scale and shift 1-d Tensors can be used to transform a
value before applying bucketization and shift the resulting bucket.
So the transformation of each input x before computing its bucket should be:
F(x, key) = x * scale_factor_per_key[key] + shift_per_key[key]
For example, if there are 2 keys, and the following boundaries are computed
for them: [[0, 1, 2], [0, 1, 2]], this will return:
boundaries: [0, 0.5, 1, 1.5, 2]
scale_factor_per_key: [0.5, 0.5]
shift_per_key: [0, 1]
num_buckets: 4
Raises:
ValueError: If key has wrong dtype.
"""
if key.dtype != tf.string:
raise ValueError('key must have type tf.string')
# Quantile ops convert input values to double under the hood. Keep bucket
# boundaries as float for all numeric types.
bucket_dtype = tf.float32
with tf.compat.v1.name_scope(name, 'quantiles_by_key'):
combiner = QuantilesCombiner(
num_buckets,
epsilon,
bucket_dtype.as_numpy_dtype,
has_weights=weights is not None,
output_shape=(num_buckets - 1,))
input_values_node = analyzer_nodes.get_input_tensors_value_nodes((
key, x) if weights is None else (key, x, weights))
accumulate_outputs_value_nodes = nodes.apply_multi_output_operation(
analyzer_nodes.CacheableCombinePerKeyAccumulate,
input_values_node,
combiner=combiner)
merge_output_value_node = nodes.apply_operation(
analyzer_nodes.CacheableCombinePerKeyMerge,
*accumulate_outputs_value_nodes,
combiner=combiner)
key_value_node, bucket_boundaries = nodes.apply_multi_output_operation(
analyzer_nodes.CacheableCombinePerKeyFormatKeys,
merge_output_value_node,
combiner=combiner)
boundaries, scale_factor, shift, num_buckets_node = (
nodes.apply_multi_output_operation(
analyzer_nodes.ScaleAndFlattenPerKeyBucketBouandaries,
bucket_boundaries,
output_tensor_dtype=bucket_dtype))
return tuple(
map(analyzer_nodes.wrap_as_tensor,
[key_value_node, boundaries, scale_factor, shift, num_buckets_node
]))
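# Illustrative numpy check (not part of this module) of the docstring example
# above: F(x, key) = x * scale_factor_per_key[key] + shift_per_key[key] maps
# each key's boundaries onto disjoint segments of the flattened boundaries.
def _demo_per_key_scale_and_shift():
  import numpy as np
  per_key_boundaries = np.array([[0., 1., 2.], [0., 1., 2.]])
  scale_factor_per_key = np.array([0.5, 0.5])
  shift_per_key = np.array([0., 1.])
  transformed = (per_key_boundaries * scale_factor_per_key[:, None] +
                 shift_per_key[:, None])
  # Key 0 maps onto [0, 0.5, 1] and key 1 onto [1, 1.5, 2]; their union is the
  # flattened boundary vector from the docstring.
  np.testing.assert_allclose(np.unique(transformed), [0., 0.5, 1., 1.5, 2.])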
class CovarianceCombiner(analyzer_nodes.Combiner):
"""Combines the PCollection to compute the biased covariance matrix."""
def __init__(self, output_shape, numpy_dtype=np.float64):
"""Store the dtype and shape for np arrays/matrices for precision."""
self._output_shape = output_shape
self._numpy_dtype = numpy_dtype
def create_accumulator(self):
"""Create an accumulator with all zero entries."""
return [
np.zeros((self._output_shape[0], self._output_shape[0]),
self._numpy_dtype),
np.zeros((self._output_shape[0],), self._numpy_dtype),
np.zeros((), self._numpy_dtype)
]
def add_input(self, accumulator, batch_values):
"""Compute sum of input cross-terms, sum of inputs, and count.
The cross terms for a numeric 1d array x are given by the set:
{z_ij = x_i * x_j for all indices i and j}. This is stored as a 2d array.
    Since the batch input is an array of 1d numeric arrays (i.e. a 2d array),
    matmul(transpose(batch_value), batch_value) will automatically sum up
    the cross terms of each 1d array in the batch.
Args:
accumulator: running sum of cross terms, input vectors, and count
      batch_values: entries from the pipeline, which must be a single-element
        list containing a 2d array representing multiple 1d arrays
Returns:
An accumulator with next_input considered in its running list of
sum_product, sum_vectors, and count of input rows.
"""
# Expect a single input representing the batch for the input tensor.
batch_value, = batch_values
assert len(np.shape(batch_value)) == 2
batch_cross_terms = np.matmul(
np.transpose(batch_value),
batch_value
).astype(self._numpy_dtype)
batch_sum = np.array(np.sum(batch_value, axis=0), self._numpy_dtype)
batch_count = np.shape(batch_value)[0]
sum_product, sum_vectors, count = accumulator
return [
sum_product + batch_cross_terms, sum_vectors + batch_sum,
count + batch_count
]
def merge_accumulators(self, accumulators):
"""Sums values in each accumulator entry."""
# TODO(b/215378946): Consider updating accumulators[0] in place.
products, vectors, counts = zip(*accumulators)
return [
np.sum(products, axis=0),
np.sum(vectors, axis=0),
np.sum(counts, axis=0)
]
def extract_output(self, accumulator):
"""Run covariance logic on sum_product, sum of input vectors, and count.
The formula used to compute the covariance is cov(x) = E(xx^T) - uu^T,
where x is the original input to the combiner, and u = mean(x).
E(xx^T) is computed by dividing sum of cross terms (index 0) by count
(index 2). u is computed by taking the sum of rows (index 1) and dividing by
the count (index 2).
Args:
accumulator: final accumulator as a list of the sum of cross-terms matrix,
sum of input vectors, and count.
Returns:
A list containing a single 2d ndarray, the covariance matrix.
"""
sum_product, sum_vectors, count = accumulator
if count == 0:
return [np.zeros(self._output_shape, self._numpy_dtype)]
expected_cross_terms = sum_product / count
expected_terms = sum_vectors / count
return [
np.ndarray.astype( # TODO(b/64987151): # pytype: disable=attribute-error
expected_cross_terms - np.outer(expected_terms, expected_terms),
self._numpy_dtype)
]
def output_tensor_infos(self):
return [
analyzer_nodes.TensorInfo(
tf.as_dtype(self._numpy_dtype), self._output_shape, None)
]
@common.log_api_use(common.ANALYZER_COLLECTION)
def covariance(x: tf.Tensor,
dtype: tf.DType,
name: Optional[str] = None) -> tf.Tensor:
"""Computes the covariance matrix over the whole dataset.
The covariance matrix M is defined as follows:
Let x[:j] be a tensor of the jth element of all input vectors in x, and let
u_j = mean(x[:j]). The entry M[i,j] = E[(x[:i] - u_i)(x[:j] - u_j)].
Notice that the diagonal entries correspond to variances of individual
elements in the vector, i.e. M[i,i] corresponds to the variance of x[:i].
Args:
x: A rank-2 `Tensor`, 0th dim are rows, 1st dim are indices in each input
vector.
dtype: Tensorflow dtype of entries in the returned matrix.
name: (Optional) A name for this operation.
Raises:
ValueError: if input is not a rank-2 Tensor.
Returns:
A rank-2 (matrix) covariance `Tensor`
"""
if not isinstance(x, tf.Tensor):
raise TypeError('Expected a Tensor, but got %r' % x)
with tf.compat.v1.name_scope(name, 'covariance'):
x.shape.assert_has_rank(2)
input_dim = x.shape.as_list()[1]
shape = (input_dim, input_dim)
(result,) = _apply_cacheable_combiner(
CovarianceCombiner(shape, dtype.as_numpy_dtype), x)
return result
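# Illustrative numpy check (not part of this module) of the biased covariance
# identity used by CovarianceCombiner: cov(x) = E[x x^T] - u u^T with u = E[x].
def _demo_biased_covariance_identity():
  import numpy as np
  x = np.random.RandomState(0).randn(500, 3)
  expected_cross_terms = np.matmul(x.T, x) / len(x)
  u = x.mean(axis=0)
  np.testing.assert_allclose(
      expected_cross_terms - np.outer(u, u),
      np.cov(x, rowvar=False, bias=True))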
class PCACombiner(CovarianceCombiner):
"""Compute PCA of accumulated data using the biased covariance matrix."""
def __init__(self, output_shape, output_dim=None, numpy_dtype=np.float64):
"""Store pca output dimension, shape and dtype for precision."""
super().__init__(output_shape, numpy_dtype=numpy_dtype)
self._output_dim = output_dim
def extract_output(self, accumulator):
"""Compute PCA of the accumulated data using the biased covariance matrix.
Following the covariance computation in CovarianceCombiner, this method runs
eigenvalue decomposition on the covariance matrix, sorts eigenvalues in
decreasing order, and returns the first output_dim corresponding
eigenvectors (principal components) as a matrix.
Args:
accumulator: final accumulator as a list of the sum of cross-terms matrix,
sum of input vectors, and count.
Returns:
A list containing a matrix of shape (input_dim, output_dim).
"""
sum_product, sum_vectors, count = accumulator
if count == 0:
# In this case all eigenvalues==0 and we output (possibly truncated) basis
# vectors. Note that if _output_dim is None, then M is set to N in np.eye.
return [np.eye(N=self._output_shape[0], M=self._output_dim,
dtype=self._numpy_dtype)]
expected_cross_terms = sum_product / count
expected_terms = sum_vectors / count
cov = np.ndarray.astype( # TODO(b/64987151): # pytype: disable=attribute-error
expected_cross_terms - np.outer(expected_terms, expected_terms),
self._numpy_dtype)
vals, vecs = np.linalg.eigh(cov)
sorted_vecs = vecs[:, np.argsort(vals)[::-1]]
if self._output_dim is None:
return [sorted_vecs]
else:
return [sorted_vecs[:, :self._output_dim]]
@common.log_api_use(common.ANALYZER_COLLECTION)
def pca(x: tf.Tensor,
output_dim: int,
dtype: tf.DType,
name: Optional[str] = None) -> tf.Tensor:
"""Computes PCA on the dataset using biased covariance.
The PCA analyzer computes output_dim orthonormal vectors that capture
directions/axes corresponding to the highest variances in the input vectors of
`x`. The output vectors are returned as a rank-2 tensor with shape
`(input_dim, output_dim)`, where the 0th dimension are the components of each
output vector, and the 1st dimension are the output vectors representing
orthogonal directions in the input space, sorted in order of decreasing
variances.
The output rank-2 tensor (matrix) serves a useful transform purpose. Formally,
  the matrix can be used downstream in the transform step by multiplying the
  input tensor `x` by it. This transform reduces the dimension of input vectors to
output_dim in a way that retains the maximal variance.
NOTE: To properly use PCA, input vector components should be converted to
similar units of measurement such that the vectors represent a Euclidean
space. If no such conversion is available (e.g. one element represents time,
another element distance), the canonical approach is to first apply a
transformation to the input data to normalize numerical variances, i.e.
`tft.scale_to_z_score()`. Normalization allows PCA to choose output axes that
help decorrelate input axes.
Below are a couple intuitive examples of PCA.
Consider a simple 2-dimensional example:
Input x is a series of vectors `[e, e]` where `e` is Gaussian with mean 0,
variance 1. The two components are perfectly correlated, and the resulting
covariance matrix is
```
[[1 1],
[1 1]].
```
Applying PCA with `output_dim = 1` would discover the first principal
  component `[1 / sqrt(2), 1 / sqrt(2)]`. When multiplied with the original
example, each vector `[e, e]` would be mapped to a scalar `sqrt(2) * e`. The
second principal component would be `[-1 / sqrt(2), 1 / sqrt(2)]` and would
map `[e, e]` to 0, which indicates that the second component captures no
variance at all. This agrees with our intuition since we know that the two
axes in the input are perfectly correlated and can be fully explained by a
single scalar `e`.
Consider a 3-dimensional example:
Input `x` is a series of vectors `[a, a, b]`, where `a` is a zero-mean, unit
variance Gaussian and `b` is a zero-mean, variance 4 Gaussian and is
independent of `a`. The first principal component of the unnormalized vector
would be `[0, 0, 1]` since `b` has a much larger variance than any linear
combination of the first two components. This would map `[a, a, b]` onto `b`,
asserting that the axis with highest energy is the third component. While this
may be the desired output if `a` and `b` correspond to the same units, it is
  not statistically desirable when the units are irreconcilable. In such a
  case, one should first normalize each component to unit variance, i.e.
`b := b / 2`. The first principal component of a normalized vector would yield
`[1 / sqrt(2), 1 / sqrt(2), 0]`, and would map `[a, a, b]` to `sqrt(2) * a`.
The second component would be `[0, 0, 1]` and map `[a, a, b]` to `b`. As can
be seen, the benefit of normalization is that PCA would capture highly
correlated components first and collapse them into a lower dimension.
Args:
x: A rank-2 `Tensor`, 0th dim are rows, 1st dim are indices in row vectors.
output_dim: The PCA output dimension (number of eigenvectors to return).
dtype: Tensorflow dtype of entries in the returned matrix.
name: (Optional) A name for this operation.
Raises:
ValueError: if input is not a rank-2 Tensor.
Returns:
A 2D `Tensor` (matrix) M of shape (input_dim, output_dim).
"""
if not isinstance(x, tf.Tensor):
raise TypeError('Expected a Tensor, but got %r' % x)
with tf.compat.v1.name_scope(name, 'pca'):
x.shape.assert_has_rank(2)
input_dim = x.shape.as_list()[1]
shape = (input_dim, output_dim)
(result,) = _apply_cacheable_combiner(
PCACombiner(shape, output_dim, dtype.as_numpy_dtype), x)
return result
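# Illustrative numpy sketch (not part of this module) mirroring
# PCACombiner.extract_output: eigendecompose the biased covariance matrix and
# keep the eigenvectors with the largest eigenvalues. The data reproduce the
# perfectly-correlated 2-d example from the docstring above.
def _demo_pca_via_covariance_eigendecomposition():
  import numpy as np
  rng = np.random.RandomState(0)
  e = rng.randn(1000)
  x = np.stack([e, e + 0.01 * rng.randn(1000)], axis=1)
  cov = np.cov(x, rowvar=False, bias=True)
  vals, vecs = np.linalg.eigh(cov)
  first_pc = vecs[:, np.argsort(vals)[::-1]][:, 0]
  # Up to sign, the dominant direction is ~[1/sqrt(2), 1/sqrt(2)].
  np.testing.assert_allclose(np.abs(first_pc), [2 ** -0.5] * 2, atol=0.01)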
def _maybe_annotate_vocab_metadata(vocab_filename: str,
unfiltered_vocabulary_size: tf.Tensor,
filtered_vocabulary_size: tf.Tensor):
"""Annotates a bucketized tensor with the boundaries that were applied.
Creates a deferred annotation for the specified tensor.
Args:
vocab_filename: The name of the vocabulary.
unfiltered_vocabulary_size: A tf.int64 tensor containing the unfiltered
vocab size.
filtered_vocabulary_size: A tf.int64 tensor containing the filtered vocab
size.
"""
if not common.IS_ANNOTATIONS_PB_AVAILABLE:
return
from tensorflow_transform import annotations_pb2 # pylint: disable=g-import-not-at-top
message_type = annotations_pb2.VocabularyMetadata.DESCRIPTOR.full_name
unfiltered_vocabulary_size = tf.expand_dims(unfiltered_vocabulary_size, 0)
filtered_vocabulary_size = tf.expand_dims(filtered_vocabulary_size, 0)
file_name = tf.convert_to_tensor([vocab_filename])
descriptor_source = descriptor_pb2.FileDescriptorSet()
annotations_pb2.VocabularyMetadata.DESCRIPTOR.file.CopyToProto(
descriptor_source.file.add())
descriptor_source_str = b'bytes://' + descriptor_source.SerializeToString()
message_proto = tf_utils._encode_proto( # pylint: disable=protected-access
{
'unfiltered_vocabulary_size': unfiltered_vocabulary_size,
'filtered_vocabulary_size': filtered_vocabulary_size,
'file_name': file_name,
}, message_type, descriptor_source=descriptor_source_str)
assert message_proto.shape == [1]
message_proto = message_proto[0]
# Note: we annotate globally here (tied to a vocabulary by filename) rather
# than attaching to a tensor, because this annotation is tied to an analysis
# output not a final tensor produced by a mapper.
type_url = os.path.join(common.ANNOTATION_PREFIX_URL, message_type)
schema_inference.annotate(type_url, message_proto)
|
tensorflow/transform
|
tensorflow_transform/analyzers.py
|
Python
|
apache-2.0
| 109,671
|
[
"Gaussian"
] |
b945fb17be09920bd5170f54e9040f1ff4947d8f9084cd379ffd5693c0be2006
|
# Orca
#
# Copyright 2013 The Orca Team.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom speech generator for gnome-documents."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2013 The Orca Team"
__license__ = "LGPL"
import orca.speech_generator as speech_generator
class SpeechGenerator(speech_generator.SpeechGenerator):
def __init__(self, script):
speech_generator.SpeechGenerator.__init__(self, script)
def _generateUnselectedCell(self, obj, **args):
# There are a number of objects in gnome-documents which claim to
# be selectable, but cannot actually be selected. Until we find and
# fix those issues, this will keep Orca from constantly tacking on
# "not selected" when presenting these objects.
return []
|
ruibarreira/linuxtrail
|
usr/lib/python3/dist-packages/orca/scripts/apps/gnome-documents/speech_generator.py
|
Python
|
gpl-3.0
| 1,530
|
[
"ORCA"
] |
b8120e91f9de3450988c10b01a69a412cf64fe2f64d0bab8e5ed3070efcd5086
|
import pandas as pd
import numpy as np
mets = ['A','B','C','D','E','F','A_ext', 'D_ext', 'E_ext','F_ext']
internal_mets = [m for m in mets if 'ext' not in m]
rxns = ['R_{}'.format(i) for i in range(1,10)]
data = {'R_1': pd.Series({'A':1, 'A_ext': -1}),
'R_2': pd.Series({'A':-1,'B':1}),
'R_3': pd.Series({'A':-1,'C':1}),
'R_4': pd.Series({'B':-1, 'D': 2, 'E': -1}),
'R_5': pd.Series({'E':1, 'E_ext':-1}),
'R_6': pd.Series({'B':-2, 'C':1, 'F': 1}),
'R_7': pd.Series({'C':-1, 'D':1}),
'R_8': pd.Series({'D': -1, 'D_ext': 1}),
'R_9': pd.Series({'F':-1, 'F_ext':1})}
fullS = pd.DataFrame(data, columns=rxns, index=mets,dtype='int64').fillna(0)
biomass_rxn = 'R_8'
biomass = fullS.columns.get_loc(biomass_rxn) # Index of biomass reaction
A_uptake, E_uptake = 0, 4 # Index of uptake reactions
R = 8.3144598/1000.0 # ideal gas constant
T = 298.15 # standard temperature
n_A = 6.022e23 # Avogadro's number
V = 1e-15 # volume of cell in Liters
c_L = 1e-8 # lower bound of metabolite concentrations
c_U = 1e-3 # upper bound of metabolite concentrations
v_L = 0
v_U = 100
A_ext = 6 # Index of A_ext
E_ext = 8 # Index of E_ext
D_ext = 7 # Index of D_ext
F_ext = 9 # Index of F_ext
A,B,C,D,E,F = range(6) # Index of internal metabolites
lambda_x = 0.5
S = fullS.loc[internal_mets].as_matrix()
external_mets = [met for met in fullS.index if 'ext' in met]
m,n = fullS.shape
metab = {}
reactions = {}
true_metab, true_reactions = {},{}
mu0 = pd.Series([0.0,-2,-2,-4.0,-2.,-10.0,0.0,-4.0,-2.0,-10.0], index=mets,dtype='float64')
deltaG0 = fullS.T.dot(mu0)
met_bounds = pd.Series({'A_ext':c_U, 'E_ext': c_U, 'F_ext': c_L, 'D_ext': c_L}, index=external_mets)
efflux = [fullS.index.get_loc(met) for met in met_bounds[met_bounds == c_L].index]
uptake = [fullS.index.get_loc(met) for met in met_bounds[met_bounds == c_U].index]
internal = [fullS.index.get_loc(met) for met in internal_mets]
mu_ext = mu0[external_mets] + R*T*met_bounds.apply(np.log)
external_free_energy = (mu_ext['D_ext'] + mu_ext['F_ext']) - (mu_ext['A_ext'] + mu_ext['E_ext'])
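# Illustrative note (not part of the original script; assumes mu0 is in kJ/mol,
# consistent with R being divided by 1000 above): with mu_ext = mu0 + R*T*ln(c),
# the bounds give roughly -17.1 (A_ext), -19.1 (E_ext), -49.7 (D_ext) and
# -55.7 (F_ext), so the overall conversion A_ext + E_ext -> D_ext + F_ext has
# external_free_energy of about -69 kJ/mol, i.e. it is thermodynamically
# favorable at these concentration bounds.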
|
djinnome/mentos
|
mentos/abc_model.py
|
Python
|
gpl-3.0
| 2,191
|
[
"Avogadro"
] |
6d6c7ead70b9a416d565b9b8cb0b4b6f6bb196ec43a6609a6d447f5d049bbb95
|
# Orca
#
# Copyright 2010 Joanmarie Diggs.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom speech generator for gnome-panel."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs."
__license__ = "LGPL"
import pyatspi
import orca.settings_manager as settings_manager
import orca.speech_generator as speech_generator
_settingsManager = settings_manager.getManager()
class SpeechGenerator(speech_generator.SpeechGenerator):
def __init__(self, script):
speech_generator.SpeechGenerator.__init__(self, script)
def _generateName(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the name of the object. If the object is directly
displaying any text, that text will be treated as the name.
Otherwise, the accessible name of the object will be used. If
there is no accessible name, then the description of the
object will be used. This method will return an empty array
if nothing can be found.
"""
role = args.get('role', obj.getRole())
if role == pyatspi.ROLE_FRAME and _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = speech_generator.SpeechGenerator._generateName(self, obj, **args)
if result:
if role == pyatspi.ROLE_FRAME:
result.extend(self.voice(speech_generator.SYSTEM, obj=obj, **args))
else:
result.extend(self.voice(speech_generator.DEFAULT, obj=obj, **args))
return result
|
GNOME/orca
|
src/orca/scripts/apps/gnome-panel/speech_generator.py
|
Python
|
lgpl-2.1
| 2,321
|
[
"ORCA"
] |
d308935b73fd2289f753bfaddcfeb14cc43de8735d1e577dc7a0c590cd0113c8
|
import pyNN.nest as sim
#import pyNN.neuron as sim
import pyNN.utility
import numpy as np
import sys
from parameters import *
from matplotlib import pyplot as plt
def save_spikes(pop_list, base_name, filename):
for idx, pop in enumerate(pop_list):
print idx, pop
spikes = pop.getSpikes()
print 'spikes', spikes.list_units
print "////", spikes.list_recordingchannels
fname = base_name + 'spikes_' + str(idx) + '_' + filename
print "saving spikes to file:", fname, "length=", len(spikes)
np.savetxt(fname, np.array(spikes, ndmin=2))
##if __name__ == '__main__':
def run(a_state):
output_base = "out/"
spike_count_filename = "gpi_spike_count.dat"
weight_filename = conn_filename # filename from which the cortex - striatum connections are read
spike_count_full_filename = output_base + spike_count_filename
#active_state = int(sys.argv[1])
active_state = a_state
#Model of the basal ganglia D1 and D2 pathways. States and actions are population coded.
pyNN.utility.init_logging(None, debug=True)
sim.setup(time_step)
# cell class for all neurons in the network
# (on HMF can be one of IF_cond_exp, EIF_cond_exp_isfa_ista)
cellclass = sim.IF_cond_exp
# #############
# POPULATIONS
# #############
#CORTEX input population: N states, poisson inputs
#?assemblies of m_actions populations or dictionary of populations?
#STRIATUM 2 populations of M actions, D1 and D2
#GPi/SNr 1 population of M actions, baseline firing rate driven by external poisson inputs
cortex = [
sim.Population(n_cortex_cells, cellclass, neuron_parameters, label="CORTEX_{}".format(i))
for i in xrange(n_states)]
cortex_assembly = sim.Assembly(
*cortex,
label="CORTEX")
# independent Poisson input to cortex populations.
# /active_state/ determines which population receives
# a different firing rate
cortex_input = []
for i in xrange(n_states):
if i == active_state:
rate = active_state_rate
else:
rate = inactive_state_rate
new_input = sim.Population(
n_cortex_cells,
sim.SpikeSourcePoisson,
{'rate': rate},
label="STATE_INPUT_" + str(i))
sim.Projection(
new_input,
cortex[i],
sim.OneToOneConnector(),
sim.StaticSynapse(weight=cortex_input_weight, delay=cortex_input_delay)
)
cortex_input.append(new_input)
#print 'cortex ok'
# striatum:
# excitatory populations
striatum_d1 = [
sim.Population(n_msns, cellclass, neuron_parameters, label="D1_{}".format(i))
for i in xrange(m_actions)]
# inhibitory populations
striatum_d2 = [
sim.Population(n_msns, cellclass, neuron_parameters, label="D2_{}".format(i))
for i in xrange(m_actions)]
# Striatum D2->D2 and D1->D1 lateral inhibition
for lat_inh_source in xrange(m_actions):
for lat_inh_target in xrange(m_actions):
if lat_inh_source == lat_inh_target:
continue
sim.Projection(
striatum_d1[lat_inh_source],
striatum_d1[lat_inh_target],
sim.FixedProbabilityConnector(
d1_lat_inh_prob),
sim.StaticSynapse(
weight=d1_lat_inh_weight,
delay=d1_lat_inh_delay),
receptor_type="inhibitory",
label="d1_lateral_inhibition_{}_{}".format(
lat_inh_source, lat_inh_target))
sim.Projection(
striatum_d2[lat_inh_source],
striatum_d2[lat_inh_target],
sim.FixedProbabilityConnector(
d2_lat_inh_prob),
sim.StaticSynapse(
weight=d2_lat_inh_weight,
delay=d2_lat_inh_delay),
receptor_type="inhibitory",
label="d2_lateral_inhibition_{}_{}".format(
lat_inh_source, lat_inh_target))
striatum_assembly = sim.Assembly(
*(striatum_d1 + striatum_d2),
label="STRIATUM")
#gids_cortex= []
#gids_d1= []
#gids_d2= []
#for s in xrange(n_states):
# gids_cortex.append([gid for gid in cortex_assembly.get_population("CORTEX_"+str(s)).all()])
#for a in xrange(m_actions):
# gids_d1.append([gid1 for gid1 in striatum_assembly.get_population("D1_"+str(a)).all()])
# gids_d2.append([gid2 for gid2 in striatum_assembly.get_population("D2_"+str(a)).all()])
#for i in xrange(0,3):
# print i, 'len cortex ', len(gids_cortex[i]), 'unique ', len(np.unique(gids_cortex[i]))
# print i, 'len d1', len(gids_d1[i]), 'unique ', len(np.unique(gids_d1[i]))
# print i, 'len d2', len(gids_d2[i]), 'unique ', len(np.unique(gids_d2[i]))
#print "striatum ok"
#for i in xrange(0,3):
# print np.unique(gids_cortex[i])
# gids_cortex[i][:]-=3
#if init:
# init_w(gids_cortex, gids_d1, gids_d2)
# cortex - striatum connection, all-to-all using loaded weights
cs = sim.Projection(
cortex_assembly,
striatum_assembly,
#sim.AllToAllConnector(),
#sim.StaticSynapse(
# weight=wd1,
# delay=ctx_strd1_delay))
sim.FromFileConnector(
weight_filename))
gpi = [
sim.Population(n_gpi, cellclass, neuron_parameters,
label="GPI_{}".format(i))
for i in xrange(m_actions)
]
gpi_assembly = sim.Assembly(
*gpi,
label="GPi")
# external Poisson input to GPi
gpi_input = sim.Population(
m_actions * n_gpi,
sim.SpikeSourcePoisson,
dict(
duration=sim_duration,
rate=gpi_external_rate,
start=0.),
label="GPI_EXT_INPUT")
sim.Projection(
gpi_input,
gpi_assembly,
sim.OneToOneConnector(),
sim.StaticSynapse(
weight=gpi_external_weight,
delay= gpi_external_delay))
# striatum - gpi connections
for i in xrange(m_actions):
gpi_p = sim.Projection(
striatum_d1[i],
gpi[i],
sim.FixedProbabilityConnector(d1_gpi_prob),
sim.StaticSynapse( weight=d1_gpi_weight, delay = d1_gpi_delay))
sim.Projection(
striatum_d2[i],
gpi[i],
sim.FixedProbabilityConnector(d2_gpi_prob),
sim.StaticSynapse(weight=d2_gpi_weight, delay=d2_gpi_delay),
#target="inhibitory")
receptor_type="inhibitory")
#print gpi_p.get('weight', format='list')
cortex_assembly.record('spikes')
striatum_assembly.record('spikes')
gpi_assembly.record('spikes')
#print 'sim start'
sim.run(sim_duration)
sim.end()
label = "CORTEX_0"
#print 'cortex get pop', cortex_assembly.get_population(label)
#print 'cortex describe', cortex_assembly.describe()
#cortex_assembly.write_data("spikes")
#cortex_assembly.get_population(label).write_data("spikes")
#spikes = gpi_assembly #get_data("spikes", gather=True)
# print "getdata spikes", spikes
# print 'spikes.segment', spikes.segments
#print 'spikes.segments.SpikeTrains', spikes.segments.spike
#save_spikes(cortex_assembly, output_base, "cortex.dat")
#save_spikes(striatum_d1, output_base, "striatum_d1.dat")
#save_spikes(striatum_d2, output_base, "striatum_d2.dat")
#save_spikes(gpi, output_base, "gpi.dat")
#output_rates = np.array(
# [len(i.getSpikes()) for i in gpi])
#np.savetxt(spike_count_full_filename, output_rates)
# for seg in cortex_assembly.segments:
# print("Analyzing segment %d" % seg.index)
# stlist = [st - st.t_start for st in seg.spiketrains]
# plt.figure()
# count, bins = np.histogram(stlist)
# plt.bar(bins[:-1], count, width=bins[1] - bins[0])
# plt.title("PSTH in segment %d" % seg.index)
cortex_mean_spikes = np.zeros(n_states)
gpi_mean_spikes = np.zeros(m_actions)
d1_mean_spikes = np.zeros(m_actions)
d2_mean_spikes = np.zeros(m_actions)
for i in xrange(n_states):
cortex_mean_spikes[i] = cortex_assembly.get_population("CORTEX_"+str(i)).mean_spike_count()
for i in xrange(m_actions):
gpi_mean_spikes[i] = gpi_assembly.get_population("GPI_"+str(i)).mean_spike_count()
d1_mean_spikes[i] = striatum_assembly.get_population("D1_"+str(i)).mean_spike_count()
d2_mean_spikes[i] = striatum_assembly.get_population("D2_"+str(i)).mean_spike_count()
print 'CORTEX ', cortex_mean_spikes
print 'D1', d1_mean_spikes
print 'D2', d2_mean_spikes
return gpi_mean_spikes
# #############
# CONNECTIONS
# #############
#N poisson generators to N states in cortex
#M poisson generators to M actions in GPi/SNr
#background noise for all neurons?
#all to all cortex to striatum for both D1 and D2 populations
#"plastic weights"
#lateral inhibition D2-D2 and D1-D1
#static weights
#D1[m] --> GPi[m] positive static weight
#D2[m] --> GPi[m] negative static weight
# #############
# RECORDERS
# #############
#spike detectors
# GPi/SNr
# #############
# SIMULATION
# #############
#initialize noise
##LOOP
#run 1 trial
# set one poisson generator to active firing rate for one state in cortex
# sim.run(time)
# get spikes from GPi/SNr
# offline computations of selection, reward, and update
# load weights
# new trial
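if __name__ == '__main__':
    # Minimal driver sketch (added for illustration; not in the original
    # file). Runs one trial with cortical state 0 active and prints the
    # per-action GPi activity returned by run().
    gpi_rates = run(0)
    print 'GPi mean spike counts per action:', gpi_rates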
|
pierreberthet/demo2.2-bg
|
pynn/simplified_bg_pynn.py
|
Python
|
gpl-2.0
| 9,657
|
[
"NEURON"
] |
f4c91ac4f0c85dd4d10f8bc59c3b60a6fe6a0be287d65302b3dc0d3f23e7fdc1
|
##############################################################################
# adaptiveMD: A Python Framework to Run Adaptive Molecular Dynamics (MD)
# Simulations on HPC Resources
# Copyright 2017 FU Berlin and the Authors
#
# Authors: Jan-Hendrik Prinz
# Contributors:
#
# `adaptiveMD` is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from __future__ import print_function, absolute_import
from .event import Event
from .file import Location
from .mongodb import ObjectJSON
from .task import Task, DummyTask
class Scheduler(object):
"""
Class to handle task execution on a resource
Notes
-----
In RP this would correspond to a Pilot with a UnitManager
Attributes
----------
project : `Project`
a back reference to the project that uses this scheduler
tasks : dict uid : `Task`
dict that references all running tasks by the associated CU.uid
wrapper : `Task`
a wrapping task that contains additional commands to be executed
around each task running on that scheduler. It usually contains
steps such as adding certain paths, etc.
"""
def __init__(self, resource, queue=None, runtime=240, cores=1):
"""
Parameters
----------
resource : `Resource`
a `Resource` where this scheduler works on
queue : str
the name of the queue to be used for pilot creation
runtime : int
max runtime in minutes for the created pilot
cores
number of used cores to be used in the created pilot
"""
self.resource = resource
self.queue = queue
self.runtime = runtime
self.cores = cores
self.project = None
self.tasks = dict()
self.auto_submit_dependencies = True
self._generator_list = []
self._events = []
self._stop_signal = False
self._shutting_down = False
self._finished = False
self.wrapper = DummyTask()
self._folder_name = None
self.simplifier = ObjectJSON()
self._state_cb = None
self.state = 'booting'
@property
def staging_area_location(self):
"""
Return the path to the staging area used by this scheduler
"""
return 'sandbox:///' + self.folder_name + '/staging_area'
@property
def generators(self):
"""
Return the generators of the attached project
Returns
-------
list of `TaskGenerator`
"""
if self.project:
return self.project.generators
else:
return []
@property
def folder_name(self):
return self._folder_name
def get_path(self, f):
"""
Get the scheduler's representation of the path in a `Location` object
Parameters
----------
f : `Location`
the location object
Returns
-------
str
a real file path
"""
return self.replace_prefix(f.url)
# def in_staging_area(self, url):
# pass
def unroll_staging_path(self, location):
"""
Convert a staging location into an adaptiveMD location
Parameters
----------
location : `Location`
the location to be changed
"""
if location.drive == 'staging':
location.location = self.staging_area_location + location.path
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
fail = True
if exc_type is None:
pass
elif issubclass(exc_type, (KeyboardInterrupt, SystemExit)):
# self.report.warn('exit requested\n')
pass
elif issubclass(exc_type, Exception):
# self.report.error('caught exception: %s\n' % exc_type)
fail = False
self.exit()
return fail
def enter(self, project=None):
"""
Run any preparations needed to use this scheduler
Parameters
----------
project : `Project`
the project the worker should execute for
"""
if project is not None:
self.project = project
def __call__(self, submission):
return self.submit(submission)
@property
def is_idle(self):
"""
Check whether the scheduler is idle
"""
return len(self.tasks) == 0
def exit(self):
"""
Shut down the scheduler
"""
self.shut_down(False)
def stage_generators(self):
"""
Prepare files and folders for all generators
"""
pass
def stage_in(self, staging):
pass
def flatten_location(self, obj):
if isinstance(obj, Location):
return self.replace_prefix(obj.url)
elif isinstance(obj, list):
return list(map(self.flatten_location, obj))
elif isinstance(obj, dict):
return {
self.flatten_location(key): self.flatten_location(value)
for key, value in obj.items()
}
elif isinstance(obj, tuple):
return tuple(map(self.flatten_location, obj))
else:
return obj
def remove_task(self, task):
pass
def _to_tasks(self, submission):
if isinstance(submission, (tuple, list)):
return sum(list(map(self._to_tasks, submission)), [])
elif isinstance(submission, Task):
if submission in self.tasks.values() or submission.is_done():
return []
if submission.ready:
return [submission]
else:
if self.auto_submit_dependencies:
return self._to_tasks(submission.dependencies)
else:
return []
# else:
# for cls, gen in self.file_generators.items():
# if isinstance(submission, cls):
# return self._to_tasks(gen(submission))
#
# return []
return []
def _to_events(self, submission):
if isinstance(submission, (tuple, list)):
return sum(list(map(self._to_events, submission)), [])
elif isinstance(submission, Event):
return [submission]
else:
return []
def submit(self, submission):
"""
Submit a task in the form of an event, a task, or a task-like object
Parameters
----------
submission : (list of) [`Task` or object or `Event`]
Returns
-------
list of `Task`
the list of tasks actually executed after looking at all objects
"""
return self._to_tasks(submission)
def add_event(self, event):
if isinstance(event, (tuple, list)):
list(map(self._events.append, event))
else:
self._events.append(event)
self.trigger()
return event
def trigger(self):
"""
Trigger a check of state changes that leads to task execution
"""
# delegate to project level
self.project.trigger()
def shut_down(self, wait_to_finish=True):
"""
Do a controlled shutdown. Cancel all units and wait until they finish.
Parameters
----------
wait_to_finish : bool
if True (the default), the function will block until all tasks report
finish
"""
if not self._finished:
self._finished = True
def on(self, condition):
"""
Shortcut for creation and appending of a new Event
Parameters
----------
condition : `Condition`
Returns
-------
`Event`
"""
ev = Event(condition)
self._events.append(ev)
return ev
def wait(self):
"""
Wait until no more units are running and hence no more state changes
"""
pass
def cancel_events(self):
"""
Remove all pending events and stop them from further task execution
"""
for ev in self._events:
ev.cancel()
self._events = []
def replace_prefix(self, path):
"""
Interpret adaptiveMD paths and replace prefixes with real OS paths
Parameters
----------
path : str
the path with an adaptiveMD prefix
Returns
-------
str
the path without any adaptiveMD prefixes
"""
path = path.replace('staging://', '../staging_area')
# the rp sandbox://
path = path.replace('sandbox://', '../..')
# the main remote shared FS
path = path.replace('shared://', '../../..')
path = path.replace('worker://', '')
path = path.replace('file://', '')
# the specific project folder://
path = path.replace(
'project://', '../../projects/' + self.project.name)
return path
def change_state(self, new_state):
print('changed state to', new_state)
self.state = new_state
if self._state_cb is not None:
self._state_cb(self)
@property
def is_idle(self):
return len(self.tasks) == 0 and self.state == 'running'
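if __name__ == '__main__':
    # Illustration only (added; not part of the module). A throw-away
    # stand-in object supplies the project name that replace_prefix needs,
    # so no real Resource or Project is required for this demo.
    class _DemoProject(object):
        name = 'demo'
    sched = Scheduler(resource=None)
    sched.project = _DemoProject()
    print(sched.replace_prefix('staging:///alanine.pdb'))        # ../staging_area/alanine.pdb
    print(sched.replace_prefix('project:///models/model.json'))  # ../../projects/demo/models/model.json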
|
markovmodel/adaptivemd
|
adaptivemd/scheduler.py
|
Python
|
lgpl-2.1
| 10,021
|
[
"MDTraj"
] |
4485cde8b8f00dc312476f68a3b108daef90d3efbe93c44699be58ac35c00b28
|
# class to get data from OpenBooks
import httplib2
import json
import sqlalchemy
from copy import deepcopy
from oauth2client import file, client, tools
from octopus.core import app, initialise
from service.database import db, FA_API_TABLES
from service.lib import util
class Sync(object):
SCOPES = 'CottageLabsFinance'
CLIENT_SECRET_FILE = 'config/openbooks_secret.json'
STORAGE_SECRET_FILE = 'config/openbooks_secret_storage.json'
BASE_URL = "https://api.freeagent.com/v2/"
JSON_HEADERS = {'Accept': 'application/json', 'Content-Type': 'application/json; charset=UTF-8'}
DATA_REQUIRING_BANK_ACC = ['bank_transactions', 'bank_transaction_explanations']
def __init__(self):
credentials, store = self.get_oauth_creds()
if not credentials or credentials.invalid:
self.refresh_oauth_creds()
# apply credentials to http instance
self.http = httplib2.Http()
self.http = credentials.authorize(self.http)
@classmethod
def get_oauth_creds(cls):
store = file.Storage(cls.STORAGE_SECRET_FILE)
credentials = store.get()
return credentials, store
@classmethod
def refresh_oauth_creds(cls):
credentials, store = cls.get_oauth_creds()
flow = client.flow_from_clientsecrets(cls.CLIENT_SECRET_FILE, cls.SCOPES)
credentials = tools.run_flow(flow, store)
return True
@staticmethod
def sync_prep():
initialise()
@classmethod
def sync_fetch(cls, table=''):
if table:
app.logger.info("Fetching data from API for table: %s" % table)
else:
app.logger.info("Fetching data from API for all tables.")
s = cls()
data = {}
if table:
data[table] = s.get_one_table(table)
else:
cls.sync_prep()
if len(FA_API_TABLES.keys()) == 0:
app.logger.critical('No tables detected by SQLAlchemy! '
'Can\'t fetch. Stopping.')
return {}
for table_name in FA_API_TABLES.keys():
data[table_name] = s.get_one_table(table_name)
return data
@classmethod
def sync_write_table(cls, table, data):
if table:
app.logger.info("Writing data to DB for table: %s" % table)
else:
app.logger.info("Writing data to DB for all tables.")
for obj in data:
# look up sqlalchemy Table obj by the table name we have,
# then look up the model class corresponding to that table
mclass = util.get_model_class_by_tablename(table)
modeli = mclass(**obj)
db.session.add(modeli)
db.session.commit()
def get_data(self, method, querystring=""):
app.logger.debug("Requesting {0}{1}{2} with headers:\n{3}"
.format(self.BASE_URL, method, querystring, self.JSON_HEADERS))
return json.loads(self.http.request("{0}{1}{2}".format(self.BASE_URL, method, querystring), headers=self.JSON_HEADERS)[1])
def get_data_batch(self, method, subquery, page, per_page):
querystring = "?"
if subquery:
querystring += subquery + "&"
querystring += "page={0}&per_page={1}".format(page, per_page)
return self.get_data(method, querystring)
def get_data_paged(self, method, subquery=""):
page = 1
per_page = 100
data = []
while True:
if method == 'bank_transactions' and not subquery:
raise ValueError(
"You must specify a subquery when asking for bank "
"transactions. Something like bank_account=' + bank_account[\"url\"]"
" where bank_account is the bank account you want to query."
)
raw = self.get_data_batch(method, subquery, page, per_page)
if u'errors' in raw:
app.logger.error("FreeAgent API returned errors:\n" + json.dumps(raw[u'errors']))
return []
if method not in raw:
app.logger.error(
'Assumption made about FreeAgent API that it has an '
'envelope wrapping around all results (except '
'categories) with '
'{{<method_name>: [<results>]}} is not holding. Please '
'double-check documentation for getting {0} data and '
"fix code. Skipping {0}. Was on page {1}. "
"Response dump:\n\n{2}\n\nAn error occurred! "
.format(method, page, json.dumps(raw, indent=2)))
return []
batch = raw[method]
if method == 'users':
for user in batch:
user.pop('ni_number', '') # throw away this private info
if len(batch) > 0:
data.extend(batch)
page += 1
else:
break
return data
def get_one_table(self, table_name):
if table_name in self.DATA_REQUIRING_BANK_ACC:
accounts = self.get_one_table("bank_accounts")
primary_account = self.get_primary_bank_acc(accounts)
return self.get_data_paged(table_name, subquery='bank_account=' + primary_account["url"])
if table_name == 'categories':
# Paging doesn't work on the categories endpoint.
# Specifically, it keeps returning the same data for
# pages 1,2,3 etc. So we don't add paging info to request.
raw = self.get_data(table_name)
categories = []
for cat_type in raw.keys():
# flatten out the categories to 1 list, not several
categories.extend(raw[cat_type])
return categories
return self.get_data_paged(table_name)
@staticmethod
def get_primary_bank_acc(bank_accounts):
account = None
for acc in bank_accounts:
if acc['is_primary']:
account = acc
if not account:
raise ValueError('No bank account is marked as primary in '
'FreeAgent API. Please double-check and fix.')
return account
class CompareAPI2Models(object):
@staticmethod
def trim_api_response_to_model(tablename, api_data):
trim_api_data = deepcopy(api_data)
api_fields = set()
for obj in trim_api_data:
api_fields |= set(obj.keys())
mclass = util.get_model_class_by_tablename(tablename)
# get a list of database fields present in the model
model_fields = []
for attr in dir(mclass):
is_db_field = isinstance(getattr(mclass, attr), sqlalchemy.orm.attributes.InstrumentedAttribute)
if is_db_field:
model_fields.append(attr)
# check for fields the API has that we do not
for field in api_fields:
if not hasattr(mclass, field):
# trim off fields that models do not have
for obj in trim_api_data:
obj.pop(field, None)
return trim_api_data
@staticmethod
def cmp_api2model(tablename, api_sample):
detected_differences = False
api_fields = set()
for obj in api_sample:
api_fields |= set(obj.keys())
mclass = util.get_model_class_by_tablename(tablename)
# get a list of database fields present in the model
model_fields = []
for attr in dir(mclass):
is_db_field = isinstance(getattr(mclass, attr), sqlalchemy.orm.attributes.InstrumentedAttribute)
if is_db_field:
model_fields.append(attr)
# check for fields the API has that we do not
for field in api_fields:
if not hasattr(mclass, field):
app.logger.warn('{0} does not have field {1} present in API.'
.format(mclass.__name__, field))
detected_differences = True
# check for fields our models have, but the API does not
for mfield in model_fields:
if mfield not in api_fields:
app.logger.warn('{0} has an attribute {1} NOT present in API.'
.format(mclass.__name__, mfield))
detected_differences = True
if not detected_differences:
app.logger.info('No differences detected between {0} and API'.format(tablename))
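# Usage sketch (commented; added for illustration, not part of the module).
# Requires the FreeAgent OAuth secret files referenced above and an
# initialised database:
#
#     data = Sync.sync_fetch('bank_accounts')
#     Sync.sync_write_table('bank_accounts', data['bank_accounts'])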
|
CottageLabs/finance
|
service/lib/sync.py
|
Python
|
apache-2.0
| 8,570
|
[
"Octopus"
] |
323f1065973e3d418dec499c93dedb26315062cd12896d11c28982c65fbb54e3
|
# coding=utf-8
"""
Protein-Ligand Interaction Profiler - Analyze and visualize protein-ligand interactions in PDB files.
test_metal_coordination.py - Unit Tests for Metal Coordination.
"""
import unittest
from plip.structure.preparation import PDBComplex
class MetalCoordinationTest(unittest.TestCase):
"""Checks predictions against literature-validated interactions for metal coordination."""
###############################################
# Literature-validated cases from publication #
###############################################
def test_1rmd(self):
"""Zinc binding sites in RAG1 dimerization domain (1rmd)
Reference: Harding. The architecture of metal coordination groups in proteins. (2004), Fig. 1a
"""
tmpmol = PDBComplex()
tmpmol.load_pdb('./pdb/1rmd.pdb')
bsid = 'ZN:A:119'
for ligand in tmpmol.ligands:
if ':'.join([ligand.hetid, ligand.chain, str(ligand.position)]) == bsid:
tmpmol.characterize_complex(ligand)
s = tmpmol.interaction_sets[bsid]
# Coordination by three cysteines and one histidine of the protein
metalres = [mres.restype for mres in s.metal_complexes]
self.assertEqual(metalres.count('CYS'), 3)
self.assertEqual(metalres.count('HIS'), 1)
# Zn atom with tetrahedral geometry (coordination number 4)
self.assertEqual(s.metal_complexes[0].coordination_num, 4)
self.assertEqual(s.metal_complexes[0].geometry, 'tetrahedral')
def test_1rla(self):
"""Rat liver arginase, a binuclear manganese metalloenzyme (1rmd)
Reference: Harding. The architecture of metal coordination groups in proteins. (2004), Fig. 1b
"""
tmpmol = PDBComplex()
tmpmol.load_pdb('./pdb/1rla.pdb')
bsid = 'MN:A:500'
for ligand in tmpmol.ligands:
if ':'.join([ligand.hetid, ligand.chain, str(ligand.position)]) == bsid:
tmpmol.characterize_complex(ligand)
s = tmpmol.interaction_sets[bsid]
# Coordination by one histidine, three aspartic acid residues, and one water molecule
metalres = [mres.restype for mres in s.metal_complexes]
self.assertEqual(metalres.count('HIS'), 1)
self.assertEqual(metalres.count('ASP'), 3)
self.assertEqual(metalres.count('HOH'), 1)
# Mn atom with square pyramidal geometry (coordination number 5)
self.assertEqual(s.metal_complexes[0].coordination_num, 5)
self.assertEqual(s.metal_complexes[0].geometry, 'square.pyramidal')
def test_1het(self):
"""Liver alcohol deshydrogenase (1het)
Reference: Harding. The architecture of metal coordination groups in proteins. (2004), Fig. 2
"""
tmpmol = PDBComplex()
tmpmol.load_pdb('./pdb/1het.pdb')
bsid = 'ZN:A:401'
for ligand in tmpmol.ligands:
if ':'.join([ligand.hetid, ligand.chain, str(ligand.position)]) == bsid:
tmpmol.characterize_complex(ligand)
s = tmpmol.interaction_sets[bsid]
# Coordination by four cysteines
metalres = [mres.restype + str(mres.resnr) for mres in s.metal_complexes]
self.assertEqual(set(metalres), {'CYS97', 'CYS100', 'CYS103', 'CYS111'})
# Zn atom with tetrahedral geometry (coordination number 4)
self.assertEqual(s.metal_complexes[0].coordination_num, 4)
self.assertEqual(s.metal_complexes[0].geometry, 'tetrahedral')
def test_1vfy(self):
"""Phosphatidylinositol-3-phosphate binding FYVE domain of VPS27P protein (1vfy)
Reference: Harding. The architecture of metal coordination groups in proteins. (2004), Fig. 5
"""
tmpmol = PDBComplex()
tmpmol.load_pdb('./pdb/1vfy.pdb')
bsid = 'ZN:A:300'
for ligand in tmpmol.ligands:
if ':'.join([ligand.hetid, ligand.chain, str(ligand.position)]) == bsid:
tmpmol.characterize_complex(ligand)
s = tmpmol.interaction_sets[bsid]
# Coordination by four cysteines
metalres = [mres.restype for mres in s.metal_complexes]
self.assertEqual(set(metalres), {'CYS'})
# Zn atom with tetrahedral geometry (coordination number 4)
self.assertEqual(s.metal_complexes[0].coordination_num, 4)
self.assertEqual(s.metal_complexes[0].geometry, 'tetrahedral')
def test_2pvb(self):
"""Pike parvalbumin binding calcium (2pvb)
Reference: Harding. The architecture of metal coordination groups in proteins. (2004), Fig. 6
"""
tmpmol = PDBComplex()
tmpmol.load_pdb('./pdb/2pvb.pdb')
bsid = 'CA:A:110'
for ligand in tmpmol.ligands:
if ':'.join([ligand.hetid, ligand.chain, str(ligand.position)]) == bsid:
tmpmol.characterize_complex(ligand)
s = tmpmol.interaction_sets[bsid]
# Ca atom with square pyramidal geometry (coordination number 5)
self.assertEqual(s.metal_complexes[0].coordination_num, 5)
self.assertEqual(s.metal_complexes[0].geometry, 'square.pyramidal')
def test_2q8q(self):
"""Crystal Structure of S. aureus IsdE complexed with heme (2q8q)
Reference: Grigg et al. Heme coordination by Staphylococcus aureus IsdE. (2007)
"""
tmpmol = PDBComplex()
tmpmol.load_pdb('./pdb/2q8q.pdb')
bsid = 'HEM:A:300'
for ligand in tmpmol.ligands:
if ':'.join([ligand.hetid, ligand.chain, str(ligand.position)]) == bsid:
tmpmol.characterize_complex(ligand)
s = tmpmol.interaction_sets[bsid]
# Coordination by four nitrogens of heme itself and one additional histidine from the protein
metalres = [mres.restype for mres in s.metal_complexes]
self.assertEqual(metalres.count('HEM'), 4)
self.assertEqual(metalres.count('HIS'), 1)
# Fe atom with square pyramidal geometry (coordination number 5)
self.assertEqual(s.metal_complexes[0].coordination_num, 5)
self.assertEqual(s.metal_complexes[0].geometry, 'square.pyramidal')
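if __name__ == '__main__':
    # Standard unittest entry point (added for convenience; not in the
    # original file). Run from the plip/test directory so the relative
    # ./pdb/ paths used above resolve.
    unittest.main()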
|
ssalentin/plip
|
plip/test/test_metal_coordination.py
|
Python
|
gpl-2.0
| 6,129
|
[
"CRYSTAL"
] |
289b96132e652c98dc8b93ba4e7eec5ea74a8ed9ed8982f3cb82b1a5259674ca
|
# This file is part of Bioy
#
# Bioy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Bioy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Bioy. If not, see <http://www.gnu.org/licenses/>.
"""Deduplicate any number of csv file with optional column group indexing.
"""
import logging
import pandas
import sys
from bioy_pkg import utils
log = logging.getLogger(__name__)
def build_parser(parser):
# required inputs
parser.add_argument(
'csv',
nargs='+',
help='CSV tabular blast file of query and subject hits.')
# common outputs
parser.add_argument(
'-o', '--out', metavar='FILE',
default=sys.stdout, type=utils.Opener('w'),
help="Classification results.")
parser.add_argument(
'--limit', type=int, help='Limit number of rows read from each csv.')
parser.add_argument(
'--on',
metavar='COLS',
help=('Comma delimited list of column '
'names or indices if --no-header'))
parser.add_argument(
'--no-header',
action='store_true',
help='If no header available.')
parser.add_argument(
'--take-last',
action='store_true',
help='Take the last duplicate value. Default is first.')
parser.add_argument(
'--stack',
action='store_true',
help=('keep all lines, do not deduplicate'))
def action(args):
# for debugging:
# pandas.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
dfs = []
columns = None # to preserve column order
for csv in args.csv:
df = utils.read_csv(csv,
dtype=str,
nrows=args.limit,
comment='#',
na_filter=False,
header=None if args.no_header else 0)
columns = df.columns
dfs.append(df)
df = pandas.concat(dfs, ignore_index=True)
if not args.stack:
if args.on:
on = args.on.split(',')
if args.no_header:
on = map(int, on)
df = df.groupby(by=on, sort=False)
df = df.tail(1) if args.take_last else df.head(1)
else:
df = df.drop_duplicates(take_last=args.take_last)
df.to_csv(args.out, columns=columns, index=False)
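# Example invocation (sketch; the column name 'qseqid' is hypothetical):
#
#     bioy csvdeduplicate hits1.csv hits2.csv --on qseqid -o deduped.csv
#
# Without --on, exact duplicate rows are dropped via drop_duplicates above.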
|
nhoffman/bioy
|
bioy_pkg/subcommands/csvdeduplicate.py
|
Python
|
gpl-3.0
| 2,842
|
[
"BLAST"
] |
fb8d3d289069c6b559ab86822d6e8097818b55642051be11dc51ea7456ea2d25
|
#!/usr/bin/python2
from distutils.core import setup
import os
import sys
# config file
data_files = [("/etc/lorax", ["etc/lorax.conf"])]
# shared files
for root, dnames, fnames in os.walk("share"):
for fname in fnames:
data_files.append((root.replace("share", "/usr/share/lorax", 1),
[os.path.join(root, fname)]))
# executable
data_files.append(("/usr/sbin", ["src/sbin/lorax", "src/sbin/mkefiboot",
"src/sbin/livemedia-creator"]))
data_files.append(("/usr/bin", ["src/bin/image-minimizer"]))
# get the version
sys.path.insert(0, "src")
try:
import pylorax.version
except ImportError:
vernum = "devel"
else:
vernum = pylorax.version.num
finally:
sys.path = sys.path[1:]
setup(name="lorax",
version=vernum,
description="Lorax",
long_description="",
author="Martin Gracik, Will Woods <wwoods@redhat.com>, Brian C. Lane <bcl@redhat.com>",
url="http://www.github.com/rhinstaller/lorax/",
download_url="http://www.github.com/rhinstaller/lorax/releases/",
license="GPLv2+",
packages=["pylorax"],
package_dir={"" : "src"},
data_files=data_files
)
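# Typical invocations (illustrative; not part of the original file):
#   python2 setup.py build
#   python2 setup.py install --root=/tmp/lorax-install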
|
dashea/lorax
|
setup.py
|
Python
|
gpl-2.0
| 1,203
|
[
"Brian"
] |
5ff4c3d850cc2842f90c273c952ad4d620aa6b1d18ce62feb8667cf996312d44
|
import numpy as np
import pandas as pd
import mdtraj as md
trj0 = md.load("./system.subset.pdb")
top, bonds = trj0.top.to_dataframe()
i0 = np.where((top.name == "CB") & (top.resSeq == 310))[0][0]
i1 = np.where((top.name == "CB") & (top.resSeq == 409))[0][0]
indices = np.array([[i0, i1]])
n_traj = 131
all_distances = []
for i in range(n_traj):
print(i)
traj = md.load("./Trajectories/trj%d.h5" % i)
d = md.geometry.compute_distances(traj, indices, periodic=False)
all_distances.extend(d[:,0])
all_distances = np.array(all_distances)
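# Persisting the pooled distances is not done in the original script; a
# minimal follow-up (output filename assumed) would be:
np.save("cb_distances.npy", all_distances)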
|
hainm/MSMs
|
attic/src/code/hmsm/compute_distances.py
|
Python
|
gpl-2.0
| 559
|
[
"MDTraj"
] |
498bc73a1c5693aedfd9f63c3127f785fd194621b3aacaf2495debd258bfbf96
|
# Copyright (C) 2010 CAMd
# Please see the accompanying LICENSE file for further information.
"""This module provides all the classes and functions associated with the
evaluation of exact exchange with k-point sampling."""
from time import time
from math import pi, sqrt
import numpy as np
from ase.utils import prnt
from ase.units import Hartree
from ase.dft.kpoints import monkhorst_pack
import gpaw.mpi as mpi
import gpaw.fftw as fftw
from gpaw.xc.hybrid import HybridXCBase
from gpaw.kpt_descriptor import KPointDescriptor
from gpaw.wavefunctions.pw import PWDescriptor, PWLFC
from gpaw.utilities import pack, unpack2, packed_index, logfile, erf
from gpaw.utilities.ewald import madelung
from gpaw.utilities.timing import Timer
class HybridXC(HybridXCBase):
orbital_dependent = True
def __init__(self, name, hybrid=None, xc=None,
alpha=None,
gamma_point=1,
method='standard',
bandstructure=False,
logfilename='-', bands=None,
fcut=1e-10,
molecule=False,
qstride=1,
world=None):
"""Mix standard functionals with exact exchange.
name: str
Name of functional: EXX, PBE0, HSE03, HSE06
hybrid: float
Fraction of exact exchange.
xc: str or XCFunctional object
Standard DFT functional with scaled down exchange.
method: str
Use 'standard' for the standard formula or 'acdf' for the
adiabatic-connection dissipation fluctuation formula.
alpha: float
XXX describe
gamma_point: bool
0: Skip k2-k1=0 interactions.
1: Use the alpha method.
2: Integrate the gamma point.
bandstructure: bool
Calculate bandstructure instead of just the total energy.
bands: list of int
List of bands to calculate bandstructure for. Default is
all bands.
molecule: bool
Decouple electrostatic interactions between periodically
repeated images.
fcut: float
Threshold for empty band.
"""
self.alpha = alpha
self.fcut = fcut
self.gamma_point = gamma_point
self.method = method
self.bandstructure = bandstructure
self.bands = bands
self.fd = logfilename
self.write_timing_information = True
HybridXCBase.__init__(self, name, hybrid, xc)
# EXX energies:
self.exx = None # total
self.evv = None # valence-valence (pseudo part)
self.evvacdf = None # valence-valence (pseudo part)
self.devv = None # valence-valence (PAW correction)
self.evc = None # valence-core
self.ecc = None # core-core
self.exx_skn = None # bandstructure
self.qlatest = None
if world is None:
world = mpi.world
self.world = world
self.molecule = molecule
if isinstance(qstride, int):
qstride = [qstride] * 3
self.qstride_c = np.asarray(qstride)
self.timer = Timer()
def log(self, *args, **kwargs):
prnt(file=self.fd, *args, **kwargs)
self.fd.flush()
def calculate_radial(self, rgd, n_sLg, Y_L, v_sg,
dndr_sLg=None, rnablaY_Lv=None,
tau_sg=None, dedtau_sg=None):
return self.xc.calculate_radial(rgd, n_sLg, Y_L, v_sg,
dndr_sLg, rnablaY_Lv)
def calculate_paw_correction(self, setup, D_sp, dEdD_sp=None,
addcoredensity=True, a=None):
return self.xc.calculate_paw_correction(setup, D_sp, dEdD_sp,
addcoredensity, a)
def initialize(self, dens, ham, wfs, occupations):
assert wfs.bd.comm.size == 1
self.xc.initialize(dens, ham, wfs, occupations)
self.dens = dens
self.wfs = wfs
# Make a k-point descriptor that is not distributed
# (self.kd.comm is serial_comm):
self.kd = wfs.kd.copy()
self.fd = logfile(self.fd, self.world.rank)
wfs.initialize_wave_functions_from_restart_file()
def set_positions(self, spos_ac):
self.spos_ac = spos_ac
def calculate(self, gd, n_sg, v_sg=None, e_g=None):
# Normal XC contribution:
exc = self.xc.calculate(gd, n_sg, v_sg, e_g)
# Add EXX contribution:
return exc + self.exx * self.hybrid
def calculate_exx(self):
"""Non-selfconsistent calculation."""
self.timer.start('EXX')
self.timer.start('Initialization')
kd = self.kd
wfs = self.wfs
if fftw.FFTPlan is fftw.NumpyFFTPlan:
self.log('NOT USING FFTW !!')
self.log('Spins:', self.wfs.nspins)
W = max(1, self.wfs.kd.comm.size // self.wfs.nspins)
# Are the k-points distributed?
kparallel = (W > 1)
# Find number of occupied bands:
self.nocc_sk = np.zeros((self.wfs.nspins, kd.nibzkpts), int)
for kpt in self.wfs.kpt_u:
for n, f in enumerate(kpt.f_n):
if abs(f) < self.fcut:
self.nocc_sk[kpt.s, kpt.k] = n
break
else:
self.nocc_sk[kpt.s, kpt.k] = self.wfs.bd.nbands
self.wfs.kd.comm.sum(self.nocc_sk)
noccmin = self.nocc_sk.min()
noccmax = self.nocc_sk.max()
self.log('Number of occupied bands (min, max): %d, %d' %
(noccmin, noccmax))
self.log('Number of valence electrons:', self.wfs.setups.nvalence)
if self.bandstructure:
self.log('Calculating eigenvalue shifts.')
# allocate array for eigenvalue shifts:
self.exx_skn = np.zeros((self.wfs.nspins,
kd.nibzkpts,
self.wfs.bd.nbands))
if self.bands is None:
noccmax = self.wfs.bd.nbands
else:
noccmax = max(max(self.bands) + 1, noccmax)
N_c = self.kd.N_c
vol = wfs.gd.dv * wfs.gd.N_c.prod()
if self.alpha is None:
alpha = 6 * vol**(2 / 3.0) / pi**2
else:
alpha = self.alpha
if self.gamma_point == 1:
if alpha == 0.0:
qvol = (2*np.pi)**3 / vol / N_c.prod()
self.gamma = 4*np.pi * (3*qvol / (4*np.pi))**(1/3.) / qvol
else:
self.gamma = self.calculate_gamma(vol, alpha)
else:
kcell_cv = wfs.gd.cell_cv.copy()
kcell_cv[0] *= N_c[0]
kcell_cv[1] *= N_c[1]
kcell_cv[2] *= N_c[2]
self.gamma = madelung(kcell_cv) * vol * N_c.prod() / (4 * np.pi)
self.log('Value of alpha parameter: %.3f Bohr^2' % alpha)
self.log('Value of gamma parameter: %.3f Bohr^2' % self.gamma)
# Construct all possible q=k2-k1 vectors:
Nq_c = (N_c - 1) // self.qstride_c
i_qc = np.indices(Nq_c * 2 + 1, float).transpose(
(1, 2, 3, 0)).reshape((-1, 3))
self.bzq_qc = (i_qc - Nq_c) / N_c * self.qstride_c
self.q0 = ((Nq_c * 2 + 1).prod() - 1) // 2 # index of q=(0,0,0)
assert not self.bzq_qc[self.q0].any()
# Count number of pairs for each q-vector:
self.npairs_q = np.zeros(len(self.bzq_qc), int)
for s in range(kd.nspins):
for k1 in range(kd.nibzkpts):
for k2 in range(kd.nibzkpts):
for K2, q, n1_n, n2 in self.indices(s, k1, k2):
self.npairs_q[q] += len(n1_n)
self.npairs0 = self.npairs_q.sum() # total number of pairs
self.log('Number of pairs:', self.npairs0)
# Distribute q-vectors to Q processors:
Q = self.world.size // self.wfs.kd.comm.size
myrank = self.world.rank // self.wfs.kd.comm.size
rank = 0
N = 0
myq = []
nq = 0
for q, n in enumerate(self.npairs_q):
if n > 0:
nq += 1
if rank == myrank:
myq.append(q)
N += n
if N >= (rank + 1.0) * self.npairs0 / Q:
rank += 1
assert len(myq) > 0, 'Too few q-vectors for too many processes!'
self.bzq_qc = self.bzq_qc[myq]
try:
self.q0 = myq.index(self.q0)
except ValueError:
self.q0 = None
self.log('%d x %d x %d k-points' % tuple(self.kd.N_c))
self.log('Distributing %d IBZ k-points over %d process(es).' %
(kd.nibzkpts, self.wfs.kd.comm.size))
self.log('Distributing %d q-vectors over %d process(es).' % (nq, Q))
# q-point descriptor for my q-vectors:
qd = KPointDescriptor(self.bzq_qc)
# Plane-wave descriptor for all wave-functions:
self.pd = PWDescriptor(wfs.pd.ecut, wfs.gd,
dtype=wfs.pd.dtype, kd=kd)
# Plane-wave descriptor pair-densities:
self.pd2 = PWDescriptor(self.dens.pd2.ecut, self.dens.gd,
dtype=wfs.dtype, kd=qd)
self.log('Cutoff energies:')
self.log(' Wave functions: %10.3f eV' %
(self.pd.ecut * Hartree))
self.log(' Density: %10.3f eV' %
(self.pd2.ecut * Hartree))
# Calculate 1/|G+q|^2 with special treatment of |G+q|=0:
G2_qG = self.pd2.G2_qG
if self.q0 is None:
if self.omega is None:
self.iG2_qG = [1.0 / G2_G for G2_G in G2_qG]
else:
self.iG2_qG = [(1.0 / G2_G *
(1 - np.exp(-G2_G / (4 * self.omega**2))))
for G2_G in G2_qG]
else:
G2_qG[self.q0][0] = 117.0 # avoid division by zero
if self.omega is None:
self.iG2_qG = [1.0 / G2_G for G2_G in G2_qG]
self.iG2_qG[self.q0][0] = self.gamma
else:
self.iG2_qG = [(1.0 / G2_G *
(1 - np.exp(-G2_G / (4 * self.omega**2))))
for G2_G in G2_qG]
self.iG2_qG[self.q0][0] = 1 / (4 * self.omega**2)
G2_qG[self.q0][0] = 0.0 # restore correct value
# Compensation charges:
self.ghat = PWLFC([setup.ghat_l for setup in wfs.setups], self.pd2)
self.ghat.set_positions(self.spos_ac)
if self.molecule:
self.initialize_gaussian()
self.log('Value of beta parameter: %.3f 1/Bohr^2' % self.beta)
self.timer.stop('Initialization')
# Ready ... set ... go:
self.t0 = time()
self.npairs = 0
self.evv = 0.0
self.evvacdf = 0.0
for s in range(self.wfs.nspins):
kpt1_q = [KPoint(self.wfs, noccmax).initialize(kpt)
for kpt in self.wfs.kpt_u if kpt.s == s]
kpt2_q = kpt1_q[:]
if len(kpt1_q) == 0:
# No s-spins on this CPU:
continue
# Send and receive ranks:
srank = self.wfs.kd.get_rank_and_index(
s, (kpt1_q[0].k - 1) % kd.nibzkpts)[0]
rrank = self.wfs.kd.get_rank_and_index(
s, (kpt1_q[-1].k + 1) % kd.nibzkpts)[0]
# Shift k-points kd.nibzkpts - 1 times:
for i in range(kd.nibzkpts):
if i < kd.nibzkpts - 1:
if kparallel:
kpt = kpt2_q[-1].next(self.wfs)
kpt.start_receiving(rrank)
kpt2_q[0].start_sending(srank)
else:
kpt = kpt2_q[0]
self.timer.start('Calculate')
for kpt1, kpt2 in zip(kpt1_q, kpt2_q):
# Loop over all k-points that k2 can be mapped to:
for K2, q, n1_n, n2 in self.indices(s, kpt1.k, kpt2.k):
self.apply(K2, q, kpt1, kpt2, n1_n, n2)
self.timer.stop('Calculate')
if i < kd.nibzkpts - 1:
self.timer.start('Wait')
if kparallel:
kpt.wait()
kpt2_q[0].wait()
self.timer.stop('Wait')
kpt2_q.pop(0)
kpt2_q.append(kpt)
self.evv = self.world.sum(self.evv)
self.evvacdf = self.world.sum(self.evvacdf)
self.calculate_exx_paw_correction()
if self.method == 'standard':
self.exx = self.evv + self.devv + self.evc + self.ecc
elif self.method == 'acdf':
self.exx = self.evvacdf + self.devv + self.evc + self.ecc
else:
1 / 0
self.log('Exact exchange energy:')
for txt, e in [
('core-core', self.ecc),
('valence-core', self.evc),
('valence-valence (pseudo, acdf)', self.evvacdf),
('valence-valence (pseudo, standard)', self.evv),
('valence-valence (correction)', self.devv),
('total (%s)' % self.method, self.exx)]:
self.log(' %-36s %14.6f eV' % (txt + ':', e * Hartree))
self.log('Total time: %10.3f seconds' % (time() - self.t0))
self.npairs = self.world.sum(self.npairs)
assert self.npairs == self.npairs0
self.timer.stop('EXX')
self.timer.write(self.fd)
def calculate_gamma(self, vol, alpha):
if self.molecule:
return 0.0
N_c = self.kd.N_c
offset_c = (N_c + 1) % 2 * 0.5 / N_c
bzq_qc = monkhorst_pack(N_c) + offset_c
qd = KPointDescriptor(bzq_qc)
pd = PWDescriptor(self.wfs.pd.ecut, self.wfs.gd, kd=qd)
gamma = (vol / (2 * pi)**2 * sqrt(pi / alpha) *
self.kd.nbzkpts)
for G2_G in pd.G2_qG:
if G2_G[0] < 1e-7:
G2_G = G2_G[1:]
gamma -= np.dot(np.exp(-alpha * G2_G), G2_G**-1)
return gamma / self.qstride_c.prod()
def indices(self, s, k1, k2):
"""Generator for (K2, q, n1, n2) indices for (k1, k2) pair.
s: int
Spin index.
k1: int
Index of k-point in the IBZ.
k2: int
Index of k-point in the IBZ.
Returns (K, q, n1_n, n2), where K is the index of the k-point in
the BZ that k2 is mapped to, q is the index of the q-vector
between K and k1, and n1_n is a list of bands that should be
combined with band n2."""
for K, k in enumerate(self.kd.bz2ibz_k):
if k == k2:
for K, q, n1_n, n2 in self._indices(s, k1, k2, K):
yield K, q, n1_n, n2
def _indices(self, s, k1, k2, K2):
k1_c = self.kd.ibzk_kc[k1]
k2_c = self.kd.bzk_kc[K2]
q_c = k2_c - k1_c
q = abs(self.bzq_qc - q_c).sum(1).argmin()
if abs(self.bzq_qc[q] - q_c).sum() > 1e-7:
return
if self.gamma_point == 0 and q == self.q0:
return
nocc1 = self.nocc_sk[s, k1]
nocc2 = self.nocc_sk[s, k2]
# Is k2 in the IBZ?
is_ibz2 = (self.kd.ibz2bz_k[k2] == K2)
for n2 in range(self.wfs.bd.nbands):
# Find range of n1's (from n1a to n1b-1):
if is_ibz2:
# We get this combination twice, so let's only do half:
if k1 >= k2:
n1a = n2
else:
n1a = n2 + 1
else:
n1a = 0
n1b = self.wfs.bd.nbands
if self.bandstructure:
if n2 >= nocc2:
n1b = min(n1b, nocc1)
else:
if n2 >= nocc2:
break
n1b = min(n1b, nocc1)
if self.bands is not None:
assert self.bandstructure
n1_n = []
for n1 in range(n1a, n1b):
if (n1 in self.bands and n2 < nocc2 or
is_ibz2 and n2 in self.bands and n1 < nocc1):
n1_n.append(n1)
n1_n = np.array(n1_n)
else:
n1_n = np.arange(n1a, n1b)
if len(n1_n) == 0:
continue
yield K2, q, n1_n, n2
def apply(self, K2, q, kpt1, kpt2, n1_n, n2):
k20_c = self.kd.ibzk_kc[kpt2.k]
k2_c = self.kd.bzk_kc[K2]
if k2_c.any():
self.timer.start('Initialize plane waves')
eik2r_R = self.wfs.gd.plane_wave(k2_c)
eik20r_R = self.wfs.gd.plane_wave(k20_c)
self.timer.stop('Initialize plane waves')
else:
eik2r_R = 1.0
eik20r_R = 1.0
w1 = self.kd.weight_k[kpt1.k]
w2 = self.kd.weight_k[kpt2.k]
# Is k2 in the IBZ?
is_ibz2 = (self.kd.ibz2bz_k[kpt2.k] == K2)
e_n = self.calculate_interaction(n1_n, n2, kpt1, kpt2, q, K2,
eik20r_R, eik2r_R,
is_ibz2)
e_n *= 1.0 / self.kd.nbzkpts / self.wfs.nspins * self.qstride_c.prod()
if q == self.q0:
e_n[n1_n == n2] *= 0.5
f1_n = kpt1.f_n[n1_n]
eps1_n = kpt1.eps_n[n1_n]
f2 = kpt2.f_n[n2]
eps2 = kpt2.eps_n[n2]
s_n = np.sign(eps2 - eps1_n)
evv = (f1_n * f2 * e_n).sum()
evvacdf = 0.5 * (f1_n * (1 - s_n) * e_n +
f2 * (1 + s_n) * e_n).sum()
self.evv += evv * w1
self.evvacdf += evvacdf * w1
if is_ibz2:
self.evv += evv * w2
self.evvacdf += evvacdf * w2
if self.bandstructure:
x = self.wfs.nspins
self.exx_skn[kpt1.s, kpt1.k, n1_n] += x * f2 * e_n
if is_ibz2:
self.exx_skn[kpt2.s, kpt2.k, n2] += x * np.dot(f1_n, e_n)
def calculate_interaction(self, n1_n, n2, kpt1, kpt2, q, k,
eik20r_R, eik2r_R, is_ibz2):
"""Calculate Coulomb interactions.
For all n1 in the n1_n list, calculate interaction with n2."""
# number of plane waves:
ng1 = self.wfs.ng_k[kpt1.k]
ng2 = self.wfs.ng_k[kpt2.k]
# Transform to real space and apply symmetry operation:
self.timer.start('IFFT1')
if is_ibz2:
u2_R = self.pd.ifft(kpt2.psit_nG[n2, :ng2], kpt2.k)
else:
psit2_R = self.pd.ifft(kpt2.psit_nG[n2, :ng2], kpt2.k) * eik20r_R
self.timer.start('Symmetry transform')
u2_R = self.kd.transform_wave_function(psit2_R, k) / eik2r_R
self.timer.stop()
self.timer.stop()
# Calculate pair densities:
nt_nG = self.pd2.zeros(len(n1_n), q=q)
for n1, nt_G in zip(n1_n, nt_nG):
self.timer.start('IFFT2')
u1_R = self.pd.ifft(kpt1.psit_nG[n1, :ng1], kpt1.k)
self.timer.stop()
nt_R = u1_R.conj() * u2_R
self.timer.start('FFT')
nt_G[:] = self.pd2.fft(nt_R, q)
self.timer.stop()
s = self.kd.sym_k[k]
time_reversal = self.kd.time_reversal_k[k]
k2_c = self.kd.ibzk_kc[kpt2.k]
self.timer.start('Compensation charges')
Q_anL = {} # coefficients for shape functions
for a, P1_ni in kpt1.P_ani.items():
P1_ni = P1_ni[n1_n]
if is_ibz2:
P2_i = kpt2.P_ani[a][n2]
else:
b = self.kd.symmetry.a_sa[s, a]
S_c = (np.dot(self.spos_ac[a], self.kd.symmetry.op_scc[s]) -
self.spos_ac[b])
assert abs(S_c.round() - S_c).max() < 1e-5
if self.ghat.dtype == complex:
x = np.exp(2j * pi * np.dot(k2_c, S_c))
else:
x = 1.0
P2_i = np.dot(self.wfs.setups[a].R_sii[s],
kpt2.P_ani[b][n2]) * x
if time_reversal:
P2_i = P2_i.conj()
D_np = []
for P1_i in P1_ni:
D_ii = np.outer(P1_i.conj(), P2_i)
D_np.append(pack(D_ii))
Q_anL[a] = np.dot(D_np, self.wfs.setups[a].Delta_pL)
self.timer.start('Expand')
if q != self.qlatest:
self.f_IG = self.ghat.expand(q)
self.qlatest = q
self.timer.stop('Expand')
# Add compensation charges:
self.ghat.add(nt_nG, Q_anL, q, self.f_IG)
self.timer.stop('Compensation charges')
if self.molecule and n2 in n1_n:
nn = (n1_n == n2).nonzero()[0][0]
nt_nG[nn] -= self.ngauss_G
else:
nn = None
iG2_G = self.iG2_qG[q]
# Calculate energies:
e_n = np.empty(len(n1_n))
for n, nt_G in enumerate(nt_nG):
e_n[n] = -4 * pi * np.real(self.pd2.integrate(nt_G, nt_G * iG2_G))
self.npairs += 1
if nn is not None:
e_n[nn] -= 2 * (self.pd2.integrate(nt_nG[nn], self.vgauss_G) +
(self.beta / 2 / pi)**0.5)
if self.write_timing_information:
t = (time() - self.t0) / len(n1_n)
self.log('Time for first pair-density: %10.3f seconds' % t)
self.log('Estimated total time: %10.3f seconds' %
(t * self.npairs0 / self.world.size))
self.write_timing_information = False
return e_n
def calculate_exx_paw_correction(self):
self.timer.start('PAW correction')
self.devv = 0.0
self.evc = 0.0
self.ecc = 0.0
deg = 2 // self.wfs.nspins # spin degeneracy
for a, D_sp in self.dens.D_asp.items():
setup = self.wfs.setups[a]
for D_p in D_sp:
D_ii = unpack2(D_p)
ni = len(D_ii)
for i1 in range(ni):
for i2 in range(ni):
A = 0.0
for i3 in range(ni):
p13 = packed_index(i1, i3, ni)
for i4 in range(ni):
p24 = packed_index(i2, i4, ni)
A += setup.M_pp[p13, p24] * D_ii[i3, i4]
self.devv -= D_ii[i1, i2] * A / deg
self.evc -= np.dot(D_p, setup.X_p)
self.ecc += setup.ExxC
if not self.bandstructure:
self.timer.stop('PAW correction')
return
Q = self.world.size // self.wfs.kd.comm.size
self.exx_skn *= Q
for kpt in self.wfs.kpt_u:
for a, D_sp in self.dens.D_asp.items():
setup = self.wfs.setups[a]
for D_p in D_sp:
D_ii = unpack2(D_p)
ni = len(D_ii)
P_ni = kpt.P_ani[a]
for i1 in range(ni):
for i2 in range(ni):
A = 0.0
for i3 in range(ni):
p13 = packed_index(i1, i3, ni)
for i4 in range(ni):
p24 = packed_index(i2, i4, ni)
A += setup.M_pp[p13, p24] * D_ii[i3, i4]
self.exx_skn[kpt.s, kpt.k] -= \
(A * P_ni[:, i1].conj() * P_ni[:, i2]).real
p12 = packed_index(i1, i2, ni)
self.exx_skn[kpt.s, kpt.k] -= \
(P_ni[:, i1].conj() * setup.X_p[p12] *
P_ni[:, i2]).real / self.wfs.nspins
self.world.sum(self.exx_skn)
self.exx_skn *= self.hybrid / Q
self.timer.stop('PAW correction')
def initialize_gaussian(self):
"""Calculate gaussian compensation charge and its potential.
Used to decouple electrostatic interactions between
periodically repeated images for molecular calculations.
Charge containing one electron::
(beta/pi)^(3/2)*exp(-beta*r^2),
its Fourier transform::
exp(-G^2/(4*beta)),
and its potential::
erf(beta^0.5*r)/r.
"""
gd = self.wfs.gd
# Set exponent of exp-function to -19 on the boundary:
self.beta = 4 * 19 * (gd.icell_cv**2).sum(1).max()
# Calculate gaussian:
G_Gv = self.pd2.G_Qv[self.pd2.Q_qG[0]]
G2_G = self.pd2.G2_qG[0]
C_v = gd.cell_cv.sum(0) / 2 # center of cell
self.ngauss_G = np.exp(-1.0 / (4 * self.beta) * G2_G +
1j * np.dot(G_Gv, C_v)) / gd.dv
# Calculate potential from gaussian:
R_Rv = gd.get_grid_point_coordinates().transpose((1, 2, 3, 0))
r_R = ((R_Rv - C_v)**2).sum(3)**0.5
if (gd.N_c % 2 == 0).all():
r_R[tuple(gd.N_c // 2)] = 1.0 # avoid dividing by zero
v_R = erf(self.beta**0.5 * r_R) / r_R
if (gd.N_c % 2 == 0).all():
v_R[tuple(gd.N_c // 2)] = (4 * self.beta / pi)**0.5
self.vgauss_G = self.pd2.fft(v_R)
# Compare self-interaction to analytic result:
assert abs(0.5 * self.pd2.integrate(self.ngauss_G, self.vgauss_G) -
(self.beta / 2 / pi)**0.5) < 1e-6
class KPoint:
def __init__(self, wfs, nbands):
"""Helper class for parallelizing over k-points.
Placeholder for wave functions, occupation numbers, eigenvalues,
projections, spin index and global k-point index."""
self.kd = wfs.kd
self.ng_k = wfs.ng_k
# Array large enough to hold wave functions from all k-points:
self.psit_nG = wfs.pd.empty(nbands)
self.requests = []
def initialize(self, kpt):
ng = self.ng_k[kpt.k]
nbands = len(self.psit_nG)
self.psit_nG[:, :ng] = kpt.psit_nG[:nbands]
self.f_n = kpt.f_n / kpt.weight # will be in the range [0,1]
self.eps_n = kpt.eps_n
self.P_ani = kpt.P_ani
self.k = kpt.k
self.s = kpt.s
return self
def next(self, wfs):
"""Create empty object.
Data will be received from another process."""
nbands = len(self.psit_nG)
kpt = KPoint(wfs, nbands)
# Allocate arrays for receiving:
kpt.f_n = wfs.bd.empty()
kpt.eps_n = wfs.bd.empty()
# Total number of projector functions:
I = sum([P_ni.shape[1] for P_ni in self.P_ani.values()])
kpt.P_nI = np.empty((wfs.bd.nbands, I), wfs.dtype)
kpt.P_ani = {}
I1 = 0
assert self.P_ani.keys() == range(len(self.P_ani)) # ???
for a, P_ni in self.P_ani.items():
I2 = I1 + P_ni.shape[1]
kpt.P_ani[a] = kpt.P_nI[:, I1:I2]
I1 = I2
kpt.k = (self.k + 1) % self.kd.nibzkpts
kpt.s = self.s
return kpt
def start_sending(self, rank):
assert self.P_ani.keys() == range(len(self.P_ani)) # ???
P_nI = np.hstack([P_ni for P_ni in self.P_ani.values()])
P_nI = np.ascontiguousarray(P_nI)
self.requests += [
self.kd.comm.send(self.psit_nG, rank, block=False, tag=1),
self.kd.comm.send(self.f_n, rank, block=False, tag=2),
self.kd.comm.send(self.eps_n, rank, block=False, tag=3),
self.kd.comm.send(P_nI, rank, block=False, tag=4)]
def start_receiving(self, rank):
self.requests += [
self.kd.comm.receive(self.psit_nG, rank, block=False, tag=1),
self.kd.comm.receive(self.f_n, rank, block=False, tag=2),
self.kd.comm.receive(self.eps_n, rank, block=False, tag=3),
self.kd.comm.receive(self.P_nI, rank, block=False, tag=4)]
def wait(self):
self.kd.comm.waitall(self.requests)
self.requests = []
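# Usage sketch (commented; added for illustration). Non-selfconsistent exact
# exchange is typically evaluated on top of a converged calculation; the
# calculator object and its get_xc_difference hook are assumptions about the
# surrounding GPAW package, not defined in this module:
#
#     from gpaw import GPAW
#     calc = GPAW('groundstate.gpw', txt=None)
#     de_exx = calc.get_xc_difference(HybridXC('EXX'))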
|
robwarm/gpaw-symm
|
gpaw/xc/hybridg.py
|
Python
|
gpl-3.0
| 28,354
|
[
"ASE",
"GPAW",
"Gaussian"
] |
87a8469e02bd09a69d7eacf024b809f44eced666e0b22511aa45966285eb8f0b
|
"""Tests for Trees and Leaves.
"""
import pytest
import os
import py
import datreant as dtr
from datreant import Veg, Leaf, Tree, Treant
class TestVeg(object):
"""Common element tests of Trees and Leaves"""
cls = Veg
name = 'veggie'
@pytest.fixture
def veg(self, tmpdir):
with tmpdir.as_cwd():
v = Veg(self.name)
yield v
def test_str(self, veg):
assert str(veg) == os.path.join(os.getcwd(), self.name)
def test_hash(self, veg):
# hashes are based only on abspath
vset = {veg}
assert veg in vset
v2 = self.cls(self.name)
assert v2 in vset
def test_exists(self, veg):
assert not veg.exists
def test_abspath(self, veg):
assert veg.abspath == os.path.join(os.getcwd(), self.name)
def test_relpath(self, veg):
assert veg.relpath == self.name
def test_parent(self, veg):
p = veg.parent
assert isinstance(p, Tree)
assert p == Tree(os.path.split(self.name)[0])
def test_name(self, veg):
assert veg.name == os.path.split(self.name)[1]
class TestTree(TestVeg):
"""Test generic Treant features"""
cls = Tree
name = 'testtreant'
@pytest.fixture
def tree(self, tmpdir):
with tmpdir.as_cwd():
t = Tree(self.name)
yield t
veg = tree
def test_abspath(self, veg):
assert veg.abspath == os.path.join(os.getcwd(), self.name) + os.sep
def test_relpath(self, veg):
assert veg.relpath == self.name + os.sep
class TestTreeInit(object):
"""Test tree init.
Test that tree works for:
1. nonexistent directory
2. existing directory
Test that exception raised for:
1. tree initialized with existing file
"""
def test_nonexistant(self, tmpdir):
with tmpdir.as_cwd():
# test nonexistent directory
t = Tree('bark')
assert isinstance(t, Tree)
assert not t.exists
def test_existing(self, tmpdir):
with tmpdir.as_cwd():
os.makedirs('bark/lark')
t = Tree('bark/lark/')
assert isinstance(t, Tree)
assert t.exists
def test_file_ValueError(self, tmpdir):
with tmpdir.as_cwd():
os.mkdir('bark')
with open(os.path.join('bark', 'mark.txt'), 'w') as f:
f.write('hello\nthis is a cool file\n')
with pytest.raises(ValueError):
t = Tree('bark/mark.txt')
def test_exists(self, tree):
tree.make()
assert os.path.exists(tree.abspath)
assert tree.exists is True
def test_not_exists(self, tree):
assert not tree.exists
@pytest.fixture
def contains_Tree(self, tmpdir):
# Contains various combinations of files and directories
# that do and do not exist. Structure:
# container
# + dir1
# + file2
# + dir3 # doesn't exist
# + file4 # doesn't exist
with tmpdir.as_cwd():
os.mkdir('container')
os.mkdir(os.path.join('container', 'dir1'))
with open(os.path.join('container', 'dir1', 'file2'), 'w') as f:
f.write('some data here\n')
tree = Tree('container')
yield tree
@pytest.mark.parametrize('path,exp', (
('dir1/', True),
('dir1/file2', True),
('dir3/', False),
('dir3/file4', False),
))
def test_exists(self, contains_Tree, path, exp):
assert contains_Tree[path].exists == exp
@pytest.mark.parametrize('path,exp', (
(os.path.join(name, 'thing1'), True),
(os.path.join(name, 'thing2', 'thing3'), True),
(os.path.join('other', 'thing1'), False),
))
# loop over possible input types
@pytest.mark.parametrize('inptype', (Leaf, Tree, str))
def test_contains(self, tree, inptype, path, exp):
thing = inptype(path) # convert to desired type
assert (thing in tree) == exp
def test_contains_TypeError(self, tree):
with pytest.raises(TypeError):
24.0 in tree
def test_loc(self, tree):
l = tree.loc
assert isinstance(l, dtr.trees._Loc)
@pytest.fixture(params=[
lambda x: x, # ie just pass on the tree
lambda x: getattr(x, 'loc') # pass on the .loc of it
])
def tree_getitem(self, tree, request):
# for testing getitem, yields either the Tree or the '.loc' of it
# these should be equivalent
yield request.param(tree)
def test_getitem_subtree(self, tree, tree_getitem):
subt = tree_getitem['ground/control/to/major/treebeard/']
assert isinstance(subt, Tree)
assert not subt.exists
assert subt.path
assert subt in tree
def test_getitem_leaf(self, tree, tree_getitem):
leaf = tree_getitem['this/is/a/file']
assert isinstance(leaf, Leaf)
assert leaf in tree
def test_getitem_many_leaves(self, tree_getitem):
v = tree_getitem[['a/file', 'a/tree/']]
assert len(v) == 2
assert len(v.memberleaves) == 1
assert len(v.membertrees) == 1
def test_getitem_ValueError(self, tree_getitem):
with pytest.raises(ValueError):
tree_getitem['lolcats', 'a/not/file']
def test_getitem_returntypes(self, tree):
tree['ground/hogs/on/mars/'].make()
tree['the/file/of your/life'].make()
v = tree[['ground/hogs/on/mars', 'the/file/of your/life']]
assert isinstance(v[0], Tree)
assert isinstance(v[1], Leaf)
def test_leaves(self, tree):
with pytest.raises(OSError):
tree.leaves()
# actually make the directory now
tree.makedirs()
tree['.hide/me'].make()
tree['.hide/here/'].make()
assert len(tree.leaves()) == 0
tree['thing1'].make()
tree['thing2'].make()
tree['thing3'].make()
assert len(tree.leaves()) == 3
tree['larry/'].make()
tree['curly/'].make()
assert len(tree.leaves()) == 3
def test_trees(self, tree):
with pytest.raises(OSError):
tree.trees()
# actually make the directory now
tree.makedirs()
assert len(tree.trees()) == 0
tree['thing1'].make()
tree['thing2'].make()
tree['thing3'].make()
assert len(tree.trees()) == 0
tree['larry/'].make()
tree['curly/'].make()
assert len(tree.trees()) == 2
def test_equal(self, tree):
t1 = tree['a dir/']
t2 = tree['another dir/']
assert t1 != t2
assert t1['.'] == t1
def test_compare(self, tree):
assert tree['bark/'] <= tree['dark/']
def test_makedirs(self, tree):
t1 = tree['a/ton of/stupid/bricks/'].makedirs()
assert t1.exists
def test_glob(self, tree):
tm = tree['moe'].make()
tl = tree['larry'].make()
tc = tree['curly'].make()
assert tl in tree.glob('*r*y')
assert tc in tree.glob('*r*y')
def test_glob_OSError(self, tree):
with pytest.raises(OSError) as error:
tree.glob('something.*')
assert "Tree doesn't exist in the filesystem" in str(error.value)
def test_walk(self, tree):
# files
tree['scipy'].make()
tree['2016'].make()
tree['sprint'].make()
# directories with files
t1 = tree['a_dir/has_no_name']
t2 = tree['another_dir/bites_the_dust']
t1.make()
t2.make()
roots_scandir = []
dirs_scandir = []
files_scandir = []
all_roots = []
all_trees = []
all_leaves = []
for root, dirs, files in os.walk(tree.abspath):
# os.walk normally doesn't add slash to path, in order to
# replicate the path given by tree.walk() we use os.path.join
if root != tree.abspath:
root = os.path.join(root, '')
roots_scandir.append(root)
for directory in dirs:
# this is the abspath of the directory,
# same reason as above for use of os.path.join
dirs_scandir.append(os.path.join(root, directory, ''))
for f in files:
files_scandir.append(f)
for root, trees, leaves in tree.walk():
all_roots.append(root.abspath)
for tree in trees:
# this is the abspath of the directory
all_trees.append(tree.abspath)
for leaf in leaves:
all_leaves.append(leaf.name)
assert roots_scandir == all_roots
assert dirs_scandir == all_trees
assert files_scandir == all_leaves
def test_walk_OSError(self, tree):
with pytest.raises(OSError) as error:
for v in tree.walk(): # need to use generator to trigger OSError?!
assert v == 1
assert "Tree doesn't exist in the filesystem" in str(error.value)
def test_draw_OSError(self, tree):
with pytest.raises(OSError) as error:
tree.draw()
assert "Tree doesn't exist in the filesystem" in str(error.value)
def test_children(self, tree):
# actually make the directory now
tree.makedirs()
tree['a/file'].make()
tree['a/dir/'].make()
assert len(tree.children()) == 1
tree['thing1'].make()
tree['thing2'].make()
assert len(tree.children()) == 3
def test_children_nopermissions(self, tree):
# actually make the directory now
tree.makedirs()
tree['a/file'].make()
tree['a/dir/'].make()
os.chmod(tree.abspath, 0000)
assert len(tree.children()) == 0
class TestLeaf(TestVeg):
"""Test Leaf-specific features.
"""
cls = Leaf
name = 'treeroot/a/fault/in/our/roots'
@pytest.fixture
def leaf(self, tmpdir):
with tmpdir.as_cwd():
l = Leaf(self.name)
yield l
veg = leaf
def test_init(self, tmpdir):
"""
Test that leaf works for:
1. nonexistent file
2. existing file
Test that exception raised for:
1. leaf initialized with existing directory
"""
with tmpdir.as_cwd():
# test nonexistent file
t = Leaf('bark')
assert not t.exists
# test existent file
t.make()
assert t.exists
t2 = Leaf('bark')
assert t2.exists
# test that init with directory raises ValueError
# this should create a nonexistent Tree
t3 = Tree('mark/').make()
assert t3.exists
with pytest.raises(ValueError):
t4 = Leaf('mark')
def test_exists(self, leaf):
leaf.make()
assert os.path.exists(leaf.abspath)
assert leaf.exists is True
def test_makedirs(self, leaf):
pass
@pytest.fixture
def draw_tree(tmpdir):
# set up a tree to draw with
with tmpdir.as_cwd():
t = Tree('here')
t['file_zero'].make()
t['.file_zero_hidden'].make()
t['dir_one/'].make()
t['dir_one/file_one'].make()
t['dir_one/.file_one_hidden'].make()
t['dir_one/dir_two/'].make()
t['dir_one/dir_two/file_two'].make()
t['dir_one/dir_two/.file_two_hidden'].make()
yield t
DRAWREF_d0_T = """\
here/
+-- .file_zero_hidden
+-- file_zero
+-- dir_one/
+-- .file_one_hidden
+-- file_one
+-- dir_two/
+-- .file_two_hidden
+-- file_two
"""
DRAWREF_d0_F = """\
here/
+-- file_zero
+-- dir_one/
+-- file_one
+-- dir_two/
+-- file_two
"""
DRAWREF_d1_T = """\
here/
+-- .file_zero_hidden
+-- file_zero
+-- dir_one/
"""
DRAWREF_d1_F = """\
here/
+-- file_zero
+-- dir_one/
"""
DRAWREF_d2_T = """\
here/
+-- .file_zero_hidden
+-- file_zero
+-- dir_one/
+-- .file_one_hidden
+-- file_one
+-- dir_two/
"""
DRAWREF_d2_F = """\
here/
+-- file_zero
+-- dir_one/
+-- file_one
+-- dir_two/
"""
@pytest.mark.parametrize('depth,hidden,ref', (
(None, True, DRAWREF_d0_T),
(None, False, DRAWREF_d0_F),
(1, True, DRAWREF_d1_T),
(1, False, DRAWREF_d1_F),
(2, True, DRAWREF_d2_T),
(2, False, DRAWREF_d2_F),
(3, True, DRAWREF_d0_T), # 3 is max depth, so goes to full
(3, False, DRAWREF_d0_F),
))
def test_tree_draw(draw_tree, depth, hidden, ref, capsys):
draw_tree.draw(depth=depth, hidden=hidden)
out, err = capsys.readouterr()
assert out == ref
|
dotsdl/datreant
|
src/datreant/tests/test_trees.py
|
Python
|
bsd-3-clause
| 12,837
|
[
"MOE"
] |
93aa7812a68a6e618cf686329d6261706635b631fd095f7f43408fa3f34c7c7a
|
# -*- coding: utf-8 -*-
#
# Collection of functions related to BAM and SAM files
#
# pysam uses 0-based coordinates
from past.builtins import xrange
from collections import namedtuple
import os
import re
import sys
import pysam
from . import compat
from . import exceptions
from . import g2g
from . import g2g_utils
from . import vci
from . import __version__
FLAG_NONE = 0x0 # base value
FLAG_PAIRED = 0x1 # template having multiple segments in sequencing
FLAG_PROPER_PAIR = 0x2 # each segment properly aligned according to the aligner
FLAG_UNMAP = 0x4 # segment unmapped
FLAG_MUNMAP = 0x8 # next segment in the template unmapped (mate unmapped)
FLAG_REVERSE = 0x10 # SEQ being reverse complemented
FLAG_MREVERSE = 0x20 # SEQ of the next segment in the template being reversed
FLAG_READ1 = 0x40 # the first segment in the template
FLAG_READ2 = 0x80 # the last segment in the template
FLAG_SECONDARY = 0x100 # secondary alignment
FLAG_QCFAIL = 0x200 # not passing quality controls
FLAG_DUP = 0x400 # PCR or optical duplicate
FLAG_SUPPLEMENTARY = 0x800 # supplementary alignment
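# Added sanity check (illustrative, not part of the original module): SAM FLAG values
# are bit flags, so a paired, reverse-strand, first-in-pair read is simply the bitwise
# OR of the individual flags: 0x1 | 0x10 | 0x40 == 0x51 (decimal 81).
assert FLAG_PAIRED | FLAG_REVERSE | FLAG_READ1 == 0x51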
REGEX_CIGAR = re.compile(r"(\d+)([\w=])")
REGEX_CIGAR_LENGTH = re.compile(r"\D")
CIGAR_M = 'M'
CIGAR_I = 'I'
CIGAR_D = 'D'
CIGAR_N = 'N'
CIGAR_S = 'S'
CIGAR_H = 'H'
CIGAR_P = 'P'
CIGAR_E = '='
CIGAR_X = 'X'
CIGAR_m = 0
CIGAR_i = 1
CIGAR_d = 2
CIGAR_n = 3
CIGAR_s = 4
CIGAR_h = 5
CIGAR_p = 6
CIGAR_e = 7
CIGAR_x = 8
CIGAR_N2C = {
0: 'M', # alignment match (can be a sequence match or mismatch)
1: 'I', # insertion to the reference
2: 'D', # deletion from the reference
3: 'N', # skipped region from the reference
4: 'S', # soft clipping (clipped sequences present in SEQ)
5: 'H', # hard clipping (clipped sequences NOT present in SEQ)
6: 'P', # padding (silent deletion from padded reference)
7: '=', # sequence match
8: 'X', # sequence mismatch
'0': 'M',
'1': 'I',
'2': 'D',
'3': 'N',
'4': 'S',
'5': 'H',
'6': 'P',
'7': '=',
'8': 'X'
}
CIGAR_C2N = {
'M': 0,
'I': 1,
'D': 2,
'N': 3,
'S': 4,
'H': 5,
'P': 6,
'=': 7,
'X': 8
}
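# Added sanity check (illustrative, not part of the original module): the two lookup
# tables above are inverses of each other, e.g. CIGAR_C2N['M'] == 0 and CIGAR_N2C[0] == 'M'.
assert all(CIGAR_N2C[num] == char for char, num in CIGAR_C2N.items())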
LOG = g2g.get_logger()
Cigar = namedtuple('Cigar', ['code', 'length', 'start', 'end'])
def convert_bam_file(vci_file, file_in, file_out, reverse=False):
"""
Convert genome coordinates (in BAM/SAM format) between assemblies. These coordinates
are stored in the :class:`.vci.VCIFile` object.
:param vci_file: vci file used for conversion
:type vci_file: :class:`.vci.VCIFile`
:param str file_in: the input SAM or BAM file
:type file_in: string
:param file_out: the output SAM or file
:type file_out: string
:param reverse: reverse direction of original chain file
:type reverse: boolean
"""
if file_out is None:
raise exceptions.G2GBAMError("Conversion of BAM/SAM file needs output file")
if not isinstance(vci_file, vci.VCIFile):
vci_file = g2g_utils.check_file(vci_file)
if not isinstance(file_in, pysam.Samfile):
file_in = g2g_utils.check_file(file_in)
output_file_name = g2g_utils.check_file(file_out, "w")
unmapped_file_name = "{0}.unmapped".format(output_file_name)
LOG.info("VCI FILE: {0}".format(vci_file))
LOG.info("INPUT FILE: {0}".format(file_in))
LOG.info("OUTPUT FILE: {0}".format(output_file_name))
LOG.info("UNMAPPED FILE: {0}".format(unmapped_file_name))
if not isinstance(vci_file, vci.VCIFile):
LOG.info("Parsing vci file...")
vci_file = vci.VCIFile(vci_file, reverse=reverse)
vci_file.parse(reverse=reverse)
LOG.info("VCI file parsed")
if not isinstance(file_in, pysam.Samfile):
try:
sam_file = pysam.Samfile(file_in, 'rb')
if len(sam_file.header) == 0:
raise exceptions.G2GBAMError("BAM File has no header information")
except:
sam_file = pysam.Samfile(file_in, 'r')
if len(sam_file.header) == 0:
raise exceptions.G2GBAMError("SAM File has no header information")
LOG.info("Converting BAM file")
new_header = sam_file.header.to_dict()
# replace 'HD'
new_header['HD'] = {'VN': 1.0, 'SO': 'coordinate'}
# replace SQ
tmp = []
name_to_id = {}
id = 0
for ref_name in vci_file.contigs:
tmp.append({'LN': vci_file.contigs[ref_name], 'SN': ref_name})
name_to_id[ref_name] = sam_file.get_tid(ref_name)
id += 1
new_header['SQ'] = tmp
if 'PG' not in new_header:
new_header['PG'] = []
new_header['PG'].append({'ID': 'g2gtools', 'VN': __version__})
if 'CO' not in new_header:
new_header['CO'] = []
new_header['CO'].append("Original file: {0}".format(file_in))
new_header['CO'].append("VCI File: {0}".format(vci_file.filename))
dir, temp_file_name = os.path.split(file_out)
parts = temp_file_name.split('.')
ext = parts[-1]
if ext.lower() == 'bam':
new_file = pysam.Samfile(file_out, 'wb', header=new_header)
new_file_unmapped = pysam.Samfile(unmapped_file_name, 'wb', template=sam_file)
elif ext.lower() == 'sam':
new_file = pysam.Samfile(file_out, 'wh', header=new_header)
new_file_unmapped = pysam.Samfile(unmapped_file_name, 'wh', template=sam_file)
else:
raise exceptions.G2GBAMError("Unable to create new file based upon file extension")
total = 0
total_unmapped = 0
total_fail_qc = 0
map_statistics = {'total': 0,
'fail_cannot_map': 0,
'success_simple': 0,
'success_complex': 0}
map_statistics_pair = {'total': 0,
'fail_cannot_map': 0,
'success_1_fail_2_simple': 0,
'success_1_fail_2_complex': 0,
'success_1_simple_2_fail': 0,
'success_1_simple_2_simple': 0,
'success_1_simple_2_complex': 0,
'success_1_complex_2_fail': 0,
'success_1_complex_2_simple': 0,
'success_1_complex_2_complex': 0}
try:
while True:
if total and total % 10000 == 0:
status_success = 0
status_failed = 0
for k, v in map_statistics_pair.items():
if k.startswith('success'):
status_success += v
elif k.startswith('fail'):
status_failed += v
LOG.info("Processed {0:,} reads, {1:,} successful, {2:,} failed".format(total, status_success, status_failed))
if compat.is_py2:
alignment = sam_file.next()
else:
alignment = next(sam_file)
alignment_new = pysam.AlignedRead()
read_chr = sam_file.getrname(alignment.tid)
# READ ONLY
# aend aligned reference position of the read on the reference genome
# alen aligned length of the read on the reference genome.
# positions a list of reference positions that this read aligns to
# qend end index of the aligned query portion of the sequence (0-based, exclusive)
# qlen Length of the aligned query sequence
# qqual aligned query sequence quality values
# qstart start index of the aligned query portion of the sequence (0-based, inclusive)
# query aligned portion of the read and excludes any flanking bases that were soft clipped
# rlen length of the read
# TRUE / FALSE (setting effects flag)
# is_paired true if read is paired in sequencing
# is_proper_pair true if read is mapped in a proper pair
# is_qcfail true if QC failure
# is_read1 true if this is read1
# is_read2 true if this is read2
# is_reverse true if read is mapped to reverse strand
# is_secondary true if not primary alignment
# is_unmapped true if read itself is unmapped
# mate_is_reverse true is read is mapped to reverse strand
# mate_is_unmapped true if the mate is unmapped
# SET
# cigar cigar as list of tuples
# cigarstring alignment as a string
# flag properties flag
# mapq mapping quality
# pnext the position of the mate
# pos 0-based leftmost coordinate
# pnext the position of the mate
# qname the query name
# rnext the reference id of the mate
# seq read sequence bases, including soft clipped bases
# tid target id, contains the index of the reference sequence in the sequence dictionary
# DON'T NEED TO SET or SHOULD WE SET?
# qual read sequence base qualities, including soft clipped bases
# tags the tags in the AUX field
# tlen insert size
total += 1
LOG.debug('~'*80)
LOG.debug("Converting {0} {1} {2} {3}".format(alignment.qname, read_chr, alignment.pos, alignment.cigarstring))
if alignment.is_qcfail:
LOG.debug("\tFail due to qc of old alignment")
new_file_unmapped.write(alignment)
total_fail_qc += 1
continue
if alignment.is_unmapped:
LOG.debug("\tFail due to unmapped old alignment")
new_file_unmapped.write(alignment)
total_unmapped += 1
continue
if not alignment.is_paired:
LOG.debug("SINGLE END ALIGNMENT")
map_statistics['total'] += 1
alignment_new.seq = alignment.seq
alignment_new.flag = FLAG_NONE
alignment_new.mapq = alignment.mapq
alignment_new.qname = alignment.qname
alignment_new.qual = alignment.qual
alignment_new.tags = alignment.tags
read_start = alignment.pos
read_end = alignment.aend
read_strand = '-' if alignment.is_reverse else '+'
mappings = vci_file.find_mappings(read_chr, read_start, read_end)
# unmapped
if mappings is None:
LOG.debug("\tFail due to no mappings")
new_file_unmapped.write(alignment)
map_statistics['fail_cannot_map'] += 1
elif len(mappings) == 1:
if alignment.is_reverse:
alignment_new.flag |= FLAG_REVERSE
alignment_new.tid = name_to_id[mappings[0].to_chr]
alignment_new.pos = mappings[0].to_start
alignment_new.cigar = alignment.cigar
new_file.write(alignment_new)
LOG.debug("\tSuccess (simple): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
map_statistics['success_simple'] += 1
else:
LOG.debug("MAPPINGS: {0}".format(len(mappings)))
for m in mappings:
LOG.debug("> {0}".format(m))
if alignment.is_reverse:
alignment_new.flag |= FLAG_REVERSE
alignment_new.tid = name_to_id[mappings[0].to_chr]
alignment_new.pos = mappings[0].to_start
alignment_new.cigar = convert_cigar(alignment.cigar, read_chr, vci_file, alignment.seq, read_strand, alignment.pos)
new_file.write(alignment_new)
LOG.debug("\tSuccess (complex): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
map_statistics['success_complex'] += 1
else:
LOG.debug("PAIRED END ALIGNMENT")
map_statistics_pair['total'] += 1
alignment_new.seq = alignment.seq
alignment_new.flag = FLAG_PAIRED
alignment_new.mapq = alignment.mapq
alignment_new.qname = alignment.qname
alignment_new.qual = alignment.qual
alignment_new.tags = alignment.tags
if alignment.is_read1:
alignment_new.flag |= FLAG_READ1
if alignment.is_read2:
alignment_new.flag |= FLAG_READ2
if alignment.is_reverse:
alignment_new.flag |= FLAG_REVERSE
if alignment.mate_is_reverse:
alignment_new.flag |= FLAG_MREVERSE
read1_chr = sam_file.getrname(alignment.tid)
read1_start = alignment.pos
read1_end = alignment.aend
read1_strand = '-' if alignment.is_reverse else '+'
read1_mappings = vci_file.find_mappings(read1_chr, read1_start, read1_end) #, read1_strand)
if alignment.mate_is_unmapped:
alignment_new.flag |= FLAG_MUNMAP
else:
read2_chr = sam_file.getrname(alignment.rnext)
read2_start = alignment.pnext
read2_end = read2_start + 1
read2_strand = '-' if alignment.mate_is_reverse else '+'
try:
read2_mappings = vci_file.find_mappings(read2_chr, read2_start, read2_end) #, read2_strand)
except:
read2_mappings = None
if read1_mappings is None and read2_mappings is None:
alignment_new.flag |= FLAG_UNMAP
alignment_new.flag |= FLAG_MUNMAP
LOG.debug("\tFail due to no mappings")
new_file_unmapped.write(alignment)
map_statistics_pair['fail_cannot_map'] += 1
elif read1_mappings is None and read2_mappings and len(read2_mappings) == 1:
alignment_new.flag |= FLAG_UNMAP
alignment_new.pos = 0
alignment_new.cigarstring = '0M'
alignment_new.rnext = name_to_id[read2_mappings[0].to_chr]
alignment_new.pnext = read2_mappings[0].to_start
alignment_new.tlen = 0
LOG.debug("\tPair Success (1:fail,2:simple): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
new_file.write(alignment_new)
map_statistics_pair['success_1_fail_2_simple'] += 1
elif read1_mappings is None and read2_mappings and len(read2_mappings) > 1:
alignment_new.flag |= FLAG_UNMAP
alignment_new.pos = 0
alignment_new.cigarstring = '0M'
alignment_new.rnext = name_to_id[read2_mappings[0].to_chr]
alignment_new.pnext = read2_mappings[0].to_start
alignment_new.tlen = 0
LOG.debug("\tPair Success (1:fail,2:complex): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
new_file.write(alignment_new)
map_statistics_pair['success_1_fail_2_complex'] += 1
elif read1_mappings and len(read1_mappings) == 1 and read2_mappings is None:
alignment_new.flag |= FLAG_MUNMAP
alignment_new.tid = name_to_id[read1_mappings[0].to_chr]
alignment_new.pos = read1_mappings[0].to_start
alignment_new.cigar = alignment.cigar
alignment_new.rnext = name_to_id[read1_mappings[0].to_chr]
alignment_new.pnext = 0
alignment_new.tlen = 0 # CHECK
LOG.debug("\tPair Success (1:simple,2:fail): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
new_file.write(alignment_new)
map_statistics_pair['success_1_simple_2_fail'] += 1
elif read1_mappings and len(read1_mappings) == 1 and read2_mappings and len(read2_mappings) == 1:
alignment_new.tid = name_to_id[read1_mappings[0].to_chr]
alignment_new.pos = read1_mappings[0].to_start
alignment_new.cigar = alignment.cigar
alignment_new.rnext = name_to_id[read2_mappings[0].to_chr]
alignment_new.pnext = read2_mappings[0].to_start
alignment_new.tlen = 0 # CHECK
LOG.debug("\tPair Success (1:simple,2:simple): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
new_file.write(alignment_new)
map_statistics_pair['success_1_simple_2_simple'] += 1
elif read1_mappings and len(read1_mappings) == 1 and read2_mappings and len(read2_mappings) > 1:
alignment_new.tid = name_to_id[read1_mappings[0].to_chr]
alignment_new.pos = read1_mappings[0].to_start
alignment_new.cigar = alignment.cigar
alignment_new.rnext = name_to_id[read2_mappings[0].to_chr]
alignment_new.pnext = read2_mappings[0].to_start
alignment_new.tlen = 0 # CHECK
LOG.debug("\tPair Success (1:simple,2:complex): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
new_file.write(alignment_new)
map_statistics_pair['success_1_simple_2_complex'] += 1
elif read1_mappings and len(read1_mappings) > 1 and read2_mappings is None:
alignment_new.flag |= FLAG_MUNMAP
alignment_new.tid = name_to_id[read1_mappings[0].to_chr]
alignment_new.pos = read1_mappings[0].to_start
alignment_new.cigar = convert_cigar(alignment.cigar, read_chr, vci_file, alignment.seq, read1_strand, alignment.pos)
alignment_new.rnext = name_to_id[read1_mappings[0].to_chr]
alignment_new.pnext = 0
alignment_new.tlen = 0 # CHECK
LOG.debug("\tPair Success (1:complex,2:fail): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
new_file.write(alignment_new)
map_statistics_pair['success_1_complex_2_fail'] += 1
elif read1_mappings and len(read1_mappings) > 1 and read2_mappings and len(read2_mappings) == 1:
alignment_new.tid = name_to_id[read1_mappings[0].to_chr]
alignment_new.pos = read1_mappings[0].to_start
alignment_new.cigar = convert_cigar(alignment.cigar, read_chr, vci_file, alignment.seq, read1_strand, alignment.pos)
alignment_new.rnext = name_to_id[read2_mappings[0].to_chr]
alignment_new.pnext = read2_mappings[0].to_start
alignment_new.tlen = 0 # CHECK
LOG.debug("\tPair Success (1:complex,2:simple): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
new_file.write(alignment_new)
map_statistics_pair['success_1_complex_2_simple'] += 1
elif read1_mappings and len(read1_mappings) > 1 and read2_mappings and len(read2_mappings) > 1:
alignment_new.tid = name_to_id[read1_mappings[0].to_chr]
alignment_new.pos = read1_mappings[0].to_start
alignment_new.cigar = convert_cigar(alignment.cigar, read_chr, vci_file, alignment.seq, read1_strand, alignment.pos)
alignment_new.rnext = name_to_id[read2_mappings[0].to_chr]
alignment_new.pnext = read2_mappings[0].to_start
alignment_new.tlen = 0 # CHECK
LOG.debug("\tPair Success (1:complex,2:complex): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
new_file.write(alignment_new)
map_statistics_pair['success_1_complex_2_complex'] += 1
else:
raise exceptions.G2GBAMError("Unknown BAM/SAM conversion/parse situation")
except StopIteration:
LOG.info("All reads processed")
LOG.info(" {:>10} TOTAL ENTRIES".format(total))
LOG.info(" {:>10} TOTAL UNMAPPED ".format(total_unmapped))
LOG.info(" {:>10} TOTAL FAIL QC ".format(total_fail_qc))
if map_statistics['total'] > 0:
LOG.info("")
LOG.info("Mapping Summary Single End")
LOG.info(" {:>10} TOTAL ENTRIES".format(map_statistics['total']))
LOG.info("")
LOG.info(" {:>10} TOTAL SUCCESS".format(map_statistics['success_simple'] + map_statistics['success_complex']))
LOG.info(" {:>10} Simple".format(map_statistics['success_simple']))
LOG.info(" {:>10} Complex".format(map_statistics['success_complex']))
LOG.info("")
LOG.info(" {:>10} TOTAL FAILURES".format(map_statistics['fail_cannot_map']))
LOG.info(" {:>10} Cannot Map ".format(map_statistics['fail_cannot_map']))
if map_statistics_pair['total'] > 0:
total_success = 0
for k, v in map_statistics_pair.items():
if k.startswith('success'):
total_success += v
LOG.info("")
LOG.info("Mapping Summary Paired End")
LOG.info(" {:>10} TOTAL ENTRIES".format(map_statistics_pair['total']))
LOG.info("")
LOG.info(" {:>10} TOTAL SUCCESS".format(total_success))
LOG.info(" {:>10} Read 1 Failed, Read 2 Simple".format(map_statistics_pair['success_1_fail_2_simple']))
LOG.info(" {:>10} Read 1 Failed, Read 2 Complex".format(map_statistics_pair['success_1_fail_2_complex']))
LOG.info(" {:>10} Read 1 Simple, Read 2 Failed".format(map_statistics_pair['success_1_simple_2_fail']))
LOG.info(" {:>10} Read 1 Simple, Read 2 Simple".format(map_statistics_pair['success_1_simple_2_simple']))
LOG.info(" {:>10} Read 1 Simple, Read 2 Complex".format(map_statistics_pair['success_1_simple_2_complex']))
LOG.info(" {:>10} Read 1 Complex, Read 2 Failed".format(map_statistics_pair['success_1_complex_2_fail']))
LOG.info(" {:>10} Read 1 Complex, Read 2 Simple".format(map_statistics_pair['success_1_complex_2_simple']))
LOG.info(" {:>10} Read 1 Complex, Read 2 Complex".format(map_statistics_pair['success_1_complex_2_complex']))
LOG.info("")
LOG.info(" {:>10} TOTAL FAILURES".format(map_statistics_pair['fail_cannot_map']))
LOG.info(" {:>10} Cannot Map".format(map_statistics_pair['fail_cannot_map']))
LOG.info("")
LOG.info("BAM File Converted")
#
# Functions dealing with CIGAR strings
#
#
# BAM OP Description
# 0 M alignment match
# 1 I insertion to reference
# 2 D deletion from reference. region deleted from reference genome
# 3 N skipped region from the reference
# 4 S soft clipping (clipped sequence present in SEQ)
# 5 H hard clipping (clipped sequences NOT present in SEQ)
# 6 P padding (silent deletion from padded reference)
# 7 = sequence match
# 8 X sequence mismatch
#
def cigarlist_to_cigarstring(cigar_list):
"""
Convert a list of tuples into a cigar string.
Example::
[ (0, 10), (1, 1), (0, 75), (2, 2), (0, 20) ]
=> 10M 1I 75M 2D 20M
=> 10M1I75M2D20M
:param cigar_list: a list of tuples (code, length)
:type cigar_list: list
:return: the cigar string
:rtype: string
:raises: :class:`.exceptions.G2GCigarFormatError` on invalid cigar string
"""
cigar = ''
    if cigar_list and isinstance(cigar_list[0], Cigar):
try:
for i in cigar_list:
cigar += str(i.length) + i.code
except KeyError:
raise exceptions.G2GCigarFormatError("Invalid cigar code: " + str(i))
else:
try:
for i in cigar_list:
cigar += str(i[1]) + CIGAR_N2C[i[0]]
except KeyError:
raise exceptions.G2GCigarFormatError("Invalid cigar code: " + str(i))
return cigar
def cigar_to_string(cigar):
"""
Convert a list of tuples into a cigar string.
Example::
[ (0, 10), (1, 1), (0, 75), (2, 2), (0, 20) ]
=> 10M 1I 75M 2D 20M
=> 10M1I75M2D20M
:param cigar_list: a list of tuples (code, length)
:type cigar_list: list
:return: the cigar string
:rtype: string
:raises: :class:`.exceptions.G2GCigarFormatError` on invalid cigar string
"""
cigar = ''
try:
for i in cigar:
cigar += str(i.length) + i.code
except KeyError:
raise exceptions.G2GCigarFormatError("Invalid cigar code: " + str(i))
return cigar
def _cigar_to_list(cigar_string):
"""
Convert a list of tuples into a cigar string
Example::
10M1I75M2D20M
=> 10M 1I 75M 2D 20M
=> [ (0, 10), (1, 1), (0, 75), (2, 2), (0, 20) ]
:param cigar_string: a cigar string
:return: a list of tuples (code, length)
:rtype: list
:raises: :class:`.exceptions.G2GCigarFormatError` on invalid cigar string
"""
matches = REGEX_CIGAR.findall(cigar_string)
possible_length = len(REGEX_CIGAR_LENGTH.findall(cigar_string))
if len(matches) != possible_length:
raise exceptions.G2GCigarFormatError("Invalid cigar string: {0}".format(cigar_string))
lst = []
try:
for m in matches:
            lst.append((CIGAR_C2N[m[1]], int(m[0])))
except KeyError:
raise exceptions.G2GCigarFormatError("Invalid cigar string: {0} : {1} ".format(cigar_string, str(m)))
return lst
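def _example_cigar_roundtrip():
    """
    Illustrative helper, not part of the original g2gtools module: assuming
    _cigar_to_list() returns (code, length) tuples as its docstring describes,
    this shows that it and cigarlist_to_cigarstring() are inverses of each other.
    """
    cigar_string = '10M1I75M2D20M'
    cigar_list = _cigar_to_list(cigar_string)  # [(0, 10), (1, 1), (0, 75), (2, 2), (0, 20)]
    return cigarlist_to_cigarstring(cigar_list) == cigar_string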
def _cigar_convert(cigar, chromosome, vci_file, strand='+', position=0):
"""
PHASE 1
    Convert each CIGAR element to new mappings and construct an array of NEW cigar elements
For example, depending on the Intervals in the CHAIN file, let's say we have the following
CIGAR string: 35M49N65M
This could get converted into
35M ==> 4M150D31M
49N ==> -1N (remember, surrounding M's are used to find the length of N which is done on next pass)
65M ==> 65M
First pass yields: 35M49N65M => 4M150D31M-1N65M
:param cigar:
:param chromosome:
:param vci_file:
:param strand:
:param position:
:return:
"""
cigar_new = []
current_pos = position
cigar_no = 0
for c in cigar:
cigar_no += 1
LOG.debug("Element #{0}, '{1}{2}' specified, location: {3}".format(cigar_no, c[1], CIGAR_N2C[c[0]], current_pos))
increment = c[1]
if c[0] == CIGAR_m:
new_mappings = vci_file.find_mappings(chromosome, current_pos, current_pos + c[1])
if not new_mappings:
LOG.debug("Mappings: None")
cigar_new.append(Cigar(CIGAR_S, c[1], 0, 0))
elif len(new_mappings) == 1:
LOG.debug("Mappings: Easy: {0}".format(new_mappings[0]))
cigar_new.append(Cigar(CIGAR_M, new_mappings[0].to_end - new_mappings[0].to_start, new_mappings[0].to_start, new_mappings[0].to_end))
else:
# multiple maps, not so easy
last = None
for m in new_mappings:
LOG.debug("Mappings: Multiple: {0}".format(m))
if not last:
last = m
if current_pos < m.from_start:
# special case of first match not in interval, handle accordingly
LOG.debug("Adding 'S', because {0} < {1}".format(current_pos, m.from_start))
cigar_new.append(Cigar(CIGAR_S, m.from_start - current_pos, 0, 0))
else:
if m.from_start != last.from_end:
LOG.debug("Adding 'M' and 'I', because {0} != {1}".format(m.from_start, last.from_end))
cigar_new.append(Cigar(CIGAR_M, last.to_end - last.to_start, last.to_start, last.to_end))
cigar_new.append(Cigar(CIGAR_I, m.from_start - last.from_end, last.to_start, last.to_end))
elif m.to_start != last.to_end:
LOG.debug("Adding 'M' and 'D', because {0} != {1}".format(m.to_start, last.to_end))
cigar_new.append(Cigar(CIGAR_M, last.to_end - last.to_start, last.to_start, last.to_end))
cigar_new.append(Cigar(CIGAR_D, m.to_start - last.to_end, 0, 0))
last = m
LOG.debug("Adding 'M'")
cigar_new.append(Cigar(CIGAR_M, last.to_end - last.to_start, last.to_start, last.to_end))
elif c[0] == CIGAR_i:
LOG.debug("Adding 'I' and 'D'")
cigar_new.append(Cigar(CIGAR_I, c[1], 0, 0))
cigar_new.append(Cigar(CIGAR_D, -1, 0, 0))
increment = 0
elif c[0] == CIGAR_d:
LOG.debug("Adding 'D'")
cigar_new.append(Cigar(CIGAR_D, -1, 0, 0))
elif c[0] == CIGAR_n:
LOG.debug("Adding 'N'")
cigar_new.append(Cigar(CIGAR_N, -1, 0, 0))
elif c[0] in [CIGAR_s, CIGAR_h]:
LOG.debug("Adding '{0}'".format(CIGAR_N2C[c[0]]))
cigar_new.append(Cigar(CIGAR_N2C[c[0]], c[1], 0, 0))
else:
# other
LOG.debug("OTHER CODE '{0}' found, looking at {1} at {2}".format(CIGAR_N2C[c[0]], c, current_pos))
raise exceptions.G2GCigarFormatError("ERROR: Not handling the values in this cigar string: {0}".format(cigar))
#current_pos += c[1]
current_pos += increment
LOG.debug("Current CIGAR: {0}".format(cigar_new))
return cigar_new
def _cigar_combine_consecutive(cigar):
"""
Combine consecutive features in a cigar string.
For example, 2 N's become 1
:param cigar:
:return:
"""
done = False
while not done:
done = True
for i in xrange(0, len(cigar)-1):
LOG.debug("{0}={1}".format(i, cigar[i]))
LOG.debug("{0}={1}".format(i+1, cigar[i+1]))
if cigar[i].code == cigar[i+1].code:
done = False
break
if not done:
cigar_temp = []
cigar_temp.extend(cigar[:i])
cm1 = cigar[i]
cm2 = cigar[i+1]
cm_new = Cigar(cm1.code, cm1.length + cm2.length, cm1.start, cm2.end)
cigar_temp.append(cm_new)
LOG.debug("Found consecutive elements {0} and {1}, combined into {2}".format(cm1, cm2, cm_new))
cigar_temp.extend(cigar[i+2:])
cigar = cigar_temp
return cigar
def _cigar_fix_pre_and_post_M(cigar):
"""
:param cigar:
:return:
"""
# pre M to S fix
for i in xrange(0, len(cigar)):
if cigar[i].code == CIGAR_M:
break
if i != 0:
first_m = i
length = 0
for i in xrange(0, first_m):
if cigar[i].code in [CIGAR_I, CIGAR_S, CIGAR_H]:
length += cigar[i].length
temp_cigar = [Cigar(CIGAR_S, length, 0, 0)]
temp_cigar.extend(cigar[i+1:])
cigar = temp_cigar
# post M to S fix
for i in reversed(xrange(0, len(cigar))):
if cigar[i].code == CIGAR_M:
break
if i > 0 and i != (len(cigar) - 1):
last_m = i
length = 0
for i in xrange(last_m+1, len(cigar)):
if cigar[i].code in [CIGAR_M, CIGAR_I, CIGAR_S]:
length += cigar[i].length
temp_cigar = []
temp_cigar.extend(cigar[:i-1])
temp_cigar.append(Cigar(CIGAR_S, length, 0, 0))
cigar = temp_cigar
return cigar
def _cigar_remove_softs_between_m(cigar):
"""
Remove soft if surrounded by Ms
:param cigar:
:return:
"""
done = False
while not done:
done = True
for i in xrange(1, len(cigar)-1):
if cigar[i].code == CIGAR_S:
done = False
break
if done:
break
before = None
after = None
for x in reversed(xrange(i)):
if cigar[x].code == CIGAR_M:
before = cigar[x]
break
for x in xrange(i+1, len(cigar)):
if cigar[x].code == CIGAR_M:
after = cigar[x]
break
if before and after:
LOG.debug("Found 'S' between 'M' so removing 'S'")
cigar_temp = []
cigar_temp.extend(cigar[:i])
cigar_temp.extend(cigar[i+1:])
cigar = cigar_temp
LOG.debug(cigar)
else:
done = True
return cigar
def _cigar_fix_lengths(cigar, sequence):
"""
:return:
"""
# Assign length to -1's
#
# Since N's aren't mapped we look at the surrounding M's to find the length of the N's
#
# Example: 35M49N65M ==> 4M150D31M-1N65M, the -1 will be corrected by finding the last position of the previous
# M and first position of the next M
#
# there are a few special cases that are handled
# since there were multiple mappings, we will need to figure out the location on the N's
done = False
while not done:
done = True
# find first element without a length
i = 0
for cm in cigar:
if cm.length == -1:
break
i += 1
if i == len(cigar):
done = True
break
LOG.debug("Found '{0}' at {1}: {2}".format(cm.code, i, cm))
before = None
after = None
# Simple case is surrounded by mapping positions, but might not be the case
for x in reversed(xrange(i)):
if cigar[x].code == CIGAR_M:
before = cigar[x]
break
for x in xrange(i+1, len(cigar)):
if cigar[x].code == CIGAR_M:
after = cigar[x]
break
# special case of 89M2000N11M
        # what happens when this is converted to 89M-1N11S (no M at end)
# we should have 89M11S
LOG.debug("Before: {0}".format(before))
LOG.debug("After: {0}".format(after))
# check if all cigar elements from here to end do not have a length
a = i
while a < len(cigar) - 1:
if cigar[a].length != -1:
break
a += 1
        # if a == len(cigar) - 1 then all the rest have no length
LOG.debug("a={0}, len(cigar_mapping) - 1={1}".format(a, len(cigar) - 1))
if (a == len(cigar) - 1 and cigar[a].start == -1) or not after or not before:
# take the rest as a clip
LOG.debug("Found a clip")
temp_cigar_mappings = cigar[:i]
temp_total = 0
for t in temp_cigar_mappings:
if t.code in [CIGAR_M, CIGAR_I, CIGAR_S]:
temp_total += t.length
temp_cigar_mappings.append(Cigar(CIGAR_S, len(sequence) - temp_total, -1, -1))
cigar = temp_cigar_mappings
done = True
else:
c = cigar[i]
new_c = Cigar(c.code, after.start - before.end, before.end, after.start)
LOG.debug("Replacing, old = {0}, new = {1}".format(c, new_c))
cigar[i] = new_c
done = False
LOG.debug("Removing 0 length elements, if any")
new_cigar = []
for cm in cigar:
if cm[1] == 0:
LOG.debug("Removing {}".format(cm))
continue
new_cigar.append(cm)
return new_cigar
def convert_cigar(cigar, chromosome, vci_file, sequence, strand='+', position=0):
"""
Generate the cigar string of an old alignment.
P1: Map M with bx; Inherit S and H; Inherit I but put -1D right behind it; Put -1D or -1N when it’s there.
P1a: Convert xM, if it has zero length after mapping, to xS
P2: Remove S (including new S originate from unmapped M) if it is surrounded by any pair of consecutive Ms that
survived P2
P3: Adjust the size of D or N that are inbetween Ms. Remove it if they have zero length.
P4: Combine duplicated entries (I guess mostly M or S)
P5: Put yS for the unmapped regions before the first M and/or after the last M (I believe adding S, H, I’s in
those regions should get you y). In this phase remove the remaining -1D or -1N in those regions first.
:param old_alignment: the old alignment
:type old_alignment: :class:`pysam.AlignedRead`
:param chromosome: the chromosome
:type chromosome: string
:param vci_filevci_file: the vci_file file
:type chain: the vci_file file
:return: a new cigar string based upon the mappings
:raises: :class:`.exceptions.G2GCigarFormatError` on invalid cigar string
"""
old_cigar = cigarlist_to_cigarstring(cigar)
LOG.debug("CIGAR CONVERSION : {0}".format(old_cigar))
#
# PHASE 1: Convert each CIGAR element to new mappings and construct an array on NEW cigar elements
#
LOG.debug("CIGAR CONVERSION : PHASE 1 : Converting cigar elements")
new_cigar = _cigar_convert(cigar, chromosome, vci_file, strand, position)
LOG.debug("AFTER PHASE 1 : {0} ".format(new_cigar))
if len(new_cigar) == 1:
LOG.debug("CIGAR CONVERSION : Skipping to end since only 1 element")
else:
#
# PHASE 2: Remove S if surrounded by M
#
LOG.debug("CIGAR CONVERSION : PHASE 2 : Remove S if surrounded by M")
new_cigar = _cigar_remove_softs_between_m(new_cigar)
LOG.debug("AFTER PHASE 2 : {0} ".format(new_cigar))
#
# PHASE 3: Fix element lengths
#
LOG.debug("CIGAR CONVERSION : PHASE 3 : Fix element lengths")
new_cigar = _cigar_fix_lengths(new_cigar, sequence)
LOG.debug("AFTER PHASE 3 : {0} ".format(new_cigar))
#
# PHASE 4: Combine consecutive matching elements
#
LOG.debug("CIGAR CONVERSION : PHASE 4 : Combining elements")
new_cigar = _cigar_combine_consecutive(new_cigar)
LOG.debug("AFTER PHASE 4 : {0} ".format(new_cigar))
#
# PHASE 5: Combine consecutive matching elements
#
LOG.debug("CIGAR CONVERSION : PHASE 5 : Fix pre and post Ms")
new_cigar = _cigar_fix_pre_and_post_M(new_cigar)
LOG.debug("AFTER PHASE 5 : {0} ".format(new_cigar))
#
# Final pass through CIGAR string
#
# test cigar string length
#
# SEQ: segment SEQuence. This field can be a '*' when the sequence is not stored. If not a '*',
# the length of the sequence must equal the sum of lengths of M/I/S/=/X operations in CIGAR.
# An '=' denotes the base is identical to the reference base. No assumptions can be made on the
# letter cases.
#
LOG.debug("CIGAR CONVERSION : PHASE 6 : Testing length and conversion")
cigar_seq_length = 0
# simplify the cigar, throw away the other stuff we used
simple_cigar = []
for c in new_cigar:
simple_cigar.append((CIGAR_C2N[c.code], c.length))
if c.code in [CIGAR_M, CIGAR_I, CIGAR_S, CIGAR_E, CIGAR_X]:
cigar_seq_length += c.length
if cigar_seq_length != len(sequence):
LOG.debug("CIGAR SEQ LENGTH={0} != SEQ_LEN={1}".format(cigar_seq_length, len(sequence)))
# not equal according to chain file format, add the clipping length
simple_cigar.append((CIGAR_s, len(sequence) - cigar_seq_length))
if old_cigar != cigar_to_string(simple_cigar):
LOG.debug("old cigar != new cigar")
else:
LOG.debug("old cigar == new cigar")
LOG.debug("CIGAR CONVERSION : {0} ==> {1}".format(old_cigar, cigar_to_string(simple_cigar)))
LOG.debug(simple_cigar)
return simple_cigar
if __name__ == '__main__':
from .g2g_utils import get_logger, configure_logging
configure_logging(10)
LOG = get_logger()
cigarstring = '5I3D4M9D3S104M7D2I'
cigarlist = _cigar_to_list(cigarstring)
LOG.debug(cigarstring)
print(cigarlist)
cigar_new = _cigar_remove_softs_between_m(cigarlist)
#cigar_new = _cigar_fix_pre_and_post_M(cigarlist)
print(cigar_to_string(cigar_new))
print(cigar_new)
|
churchill-lab/g2gtools
|
g2gtools/bsam.py
|
Python
|
mit
| 41,302
|
[
"pysam"
] |
9c536a45c8b838834723db0447896a24be9eb945d60073ab580d85fc407d06c8
|
"""
Sequence classes
"""
import data
import logging
import re
import string
from cgi import escape
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes import metadata
import galaxy.model
from galaxy import util
from sniff import *
log = logging.getLogger(__name__)
class Sequence( data.Text ):
"""Class describing a sequence"""
"""Add metadata elements"""
MetadataElement( name="sequences", default=0, desc="Number of sequences", readonly=True, visible=False, optional=True, no_value=0 )
def set_meta( self, dataset, **kwd ):
"""
Set the number of sequences and the number of data lines in dataset.
"""
data_lines = 0
sequences = 0
for line in file( dataset.file_name ):
line = line.strip()
if line and line.startswith( '#' ):
# We don't count comment lines for sequence data types
continue
if line and line.startswith( '>' ):
sequences += 1
data_lines +=1
else:
data_lines += 1
dataset.metadata.data_lines = data_lines
dataset.metadata.sequences = sequences
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
dataset.peek = data.get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
if dataset.metadata.sequences:
dataset.blurb = "%s sequences" % util.commaify( str( dataset.metadata.sequences ) )
else:
dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
class Alignment( data.Text ):
"""Class describing an alignment"""
"""Add metadata elements"""
MetadataElement( name="species", desc="Species", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None )
class Fasta( Sequence ):
"""Class representing a FASTA sequence"""
file_ext = "fasta"
def sniff( self, filename ):
"""
Determines whether the file is in fasta format
A sequence in FASTA format consists of a single-line description, followed by lines of sequence data.
The first character of the description line is a greater-than (">") symbol in the first column.
        All lines should be shorter than 80 characters
For complete details see http://www.ncbi.nlm.nih.gov/blast/fasta.shtml
Rules for sniffing as True:
We don't care about line length (other than empty lines).
The first non-empty line must start with '>' and the Very Next line.strip() must have sequence data and not be a header.
'sequence data' here is loosely defined as non-empty lines which do not start with '>'
This will cause Color Space FASTA (csfasta) to be detected as True (they are, after all, still FASTA files - they have a header line followed by sequence data)
Previously this method did some checking to determine if the sequence data had integers (presumably to differentiate between fasta and csfasta)
        This should be done through sniff order, where csfasta (currently has a null sniff function) is detected first (stricter definition), followed sometime after by fasta
We will only check that the first purported sequence is correctly formatted.
>>> fname = get_test_fname( 'sequence.maf' )
>>> Fasta().sniff( fname )
False
>>> fname = get_test_fname( 'sequence.fasta' )
>>> Fasta().sniff( fname )
True
"""
try:
fh = open( filename )
while True:
line = fh.readline()
if not line:
break #EOF
line = line.strip()
if line: #first non-empty line
if line.startswith( '>' ):
                        #The next line.strip() must not be '', nor start with '>'
line = fh.readline().strip()
if line == '' or line.startswith( '>' ):
break
return True
else:
break #we found a non-empty line, but its not a fasta header
fh.close()
except:
pass
return False
class csFasta( Sequence ):
""" Class representing the SOLID Color-Space sequence ( csfasta ) """
file_ext = "csfasta"
def sniff( self, filename ):
"""
Color-space sequence:
>2_15_85_F3
T213021013012303002332212012112221222112212222
>>> fname = get_test_fname( 'sequence.fasta' )
>>> csFasta().sniff( fname )
False
>>> fname = get_test_fname( 'sequence.csfasta' )
>>> csFasta().sniff( fname )
True
"""
try:
fh = open( filename )
while True:
line = fh.readline()
if not line:
break #EOF
line = line.strip()
if line and not line.startswith( '#' ): #first non-empty non-comment line
if line.startswith( '>' ):
line = fh.readline().strip()
if line == '' or line.startswith( '>' ):
break
elif line[0] not in string.ascii_uppercase:
return False
elif len( line ) > 1 and not re.search( '^\d+$', line[1:] ):
return False
return True
else:
break #we found a non-empty line, but it's not a header
fh.close()
except:
pass
return False
class Fastq ( Sequence ):
"""Class representing a generic FASTQ sequence"""
file_ext = "fastq"
def set_meta( self, dataset, **kwd ):
"""
Set the number of sequences and the number of data lines
in dataset.
"""
data_lines = 0
sequences = 0
for line in file( dataset.file_name ):
line = line.strip()
if line and line.startswith( '#' ):
# We don't count comment lines for sequence data types
continue
if line and line.startswith( '@' ):
sequences += 1
data_lines +=1
else:
data_lines += 1
dataset.metadata.data_lines = data_lines
dataset.metadata.sequences = sequences
def sniff ( self, filename ):
"""
Determines whether the file is in generic fastq format
For details, see http://maq.sourceforge.net/fastq.shtml
Note: There are three kinds of FASTQ files, known as "Sanger" (sometimes called "Standard"), Solexa, and Illumina
These differ in the representation of the quality scores
>>> fname = get_test_fname( '1.fastqsanger' )
>>> Fastq().sniff( fname )
True
>>> fname = get_test_fname( '2.fastqsanger' )
>>> Fastq().sniff( fname )
True
"""
headers = get_headers( filename, None )
bases_regexp = re.compile( "^[NGTAC]*" )
# check that first block looks like a fastq block
try:
if len( headers ) >= 4 and headers[0][0] and headers[0][0][0] == "@" and headers[2][0] and headers[2][0][0] == "+" and headers[1][0]:
# Check the sequence line, make sure it contains only G/C/A/T/N
if not bases_regexp.match( headers[1][0] ):
return False
return True
return False
except:
return False
class FastqSanger( Fastq ):
"""Class representing a FASTQ sequence ( the Sanger variant )"""
file_ext = "fastqsanger"
try:
from galaxy import eggs
import pkg_resources; pkg_resources.require( "bx-python" )
import bx.align.maf
except:
pass
#trying to import maf_utilities here throws an ImportError due to a circular import between jobs and tools:
#from galaxy.tools.util.maf_utilities import build_maf_index_species_chromosomes
#Traceback (most recent call last):
# File "./scripts/paster.py", line 27, in <module>
# command.run()
# File "build/bdist.solaris-2.11-i86pc/egg/paste/script/command.py", line 78, in run
# File "build/bdist.solaris-2.11-i86pc/egg/paste/script/command.py", line 117, in invoke
# File "build/bdist.solaris-2.11-i86pc/egg/paste/script/command.py", line 212, in run
# File "build/bdist.solaris-2.11-i86pc/egg/paste/script/serve.py", line 227, in command
# File "build/bdist.solaris-2.11-i86pc/egg/paste/script/serve.py", line 250, in loadapp
# File "build/bdist.solaris-2.11-i86pc/egg/paste/deploy/loadwsgi.py", line 193, in loadapp
# File "build/bdist.solaris-2.11-i86pc/egg/paste/deploy/loadwsgi.py", line 213, in loadobj
# File "build/bdist.solaris-2.11-i86pc/egg/paste/deploy/loadwsgi.py", line 237, in loadcontext
# File "build/bdist.solaris-2.11-i86pc/egg/paste/deploy/loadwsgi.py", line 267, in _loadconfig
# File "build/bdist.solaris-2.11-i86pc/egg/paste/deploy/loadwsgi.py", line 397, in get_context
# File "build/bdist.solaris-2.11-i86pc/egg/paste/deploy/loadwsgi.py", line 439, in _context_from_explicit
# File "build/bdist.solaris-2.11-i86pc/egg/paste/deploy/loadwsgi.py", line 18, in import_string
# File "/afs/bx.psu.edu/home/dan/galaxy/central/lib/pkg_resources.py", line 1912, in load
# entry = __import__(self.module_name, globals(),globals(), ['__name__'])
# File "/afs/bx.psu.edu/home/dan/galaxy/central/lib/galaxy/web/buildapp.py", line 18, in <module>
# from galaxy import config, jobs, util, tools
# File "/afs/bx.psu.edu/home/dan/galaxy/central/lib/galaxy/jobs/__init__.py", line 3, in <module>
# from galaxy import util, model
# File "/afs/bx.psu.edu/home/dan/galaxy/central/lib/galaxy/model/__init__.py", line 13, in <module>
# import galaxy.datatypes.registry
# File "/afs/bx.psu.edu/home/dan/galaxy/central/lib/galaxy/datatypes/registry.py", line 6, in <module>
# import data, tabular, interval, images, sequence, qualityscore, genetics, xml, coverage, tracks, chrominfo
# File "/afs/bx.psu.edu/home/dan/galaxy/central/lib/galaxy/datatypes/sequence.py", line 344, in <module>
# from galaxy.tools.util.maf_utilities import build_maf_index_species_chromosomes
# File "/afs/bx.psu.edu/home/dan/galaxy/central/lib/galaxy/tools/__init__.py", line 15, in <module>
# from galaxy import util, jobs, model
#ImportError: cannot import name jobs
#so we'll copy and paste for now...terribly icky
#*** ANYCHANGE TO THIS METHOD HERE OR IN maf_utilities MUST BE PROPAGATED ***
def COPIED_build_maf_index_species_chromosomes( filename, index_species = None ):
species = []
species_chromosomes = {}
indexes = bx.interval_index_file.Indexes()
blocks = 0
try:
maf_reader = bx.align.maf.Reader( open( filename ) )
while True:
pos = maf_reader.file.tell()
block = maf_reader.next()
if block is None:
break
blocks += 1
for c in block.components:
spec = c.src
chrom = None
if "." in spec:
spec, chrom = spec.split( ".", 1 )
if spec not in species:
species.append( spec )
species_chromosomes[spec] = []
if chrom and chrom not in species_chromosomes[spec]:
species_chromosomes[spec].append( chrom )
if index_species is None or spec in index_species:
forward_strand_start = c.forward_strand_start
forward_strand_end = c.forward_strand_end
try:
forward_strand_start = int( forward_strand_start )
forward_strand_end = int( forward_strand_end )
except ValueError:
continue #start and end are not integers, can't add component to index, goto next component
#this likely only occurs when parse_e_rows is True?
#could a species exist as only e rows? should the
if forward_strand_end > forward_strand_start:
#require positive length; i.e. certain lines have start = end = 0 and cannot be indexed
indexes.add( c.src, forward_strand_start, forward_strand_end, pos, max=c.src_size )
except Exception, e:
#most likely a bad MAF
log.debug( 'Building MAF index on %s failed: %s' % ( filename, e ) )
return ( None, [], {}, 0 )
return ( indexes, species, species_chromosomes, blocks )
class Maf( Alignment ):
"""Class describing a Maf alignment"""
file_ext = "maf"
#Readonly and optional, users can't unset it, but if it is not set, we are generally ok; if required use a metadata validator in the tool definition
MetadataElement( name="blocks", default=0, desc="Number of blocks", readonly=True, optional=True, visible=False, no_value=0 )
MetadataElement( name="species_chromosomes", desc="Species Chromosomes", param=metadata.FileParameter, readonly=True, no_value=None, visible=False, optional=True )
MetadataElement( name="maf_index", desc="MAF Index File", param=metadata.FileParameter, readonly=True, no_value=None, visible=False, optional=True )
def init_meta( self, dataset, copy_from=None ):
Alignment.init_meta( self, dataset, copy_from=copy_from )
def set_meta( self, dataset, overwrite = True, **kwd ):
"""
Parses and sets species, chromosomes, index from MAF file.
"""
        #these metadata values are not accessible by users, always overwrite
indexes, species, species_chromosomes, blocks = COPIED_build_maf_index_species_chromosomes( dataset.file_name )
if indexes is None:
return #this is not a MAF file
dataset.metadata.species = species
dataset.metadata.blocks = blocks
#write species chromosomes to a file
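        #(one line per species: the species name, then a tab-separated list of its chromosomes)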
chrom_file = dataset.metadata.species_chromosomes
if not chrom_file:
chrom_file = dataset.metadata.spec['species_chromosomes'].param.new_file( dataset = dataset )
chrom_out = open( chrom_file.file_name, 'wb' )
for spec, chroms in species_chromosomes.items():
chrom_out.write( "%s\t%s\n" % ( spec, "\t".join( chroms ) ) )
chrom_out.close()
dataset.metadata.species_chromosomes = chrom_file
index_file = dataset.metadata.maf_index
if not index_file:
index_file = dataset.metadata.spec['maf_index'].param.new_file( dataset = dataset )
indexes.write( open( index_file.file_name, 'wb' ) )
dataset.metadata.maf_index = index_file
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
# The file must exist on disk for the get_file_peek() method
dataset.peek = data.get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
if dataset.metadata.blocks:
dataset.blurb = "%s blocks" % util.commaify( str( dataset.metadata.blocks ) )
else:
# Number of blocks is not known ( this should not happen ), and auto-detect is
# needed to set metadata
dataset.blurb = "? blocks"
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek( self, dataset ):
"""Returns formated html of peek"""
return self.make_html_table( dataset )
def make_html_table( self, dataset, skipchars=[] ):
"""Create HTML table, used for displaying peek"""
out = ['<table cellspacing="0" cellpadding="3">']
try:
out.append('<tr><th>Species: ')
for species in dataset.metadata.species:
out.append( '%s ' % species )
out.append( '</th></tr>' )
if not dataset.peek:
dataset.set_peek()
data = dataset.peek
lines = data.splitlines()
for line in lines:
line = line.strip()
if not line:
continue
out.append( '<tr><td>%s</td></tr>' % escape( line ) )
out.append( '</table>' )
out = "".join( out )
except Exception, exc:
out = "Can't create peek %s" % exc
return out
def sniff( self, filename ):
"""
        Determines whether the file is in maf format
The .maf format is line-oriented. Each multiple alignment ends with a blank line.
Each sequence in an alignment is on a single line, which can get quite long, but
there is no length limit. Words in a line are delimited by any white space.
Lines starting with # are considered to be comments. Lines starting with ## can
be ignored by most programs, but contain meta-data of one form or another.
The first line of a .maf file begins with ##maf. This word is followed by white-space-separated
variable=value pairs. There should be no white space surrounding the "=".
For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format5
>>> fname = get_test_fname( 'sequence.maf' )
>>> Maf().sniff( fname )
True
>>> fname = get_test_fname( 'sequence.fasta' )
>>> Maf().sniff( fname )
False
"""
headers = get_headers( filename, None )
try:
if len(headers) > 1 and headers[0][0] and headers[0][0] == "##maf":
return True
else:
return False
except:
return False
class Axt( data.Text ):
"""Class describing an axt alignment"""
# gvk- 11/19/09 - This is really an alignment, but we no longer have tools that use this data type, and it is
# here simply for backward compatibility ( although it is still in the datatypes registry ). Subclassing
    # from data.Text eliminates managing metadata elements inherited from the Alignment class.
file_ext = "axt"
def sniff( self, filename ):
"""
Determines whether the file is in axt format
axt alignment files are produced from Blastz, an alignment tool available from Webb Miller's lab
at Penn State University.
Each alignment block in an axt file contains three lines: a summary line and 2 sequence lines.
Blocks are separated from one another by blank lines.
The summary line contains chromosomal position and size information about the alignment. It
consists of 9 required fields.
The sequence lines contain the sequence of the primary assembly (line 2) and aligning assembly
(line 3) with inserts. Repeats are indicated by lower-case letters.
For complete details see http://genome.ucsc.edu/goldenPath/help/axt.html
>>> fname = get_test_fname( 'alignment.axt' )
>>> Axt().sniff( fname )
True
>>> fname = get_test_fname( 'alignment.lav' )
>>> Axt().sniff( fname )
False
"""
headers = get_headers( filename, None )
if len(headers) < 4:
return False
for hdr in headers:
if len(hdr) > 0 and hdr[0].startswith("##matrix=axt"):
return True
if len(hdr) > 0 and not hdr[0].startswith("#"):
if len(hdr) != 9:
return False
try:
map ( int, [hdr[0], hdr[2], hdr[3], hdr[5], hdr[6], hdr[8]] )
except:
return False
if hdr[7] not in data.valid_strand:
return False
else:
return True
class Lav( data.Text ):
"""Class describing a LAV alignment"""
file_ext = "lav"
# gvk- 11/19/09 - This is really an alignment, but we no longer have tools that use this data type, and it is
# here simply for backward compatibility ( although it is still in the datatypes registry ). Subclassing
    # from data.Text eliminates managing metadata elements inherited from the Alignment class.
def sniff( self, filename ):
"""
Determines whether the file is in lav format
LAV is an alignment format developed by Webb Miller's group. It is the primary output format for BLASTZ.
The first line of a .lav file begins with #:lav.
For complete details see http://www.bioperl.org/wiki/LAV_alignment_format
>>> fname = get_test_fname( 'alignment.lav' )
>>> Lav().sniff( fname )
True
>>> fname = get_test_fname( 'alignment.axt' )
>>> Lav().sniff( fname )
False
"""
headers = get_headers( filename, None )
try:
if len(headers) > 1 and headers[0][0] and headers[0][0].startswith('#:lav'):
return True
else:
return False
except:
return False
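        # Editorial note (not part of the original Galaxy source): detection here is
        # intentionally minimal; any file whose first header token starts with
        # "#:lav" is accepted, and anything else (including axt, whose blocks begin
        # with a numeric summary line rather than "#:lav") is rejected.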
| volpino/Yeps-EURAC | lib/galaxy/datatypes/sequence.py | Python | mit | 21,504 | ["BLAST", "BioPerl", "Galaxy"] | f9d07a3b9fe9386efb4bb9b56b1f2f1f90cdbd0551221bae3cc4479ed68d55a2 |
from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats_female = (
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{prefix_female}} {{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}} {{suffix_female}}',
'{{prefix_female}} {{first_name_female}} {{last_name}} {{suffix_female}}')
formats_male = (
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{prefix_male}} {{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}} {{suffix_male}}',
'{{prefix_male}} {{first_name_male}} {{last_name}} {{suffix_male}}',
)
formats = formats_male + formats_female
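    # Editorial note (not part of the original Faker source): the base person
    # provider picks one of these format strings at random, so repeating the bare
    # '{{first_name_*}} {{last_name}}' pattern several times above effectively
    # weights it more heavily than the prefixed/suffixed variants.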
first_names_female = (
'Aaliyah', 'Abagail', 'Abbey', 'Abbie', 'Abbigail', 'Abby', 'Abigail',
'Abigale', 'Abigayle', 'Abril', 'Achsah', 'Ada', 'Adah', 'Adaline',
'Adalyn', 'Adalynn', 'Adamaris', 'Adda', 'Addie', 'Addison', 'Addisyn',
'Addyson', 'Adel', 'Adela', 'Adelaide', 'Adele', 'Adelia', 'Adelina',
'Adeline', 'Adell', 'Adella', 'Adelle', 'Adelyn', 'Adelynn', 'Adilene',
'Adina', 'Adison', 'Adline', 'Adria', 'Adriana', 'Adriane', 'Adrianna',
'Adrianne', 'Adriene', 'Adrienne', 'Adyson', 'Affie', 'Afton', 'Agatha',
'Aggie', 'Agnes', 'Agness', 'Agusta', 'Aida', 'Aileen', 'Ailene',
'Aili', 'Aimee', 'Ainsley', 'Aisha', 'Aiyana', 'Aiyanna', 'Aja',
'Akeelah', 'Akira', 'Ala', 'Alabama', 'Alaina', 'Alana', 'Alani',
'Alanna', 'Alannah', 'Alaya', 'Alayna', 'Alba', 'Alberta', 'Albertha',
'Albertina', 'Albertine', 'Albina', 'Alcie', 'Alda', 'Aldona', 'Aleah',
'Alease', 'Alecia', 'Aleen', 'Aleena', 'Alejandra', 'Alena', 'Alene',
'Alesha', 'Alesia', 'Alessandra', 'Aleta', 'Aletha', 'Alethea', 'Alex',
'Alexa', 'Alexandr', 'Alexandra', 'Alexandrea', 'Alexandria', 'Alexia',
'Alexina', 'Alexis', 'Alexus', 'Alexys', 'Alfreda', 'Alia', 'Aliana',
'Alice', 'Alicia', 'Alida', 'Alina', 'Aline', 'Alisa', 'Alisha',
'Alison', 'Alissa', 'Alisson', 'Alivia', 'Aliya', 'Aliyah', 'Aliza',
'Alize', 'Alla', 'Allean', 'Alleen', 'Allena', 'Allene', 'Allie',
'Alline', 'Allison', 'Allisson', 'Ally', 'Allyson', 'Allyssa', 'Alma',
'Almeda', 'Almedia', 'Almeta', 'Almina', 'Almira', 'Almyra', 'Aloma',
'Alondra', 'Alpha', 'Alphonsine', 'Alta', 'Altha', 'Althea', 'Altie',
'Alvena', 'Alvera', 'Alverda', 'Alverta', 'Alvina', 'Alvira', 'Alwilda',
'Alwina', 'Alwine', 'Alyce', 'Alycia', 'Alys', 'Alysa', 'Alyse',
'Alysha', 'Alysia', 'Alyson', 'Alyssa', 'Alyssia', 'Alyvia', 'Alzina',
'Ama', 'Amalia', 'Amalie', 'Amanda', 'Amani', 'Amara', 'Amari',
'Amaris', 'Amaya', 'Amber', 'Amberly', 'Amelia', 'Amelie', 'America',
'Amey', 'Ami', 'Amiah', 'Amie', 'Amina', 'Amira', 'Amirah', 'Amiya',
'Amiyah', 'Amma', 'Ammie', 'Amparo', 'Amy', 'Amya', 'Ana', 'Anabel',
'Anabella', 'Anabelle', 'Anahi', 'Anais', 'Analia', 'Anastacia',
'Anastasia', 'Anaya', 'Andra', 'Andrea', 'Andria', 'Angel', 'Angela',
'Angele', 'Angeles', 'Angelia', 'Angelic', 'Angelica', 'Angelina',
'Angeline', 'Angelique', 'Angelita', 'Angella', 'Angie', 'Anice',
'Anie', 'Anika', 'Anissa', 'Anita', 'Anitra', 'Aniya', 'Aniyah',
'Anjali', 'Anjanette', 'Anjelica', 'Ann', 'Anna', 'Annabel', 'Annabell',
'Annabella', 'Annabelle', 'Annalise', 'Annamae', 'Annamarie', 'Anne',
'Anneliese', 'Annemarie', 'Anner', 'Annetta', 'Annette', 'Annice',
'Annie', 'Annika', 'Annis', 'Annmarie', 'Anona', 'Ansley', 'Antionette',
'Antoinette', 'Antonetta', 'Antonette', 'Antonia', 'Antonina', 'Anya',
'April', 'Ara', 'Arabella', 'Araceli', 'Aracely', 'Arah', 'Araminta',
'Ardath', 'Ardelia', 'Ardell', 'Ardella', 'Ardelle', 'Arden', 'Ardeth',
'Ardis', 'Ardith', 'Ardyce', 'Areli', 'Arely', 'Aretha', 'Argie',
'Aria', 'Ariana', 'Ariane', 'Arianna', 'Arie', 'Ariel', 'Ariella',
'Arielle', 'Arietta', 'Arizona', 'Arkie', 'Arla', 'Arleen', 'Arlena',
'Arlene', 'Arleth', 'Arletta', 'Arley', 'Arlie', 'Arline', 'Arly',
'Arlyne', 'Armani', 'Armida', 'Arminda', 'Arminta', 'Arnetta', 'Arra',
'Arrie', 'Arta', 'Artelia', 'Arvilla', 'Aryana', 'Aryanna', 'Asha',
'Ashanti', 'Ashely', 'Ashlea', 'Ashlee', 'Ashleigh', 'Ashley', 'Ashli',
'Ashlie', 'Ashly', 'Ashlyn', 'Ashlynn', 'Ashtyn', 'Asia', 'Ason',
'Aspen', 'Assunta', 'Astrid', 'Atha', 'Athena', 'Attie', 'Aubree',
'Aubrey', 'Aubrie', 'Audie', 'Audra', 'Audrey', 'Audriana', 'Audrianna',
'Audrina', 'Audry', 'Augusta', 'Augustina', 'Aura', 'Aurelia',
'Aurilla', 'Aurora', 'Aurore', 'Autumn', 'Ava', 'Avah', 'Averi',
'Averie', 'Avie', 'Avis', 'Ayana', 'Ayanna', 'Ayesha', 'Ayla', 'Ayleen',
'Aylin', 'Azalee', 'Azaria', 'Azariah', 'Azul', 'Azzie', 'Babette',
'Baby', 'Bailee', 'Bailey', 'Bama', 'Bambi', 'Barb', 'Barbara',
'Barbie', 'Barbra', 'Baylee', 'Baylie', 'Bea', 'Beadie', 'Beatrice',
'Beatrix', 'Beatriz', 'Beaulah', 'Bebe', 'Beckie', 'Becky', 'Beda',
'Bee', 'Belen', 'Belia', 'Belinda', 'Bell', 'Bella', 'Belle', 'Belva',
'Bena', 'Benita', 'Bennie', 'Berdie', 'Berenice', 'Bernadette',
'Bernadine', 'Bernardine', 'Berneice', 'Bernetta', 'Bernice',
'Berniece', 'Bernita', 'Berta', 'Bertha', 'Bertie', 'Bertina', 'Beryl',
'Bess', 'Besse', 'Bessie', 'Beth', 'Betha', 'Bethann', 'Bethany',
'Bethel', 'Bethzy', 'Betsey', 'Betsy', 'Bette', 'Bettie', 'Bettina',
'Betty', 'Bettye', 'Bettyjane', 'Bettylou', 'Beula', 'Beulah', 'Bev',
'Beverlee', 'Beverley', 'Beverly', 'Beyonce', 'Bianca', 'Biddie',
'Billie', 'Billy', 'Billye', 'Bina', 'Bird', 'Birdella', 'Birdie',
'Birtha', 'Birtie', 'Blair', 'Blake', 'Blanca', 'Blanch', 'Blanche',
'Blanchie', 'Blossom', 'Bobbi', 'Bobbie', 'Bobby', 'Bobbye', 'Bonita',
'Bonnie', 'Bonny', 'Braelyn', 'Brande', 'Brandee', 'Brandi', 'Brandie',
'Brandon', 'Brandy', 'Brea', 'Breana', 'Breann', 'Breanna', 'Breanne',
'Bree', 'Brenda', 'Brenna', 'Breonna', 'Brett', 'Bria', 'Briana',
'Brianda', 'Brianna', 'Brianne', 'Bridget', 'Bridgett', 'Bridgette',
'Brielle', 'Brigette', 'Brigid', 'Brigitte', 'Briley', 'Brinda',
'Brinley', 'Brionna', 'Brisa', 'Bristol', 'Britany', 'Britney',
'Britni', 'Britny', 'Britt', 'Britta', 'Brittaney', 'Brittani',
'Brittanie', 'Brittany', 'Brittnay', 'Brittnee', 'Brittney', 'Brittni',
'Brittnie', 'Brittny', 'Brook', 'Brooke', 'Brooklyn', 'Brooklynn',
'Bryana', 'Bryanna', 'Brylee', 'Bryn', 'Brynlee', 'Brynn', 'Buelah',
'Buena', 'Buffy', 'Bula', 'Bulah', 'Buna', 'Burnice', 'Byrd', 'Byrdie',
'Caddie', 'Cadence', 'Cailyn', 'Caitlin', 'Caitlyn', 'Caitlynn',
'Caldonia', 'Caleigh', 'Cali', 'Calista', 'Calla', 'Calleigh', 'Callie',
'Cambria', 'Cameron', 'Cami', 'Camila', 'Camilla', 'Camille', 'Camisha',
'Cammie', 'Campbell', 'Camryn', 'Candace', 'Candi', 'Candice',
'Candida', 'Candis', 'Candy', 'Candyce', 'Cannie', 'Capitola', 'Cappie',
'Caprice', 'Cara', 'Caren', 'Carey', 'Cari', 'Carie', 'Carin', 'Carina',
'Carisa', 'Carissa', 'Carla', 'Carlee', 'Carleen', 'Carleigh',
'Carlene', 'Carley', 'Carli', 'Carlie', 'Carlota', 'Carlotta', 'Carly',
'Carlyn', 'Carma', 'Carmel', 'Carmela', 'Carmelita', 'Carmella',
'Carmen', 'Caro', 'Carol', 'Carolann', 'Carole', 'Carolee', 'Carolina',
'Caroline', 'Carolyn', 'Carolyne', 'Carolynn', 'Caron', 'Carra',
'Carri', 'Carrie', 'Carrol', 'Carroll', 'Carry', 'Carson', 'Cary',
'Caryl', 'Caryn', 'Casandra', 'Casey', 'Casie', 'Cassandra', 'Cassidy',
'Cassie', 'Cassondra', 'Catalina', 'Catharine', 'Catherine', 'Cathern',
'Cathey', 'Cathi', 'Cathie', 'Cathleen', 'Cathrine', 'Cathryn', 'Cathy',
'Catina', 'Catrina', 'Caydence', 'Cayla', 'Caylee', 'Cecelia', 'Cecile',
'Cecilia', 'Cecily', 'Ceil', 'Celena', 'Celesta', 'Celeste', 'Celestia',
'Celestine', 'Celia', 'Celie', 'Celina', 'Celine', 'Cena', 'Ceola',
'Chaka', 'Chana', 'Chanda', 'Chandler', 'Chandra', 'Chanel', 'Chanelle',
'Chaney', 'Chanie', 'Channie', 'Channing', 'Chantal', 'Chante',
'Chantel', 'Chantelle', 'Charissa', 'Charisse', 'Charity', 'Charla',
'Charlee', 'Charleen', 'Charlene', 'Charley', 'Charlie', 'Charline',
'Charlize', 'Charlotta', 'Charlotte', 'Charlottie', 'Charlsie',
'Charmaine', 'Charolette', 'Chase', 'Chasity', 'Chastity', 'Chaya',
'Chelsea', 'Chelsey', 'Chelsi', 'Chelsie', 'Chelsy', 'Cher', 'Cherelle',
'Cheri', 'Cherie', 'Cherilyn', 'Cherise', 'Cherish', 'Cherrelle',
'Cherri', 'Cherrie', 'Cherry', 'Cherryl', 'Cheryl', 'Cheryle',
'Cheryll', 'Chessie', 'Chestina', 'Cheyanne', 'Cheyenne', 'Chimere',
'China', 'Chiquita', 'Chloe', 'Chloie', 'Chris', 'Chrissie', 'Chrissy',
'Christa', 'Christal', 'Christeen', 'Christel', 'Christen', 'Christena',
'Christene', 'Christi', 'Christian', 'Christiana', 'Christie',
'Christin', 'Christina', 'Christine', 'Christy', 'Chrystal', 'Chyna',
'Chynna', 'Ciara', 'Ciarra', 'Cicely', 'Cielo', 'Ciera', 'Cierra',
'Ciji', 'Cilla', 'Cinda', 'Cindi', 'Cindy', 'Cinnamon', 'Cinthia',
'Citlali', 'Citlalli', 'Clair', 'Claire', 'Clara', 'Clarabelle',
'Clare', 'Claribel', 'Clarice', 'Clarinda', 'Clarine', 'Clarisa',
'Clarissa', 'Classie', 'Claudette', 'Claudia', 'Claudie', 'Claudine',
'Cleda', 'Clella', 'Clem', 'Clemence', 'Clementina', 'Clementine',
'Clemie', 'Clemma', 'Clemmie', 'Cleo', 'Cleola', 'Cleone', 'Cleora',
'Cleta', 'Cleva', 'Clevie', 'Cliffie', 'Cloe', 'Clora', 'Clotilda',
'Clotilde', 'Clyda', 'Clydie', 'Clytie', 'Coleen', 'Coletta', 'Colette',
'Colleen', 'Collette', 'Columbia', 'Concepcion', 'Concetta', 'Concha',
'Connie', 'Constance', 'Consuela', 'Consuelo', 'Contina', 'Cora',
'Coraima', 'Coral', 'Coralie', 'Corda', 'Cordelia', 'Cordella',
'Cordia', 'Cordie', 'Corean', 'Corene', 'Coretta', 'Corey', 'Cori',
'Corie', 'Corina', 'Corine', 'Corinna', 'Corinne', 'Corliss',
'Cornelia', 'Cornie', 'Corrie', 'Corrina', 'Corrine', 'Cortney', 'Cory',
'Courtney', 'Creola', 'Cressie', 'Crete', 'Crissie', 'Crissy', 'Crista',
'Cristal', 'Cristen', 'Cristi', 'Cristin', 'Cristina', 'Cristine',
'Cristy', 'Cruz', 'Crysta', 'Crystal', 'Cuba', 'Cydney', 'Cyndi',
'Cyntha', 'Cynthia', 'Dafne', 'Dagmar', 'Dagny', 'Dahlia', 'Daija',
'Daijah', 'Daisey', 'Daisha', 'Daisie', 'Daisy', 'Daisye', 'Daja',
'Dakota', 'Dale', 'Dalia', 'Dallas', 'Damaris', 'Dana', 'Danae',
'Daneen', 'Danelle', 'Danette', 'Dani', 'Dania', 'Danica', 'Daniela',
'Daniele', 'Daniella', 'Danielle', 'Danika', 'Danita', 'Danna',
'Dannie', 'Dannielle', 'Danyel', 'Danyell', 'Danyelle', 'Daphne',
'Dara', 'Darby', 'Darci', 'Darcie', 'Darcy', 'Daria', 'Darian',
'Dariana', 'Darla', 'Darleen', 'Darlene', 'Darline', 'Darlyne', 'Dasia',
'Davina', 'Dawn', 'Dawna', 'Dawne', 'Dayami', 'Dayana', 'Dayanara',
'Dayle', 'Dayna', 'Dayse', 'Deana', 'Deandra', 'Deann', 'Deanna',
'Deanne', 'Deasia', 'Deb', 'Debbi', 'Debbie', 'Debbra', 'Debby',
'Debera', 'Debi', 'Debora', 'Deborah', 'Deborrah', 'Debra', 'Debrah',
'Debroah', 'Dedra', 'Dee', 'Deeann', 'Deedee', 'Deena', 'Deetta',
'Deidra', 'Deidre', 'Deirdre', 'Deja', 'Dejah', 'Delaney', 'Delcie',
'Delfina', 'Delia', 'Deliah', 'Delila', 'Delilah', 'Delina', 'Delinda',
'Delisa', 'Dell', 'Della', 'Dellar', 'Delle', 'Dellia', 'Dellie',
'Delma', 'Delois', 'Delora', 'Delores', 'Deloris', 'Delpha', 'Delphia',
'Delphine', 'Delsie', 'Delta', 'Dema', 'Demetra', 'Demetria', 'Demi',
'Dena', 'Deneen', 'Denese', 'Denice', 'Denine', 'Denise', 'Denisha',
'Denisse', 'Denita', 'Dennie', 'Desirae', 'Desiree', 'Dessa', 'Dessie',
'Destany', 'Destinee', 'Destiney', 'Destini', 'Destiny', 'Devan',
'Devin', 'Devon', 'Devyn', 'Dewey', 'Deyanira', 'Dezzie', 'Diamond',
'Dian', 'Diana', 'Diandra', 'Diane', 'Diann', 'Dianna', 'Dianne',
'Dicie', 'Dicy', 'Dillie', 'Dimple', 'Dina', 'Dinah', 'Dione', 'Dionne',
'Dixie', 'Diya', 'Djuana', 'Djuna', 'Docia', 'Dola', 'Dollie', 'Dolly',
'Dollye', 'Dolores', 'Doloris', 'Domenica', 'Dominga', 'Dominique',
'Dominque', 'Domonique', 'Dona', 'Donia', 'Donie', 'Donita', 'Donna',
'Donnie', 'Dora', 'Dorathea', 'Dorathy', 'Dorcas', 'Doreen', 'Dorene',
'Doretha', 'Doretta', 'Dori', 'Dorinda', 'Dorine', 'Doris', 'Dorla',
'Dorotha', 'Dorothea', 'Dorothy', 'Dorris', 'Dortha', 'Dorthea',
'Dorthey', 'Dorthy', 'Dosha', 'Doshia', 'Doshie', 'Dosia', 'Dossie',
'Dot', 'Dottie', 'Dotty', 'Dove', 'Dovie', 'Drema', 'Drew', 'Drucilla',
'Drusilla', 'Dulce', 'Dulcie', 'Dusty', 'Dwan', 'Dyan', 'Dylan',
'Earlean', 'Earlene', 'Earlie', 'Earline', 'Earnestine', 'Eartha',
'Easter', 'Eathel', 'Ebba', 'Eboni', 'Ebony', 'Echo', 'Eda', 'Eddie',
'Eden', 'Edie', 'Edith', 'Edla', 'Edmonia', 'Edna', 'Ednah', 'Edra',
'Edrie', 'Edris', 'Edwina', 'Edyth', 'Edythe', 'Effa', 'Effie',
'Eileen', 'Eithel', 'Ela', 'Elaina', 'Elaine', 'Elana', 'Elayne',
'Elba', 'Elberta', 'Elda', 'Eldora', 'Eleanor', 'Eleanora', 'Eleanore',
'Elease', 'Electa', 'Elena', 'Elenor', 'Elenora', 'Elenore', 'Eleonora',
'Eleonore', 'Elfie', 'Elfreda', 'Elfrieda', 'Elgie', 'Elia', 'Eliana',
'Elianna', 'Elida', 'Elinor', 'Elinore', 'Elisa', 'Elisabeth', 'Elise',
'Elisha', 'Elissa', 'Eliza', 'Elizabet', 'Elizabeth', 'Elizbeth',
'Elizebeth', 'Ella', 'Ellamae', 'Ellar', 'Elle', 'Ellen', 'Eller',
'Elliana', 'Ellie', 'Ellyn', 'Elma', 'Elmina', 'Elmira', 'Elmire',
'Elmyra', 'Elna', 'Elnora', 'Elodie', 'Elois', 'Eloisa', 'Eloise',
'Elouise', 'Elsa', 'Else', 'Elsie', 'Elta', 'Elva', 'Elvera', 'Elvia',
'Elvie', 'Elvina', 'Elvira', 'Elwanda', 'Elyse', 'Elyssa', 'Elza',
'Elzada', 'Ema', 'Emaline', 'Ember', 'Emelia', 'Emelie', 'Emeline',
'Emely', 'Emerald', 'Emerson', 'Emery', 'Emilee', 'Emilia', 'Emilie',
'Emily', 'Emma', 'Emmalee', 'Emmaline', 'Emmer', 'Emmie', 'Emmy',
'Emogene', 'Ena', 'Enid', 'Enola', 'Enriqueta', 'Eola', 'Eppie',
'Epsie', 'Era', 'Erica', 'Ericka', 'Erie', 'Erika', 'Erin', 'Eris',
'Erla', 'Erlene', 'Erlinda', 'Erline', 'Erma', 'Ermina', 'Ermine',
'Erna', 'Ernestina', 'Ernestine', 'Erykah', 'Eryn', 'Esmeralda',
'Esperanza', 'Essa', 'Essence', 'Essie', 'Esta', 'Estefani',
'Estefania', 'Estefany', 'Estela', 'Estell', 'Estella', 'Estelle',
'Ester', 'Esther', 'Estie', 'Estrella', 'Etha', 'Ethel', 'Ethelene',
'Ethelyn', 'Ether', 'Ethie', 'Ethyl', 'Ethyle', 'Etna', 'Etta', 'Etter',
'Ettie', 'Eudora', 'Eugenia', 'Eugenie', 'Eula', 'Eulah', 'Eulalia',
'Eulalie', 'Euna', 'Eunice', 'Euphemia', 'Eura', 'Eva', 'Evalena',
'Evaline', 'Evalyn', 'Evangelina', 'Evangeline', 'Eve', 'Evelena',
'Evelin', 'Evelina', 'Eveline', 'Evelyn', 'Evelyne', 'Evelynn', 'Ever',
'Evette', 'Evia', 'Evie', 'Evita', 'Evon', 'Evonne', 'Exa', 'Exie',
'Fabiola', 'Fae', 'Fairy', 'Faith', 'Fallon', 'Falon', 'Fannie',
'Fanny', 'Fannye', 'Farah', 'Farrah', 'Fatima', 'Fawn', 'Fay', 'Faye',
'Felecia', 'Felice', 'Felicia', 'Felicie', 'Felicitas', 'Felicity',
'Felipa', 'Felisha', 'Fern', 'Fernanda', 'Ferne', 'Fidelia', 'Filomena',
'Finley', 'Fiona', 'Flavia', 'Fleda', 'Fleeta', 'Fleta', 'Flo',
'Flonnie', 'Flor', 'Flora', 'Florance', 'Florence', 'Florene',
'Floretta', 'Florida', 'Florie', 'Florine', 'Florrie', 'Flossie',
'Floy', 'Fonda', 'Forest', 'Fran', 'Franc', 'Frances', 'Francesca',
'Francies', 'Francina', 'Francine', 'Francis', 'Francisca',
'Francisquita', 'Frankie', 'Freda', 'Freddie', 'Frederica',
'Fredericka', 'Freeda', 'Freida', 'Frida', 'Frieda', 'Frona', 'Fronia',
'Fronie', 'Fronnie', 'Fumiko', 'Gabriela', 'Gabriella', 'Gabrielle',
'Gail', 'Gale', 'Galilea', 'Garnet', 'Garnett', 'Gay', 'Gaye', 'Gayla',
'Gayle', 'Gaylene', 'Gaynell', 'Gearldine', 'Gemma', 'Gena', 'Gene',
'Genesis', 'Geneva', 'Genevieve', 'Genevra', 'Genie', 'Gennie',
'Genoveva', 'Georganna', 'Georgeann', 'Georgeanna', 'Georgene',
'Georgetta', 'Georgette', 'Georgia', 'Georgiana', 'Georgiann',
'Georgianna', 'Georgie', 'Georgina', 'Georgine', 'Geraldine', 'Geralyn',
'Gerda', 'Geri', 'Germaine', 'Gerri', 'Gerry', 'Gertha', 'Gertie',
'Gertrude', 'Gia', 'Giada', 'Giana', 'Gianna', 'Gidget', 'Gigi',
'Gilda', 'Gillian', 'Gillie', 'Gina', 'Ginger', 'Ginny', 'Giovanna',
'Girtha', 'Gisele', 'Giselle', 'Gisselle', 'Giuliana', 'Gladis',
'Gladyce', 'Gladys', 'Glenda', 'Glendora', 'Glenn', 'Glenna', 'Glennie',
'Glennis', 'Glinda', 'Gloria', 'Glynda', 'Glynis', 'Golda', 'Golden',
'Goldia', 'Goldie', 'Grace', 'Gracelyn', 'Gracia', 'Gracie', 'Graciela',
'Grayce', 'Grecia', 'Gregoria', 'Greta', 'Gretchen', 'Gretta', 'Grisel',
'Griselda', 'Guadalupe', 'Gunda', 'Gussie', 'Gusta', 'Gustie', 'Gwen',
'Gwenda', 'Gwendolyn', 'Gwyn', 'Gwyneth', 'Hadassah', 'Hadley',
'Hailee', 'Hailey', 'Hailie', 'Haleigh', 'Haley', 'Hali', 'Halie',
'Halle', 'Halley', 'Hallie', 'Hana', 'Hanna', 'Hannah', 'Harlene',
'Harley', 'Harlow', 'Harmony', 'Harper', 'Harriet', 'Harriett',
'Harriette', 'Haruko', 'Hasel', 'Hassie', 'Hattie', 'Haven', 'Hayden',
'Haylee', 'Hayleigh', 'Hayley', 'Haylie', 'Hazel', 'Hazelle', 'Hazle',
'Heather', 'Heaven', 'Hedwig', 'Hedy', 'Heidi', 'Heidy', 'Helaine',
'Helen', 'Helena', 'Helene', 'Helga', 'Hellen', 'Helma', 'Helyn',
'Hennie', 'Henretta', 'Henrietta', 'Henriette', 'Herlinda', 'Herma',
'Hermina', 'Hermine', 'Herminia', 'Hertha', 'Hessie', 'Hester',
'Hettie', 'Hetty', 'Hilah', 'Hilary', 'Hilda', 'Hildegard',
'Hildegarde', 'Hildred', 'Hildur', 'Hillary', 'Hilma', 'Holli',
'Hollie', 'Hollis', 'Holly', 'Honora', 'Hope', 'Hortencia', 'Hortense',
'Hortensia', 'Hulda', 'Huldah', 'Hunter', 'Ica', 'Icey', 'Icie', 'Icy',
'Ida', 'Idabelle', 'Idamae', 'Idell', 'Idella', 'Iesha', 'Ieshia',
'Ila', 'Ilah', 'Ilda', 'Ilene', 'Iliana', 'Illa', 'Ilma', 'Ilo',
'Ilona', 'Ima', 'Imani', 'Imelda', 'Imo', 'Imogene', 'Ina', 'India',
'Indiana', 'Inell', 'Ines', 'Inez', 'Infant', 'Inga', 'Ingeborg',
'Inger', 'Ingrid', 'Iola', 'Iona', 'Ione', 'Ira', 'Ireland', 'Irena',
'Irene', 'Iridian', 'Irine', 'Iris', 'Irma', 'Irva', 'Isa', 'Isabel',
'Isabela', 'Isabell', 'Isabella', 'Isabelle', 'Isadora', 'Isamar',
'Isis', 'Isla', 'Isobel', 'Itzel', 'Iva', 'Ivah', 'Ivana', 'Ivanna',
'Ivette', 'Ivey', 'Ivie', 'Ivonne', 'Ivory', 'Ivy', 'Iyana', 'Iyanna',
'Iza', 'Izabella', 'Izabelle', 'Izetta', 'Izola', 'Izora', 'Jacalyn',
'Jacey', 'Jackeline', 'Jacki', 'Jackie', 'Jacklyn', 'Jaclyn', 'Jacque',
'Jacquelin', 'Jacqueline', 'Jacquelyn', 'Jacquline', 'Jacqulyn', 'Jada',
'Jade', 'Jaden', 'Jadyn', 'Jaeda', 'Jaelyn', 'Jaelynn', 'Jaida',
'Jaiden', 'Jaidyn', 'Jailene', 'Jailyn', 'Jaime', 'Jaimee', 'Jakayla',
'Jaleesa', 'Jalisa', 'Jalissa', 'Jaliyah', 'Jalyn', 'Jalynn', 'Jamey',
'Jami', 'Jamie', 'Jamila', 'Jamiya', 'Jammie', 'Jamya', 'Jan', 'Jana',
'Janae', 'Janay', 'Jane', 'Janeen', 'Janel', 'Janell', 'Janelle',
'Janene', 'Janessa', 'Janet', 'Janette', 'Janey', 'Janiah', 'Janice',
'Janie', 'Janine', 'Janis', 'Janiya', 'Janiyah', 'Jann', 'Janna',
'Jannette', 'Jannie', 'January', 'Janyce', 'Jaquelin', 'Jaqueline',
'Jaslene', 'Jaslyn', 'Jasmin', 'Jasmine', 'Jasmyn', 'Jasmyne',
'Jaunita', 'Jaycee', 'Jaycie', 'Jayda', 'Jayde', 'Jayden', 'Jaye',
'Jayla', 'Jaylah', 'Jaylee', 'Jayleen', 'Jaylen', 'Jaylene', 'Jaylin',
'Jaylyn', 'Jaylynn', 'Jayme', 'Jayne', 'Jazlene', 'Jazlyn', 'Jazlynn',
'Jazmin', 'Jazmine', 'Jazmyn', 'Jazmyne', 'Jean', 'Jeana', 'Jeane',
'Jeanetta', 'Jeanette', 'Jeanie', 'Jeanine', 'Jeanmarie', 'Jeanna',
'Jeanne', 'Jeannette', 'Jeannie', 'Jeannine', 'Jeffie', 'Jemima',
'Jena', 'Jenelle', 'Jenifer', 'Jenilee', 'Jenna', 'Jennette', 'Jenni',
'Jennie', 'Jennifer', 'Jenniffer', 'Jenny', 'Jensen', 'Jeraldine',
'Jeri', 'Jerica', 'Jerilyn', 'Jerilynn', 'Jerri', 'Jerrica', 'Jerrie',
'Jerrilyn', 'Jerusha', 'Jeryl', 'Jesenia', 'Jesica', 'Jesse',
'Jessenia', 'Jessi', 'Jessica', 'Jessie', 'Jessika', 'Jessye', 'Jetta',
'Jettie', 'Jewel', 'Jewell', 'Jill', 'Jillian', 'Jimena', 'Jinnie',
'Jo', 'Joan', 'Joana', 'Joanie', 'Joann', 'Joanna', 'Joanne', 'Jocelyn',
'Jocelyne', 'Jocelynn', 'Jodi', 'Jodie', 'Jody', 'Joell', 'Joella',
'Joelle', 'Joellen', 'Joetta', 'Joette', 'Johana', 'Johanna',
'Johannah', 'Johnie', 'Johnna', 'Johnnie', 'Joi', 'Joleen', 'Jolene',
'Jolette', 'Jolie', 'Joline', 'Jonell', 'Joni', 'Jonna', 'Jonnie',
'Jordan', 'Jordin', 'Jordyn', 'Joretta', 'Jorja', 'Josefa', 'Josefina',
'Josefita', 'Joselin', 'Joseline', 'Joselyn', 'Josephine', 'Josette',
'Josie', 'Josiephine', 'Joslyn', 'Jossie', 'Journey', 'Jovita', 'Joy',
'Joyce', 'Joycelyn', 'Joye', 'Juana', 'Juanita', 'Judi', 'Judie',
'Judith', 'Judy', 'Judyth', 'Jule', 'Juli', 'Julia', 'Juliana',
'Juliann', 'Julianna', 'Julianne', 'Julie', 'Juliet', 'Juliette',
'Julisa', 'Julissa', 'June', 'Junia', 'Junie', 'Justice', 'Justina',
'Justine', 'Kaaren', 'Kacey', 'Kaci', 'Kacie', 'Kacy', 'Kadence',
'Kadijah', 'Kaela', 'Kaelyn', 'Kaelynn', 'Kaia', 'Kaila', 'Kailee',
'Kailey', 'Kailyn', 'Kaitlin', 'Kaitlyn', 'Kaitlynn', 'Kaiya', 'Kala',
'Kaleena', 'Kaleigh', 'Kalene', 'Kaley', 'Kali', 'Kalie', 'Kaliyah',
'Kallie', 'Kalyn', 'Kamari', 'Kameron', 'Kami', 'Kamila', 'Kamilah',
'Kamora', 'Kamryn', 'Kamya', 'Kandace', 'Kandi', 'Kandice', 'Kandy',
'Kanesha', 'Kanisha', 'Kara', 'Karan', 'Karel', 'Karen', 'Kari',
'Karie', 'Karin', 'Karina', 'Karis', 'Karissa', 'Karla', 'Karlee',
'Karlene', 'Karley', 'Karli', 'Karlie', 'Karly', 'Karma', 'Karol',
'Karolyn', 'Karon', 'Karren', 'Karri', 'Karrie', 'Karsyn', 'Karyl',
'Karyme', 'Karyn', 'Kasandra', 'Kasey', 'Kasie', 'Kassandra', 'Kassidy',
'Kassie', 'Katarina', 'Kate', 'Katelin', 'Katelyn', 'Katelynn',
'Katerina', 'Kathaleen', 'Katharina', 'Katharine', 'Katharyn',
'Katherin', 'Katherine', 'Kathern', 'Katheryn', 'Kathey', 'Kathi',
'Kathie', 'Kathleen', 'Kathlene', 'Kathlyn', 'Kathrine', 'Kathryn',
'Kathryne', 'Kathy', 'Kathyrn', 'Kati', 'Katia', 'Katie', 'Katina',
'Katlin', 'Katlyn', 'Katlynn', 'Katrina', 'Kattie', 'Katy', 'Kay',
'Kaya', 'Kaycee', 'Kayden', 'Kaydence', 'Kaye', 'Kayla', 'Kaylah',
'Kaylan', 'Kaylee', 'Kayleen', 'Kayleigh', 'Kaylen', 'Kaylene',
'Kayley', 'Kayli', 'Kaylie', 'Kaylin', 'Kaylyn', 'Kaylynn', 'Kazuko',
'Keanna', 'Keara', 'Kecia', 'Keeley', 'Keely', 'Keena', 'Keesha',
'Keila', 'Keira', 'Keisha', 'Kelcie', 'Keli', 'Kelis', 'Kellee',
'Kelley', 'Kelli', 'Kellie', 'Kelly', 'Kelsea', 'Kelsey', 'Kelsi',
'Kelsie', 'Kendal', 'Kendall', 'Kendra', 'Kenia', 'Kenisha', 'Kenley',
'Kenna', 'Kennedi', 'Kennedy', 'Kenya', 'Kenyatta', 'Kenzie', 'Keri',
'Kerri', 'Kerrie', 'Kerry', 'Kesha', 'Keshia', 'Keyla', 'Khadijah',
'Khalilah', 'Khloe', 'Kia', 'Kiana', 'Kianna', 'Kiara', 'Kiarra',
'Kiera', 'Kierra', 'Kiersten', 'Kiley', 'Kim', 'Kimber', 'Kimberely',
'Kimberlee', 'Kimberley', 'Kimberli', 'Kimberlie', 'Kimberly', 'Kimora',
'Kindra', 'Kinley', 'Kinsey', 'Kinsley', 'Kira', 'Kirsten', 'Kirstie',
'Kirstin', 'Kisha', 'Kittie', 'Kitty', 'Kiya', 'Kiyoko', 'Kizzie',
'Kizzy', 'Kloe', 'Kori', 'Kortney', 'Kourtney', 'Kris', 'Krissy',
'Krista', 'Kristal', 'Kristan', 'Kristen', 'Kristi', 'Kristian',
'Kristie', 'Kristin', 'Kristina', 'Kristine', 'Kristy', 'Kristyn',
'Krysta', 'Krystal', 'Krysten', 'Krystin', 'Krystina', 'Krystle', 'Kya',
'Kyara', 'Kyla', 'Kylah', 'Kyle', 'Kylee', 'Kyleigh', 'Kylene', 'Kylie',
'Kyra', 'Kyrie', 'Lacey', 'Laci', 'Lacie', 'Lacy', 'Ladonna', 'Lady',
'Lahoma', 'Laila', 'Lailah', 'Lainey', 'Laisha', 'Lakeisha', 'Laken',
'Lakendra', 'Lakesha', 'Lakeshia', 'Lakisha', 'Lala', 'Lalla', 'Lana',
'Lanette', 'Laney', 'Lani', 'Lanie', 'Lanita', 'Lannie', 'Laquita',
'Lara', 'Larae', 'Laraine', 'Larissa', 'Larue', 'Lashanda', 'Lashawn',
'Lashonda', 'Lashunda', 'Lasonya', 'Lassie', 'Latanya', 'Latarsha',
'Latasha', 'Latesha', 'Latifah', 'Latisha', 'Latonia', 'Latonya',
'Latoria', 'Latosha', 'Latoya', 'Latoyia', 'Latrice', 'Latricia',
'Latrina', 'Launa', 'Laura', 'Laureen', 'Laurel', 'Lauren', 'Laurene',
'Lauretta', 'Laurette', 'Lauri', 'Laurie', 'Laurine', 'Lauryn',
'Lavada', 'Lavelle', 'Lavenia', 'Lavera', 'Lavern', 'Laverna',
'Laverne', 'Lavina', 'Lavinia', 'Lavon', 'Lavona', 'Lavonda', 'Lavonia',
'Lavonne', 'Lawanda', 'Layla', 'Laylah', 'Lea', 'Leafy', 'Leah',
'Leala', 'Leana', 'Leandra', 'Leaner', 'Leann', 'Leanna', 'Leanne',
'Leatha', 'Leatrice', 'Leda', 'Lee', 'Leeann', 'Leesa', 'Leia', 'Leigh',
'Leighton', 'Leila', 'Leilani', 'Leisa', 'Leisha', 'Leitha', 'Lela',
'Lelah', 'Lelar', 'Lelia', 'Lella', 'Lemma', 'Lempi', 'Lena', 'Lenna',
'Lennie', 'Lenora', 'Lenore', 'Leola', 'Leoma', 'Leona', 'Leone',
'Leonia', 'Leonie', 'Leonor', 'Leonora', 'Leonore', 'Leontine', 'Leora',
'Leota', 'Lera', 'Lesa', 'Lesia', 'Leslee', 'Lesley', 'Lesli', 'Leslie',
'Lesly', 'Lessie', 'Lesta', 'Leta', 'Letha', 'Lethia', 'Leticia',
'Letitia', 'Letta', 'Lettie', 'Letty', 'Leva', 'Levina', 'Lexi',
'Lexie', 'Lexis', 'Lexus', 'Leyla', 'Lia', 'Liana', 'Liane', 'Libbie',
'Libby', 'Liberty', 'Lida', 'Liddie', 'Lidia', 'Lidie', 'Lila', 'Lilah',
'Lilia', 'Lilian', 'Liliana', 'Lilianna', 'Lilie', 'Lilla', 'Liller',
'Lillia', 'Lillian', 'Lilliana', 'Lillianna', 'Lillie', 'Lillis',
'Lilly', 'Lily', 'Lilyan', 'Lilyana', 'Lilyanna', 'Lina', 'Linda',
'Lindsay', 'Lindsey', 'Lindy', 'Linette', 'Linna', 'Linnea', 'Linnie',
'Linsey', 'Lisa', 'Lisbeth', 'Lise', 'Lisette', 'Lisha', 'Lissa',
'Lissette', 'Lissie', 'Lita', 'Litha', 'Littie', 'Litzy', 'Livia',
'Liz', 'Liza', 'Lizabeth', 'Lizbeth', 'Lizeth', 'Lizette', 'Lizzie',
'Lockie', 'Loda', 'Logan', 'Lois', 'Lola', 'Lolita', 'Lolla', 'Lollie',
'Loma', 'Lona', 'London', 'Londyn', 'Loni', 'Lonie', 'Lonna', 'Lonnie',
'Lora', 'Loraine', 'Lorayne', 'Lorean', 'Loree', 'Loreen', 'Lorelai',
'Lorelei', 'Loren', 'Lorena', 'Lorene', 'Lorenza', 'Loretta', 'Loretto',
'Lori', 'Loria', 'Loriann', 'Lorie', 'Lorinda', 'Lorine', 'Loris',
'Lorna', 'Lorraine', 'Lorrayne', 'Lorri', 'Lorrie', 'Lossie', 'Lota',
'Lotta', 'Lottie', 'Lou', 'Louann', 'Louanna', 'Louella', 'Louetta',
'Louie', 'Louisa', 'Louise', 'Louisiana', 'Loula', 'Lourdes',
'Louvenia', 'Love', 'Lovey', 'Lovie', 'Lovina', 'Lovisa', 'Loyce', 'Lu',
'Luana', 'Luann', 'Luanne', 'Luberta', 'Lucero', 'Lucetta', 'Lucia',
'Luciana', 'Lucie', 'Lucile', 'Lucille', 'Lucina', 'Lucinda', 'Lucindy',
'Lucretia', 'Lucy', 'Luda', 'Ludie', 'Lue', 'Luella', 'Luetta',
'Lugenia', 'Luisa', 'Lula', 'Lulah', 'Lular', 'Lulie', 'Lulla', 'Lulu',
'Luna', 'Lupe', 'Lura', 'Lurana', 'Lurena', 'Lurline', 'Lutie',
'Luvenia', 'Luverne', 'Luvinia', 'Luz', 'Lyda', 'Lydia', 'Lyla',
'Lylah', 'Lyn', 'Lynda', 'Lyndia', 'Lyndsay', 'Lyndsey', 'Lynette',
'Lynn', 'Lynne', 'Lynnette', 'Lynsey', 'Lyric', 'Mabel', 'Mabell',
'Mabelle', 'Mable', 'Macel', 'Macey', 'Machelle', 'Maci', 'Macie',
'Mackenzie', 'Macy', 'Madaline', 'Madalyn', 'Madalynn', 'Maddison',
'Madeleine', 'Madelene', 'Madeline', 'Madelyn', 'Madelynn', 'Madge',
'Madie', 'Madilyn', 'Madilynn', 'Madisen', 'Madison', 'Madisyn',
'Madlyn', 'Madonna', 'Madora', 'Madyson', 'Mae', 'Maebell', 'Maebelle',
'Maegan', 'Maeve', 'Mafalda', 'Magan', 'Magdalen', 'Magdalena',
'Magdalene', 'Magen', 'Maggie', 'Magnolia', 'Mahala', 'Mahalia',
'Mahalie', 'Mai', 'Maia', 'Maida', 'Maira', 'Maiya', 'Makaila',
'Makala', 'Makayla', 'Makena', 'Makenna', 'Makenzie', 'Malaya',
'Maleah', 'Malia', 'Maliah', 'Malinda', 'Malissa', 'Malissie',
'Maliyah', 'Mallie', 'Mallorie', 'Mallory', 'Malorie', 'Malvina',
'Mame', 'Mamie', 'Mammie', 'Manda', 'Mandi', 'Mandie', 'Mandy',
'Manerva', 'Manervia', 'Manie', 'Manila', 'Manilla', 'Mannie',
'Manuela', 'Manuelita', 'Mara', 'Maralyn', 'Maranda', 'Marcela',
'Marcelina', 'Marceline', 'Marcella', 'Marcelle', 'Marci', 'Marcia',
'Marcie', 'Marcy', 'Mardell', 'Mareli', 'Marely', 'Maren', 'Margaret',
'Margarete', 'Margaretha', 'Margarett', 'Margaretta', 'Margarette',
'Margarita', 'Margarite', 'Marge', 'Margene', 'Margeret', 'Margery',
'Marget', 'Margie', 'Margo', 'Margot', 'Margret', 'Margrett',
'Margretta', 'Marguerite', 'Margueritte', 'Margurite', 'Margy', 'Mari',
'Maria', 'Mariah', 'Mariam', 'Marian', 'Mariana', 'Marianita',
'Mariann', 'Marianna', 'Marianne', 'Maribel', 'Maribeth', 'Maricela',
'Marie', 'Mariel', 'Mariela', 'Marietta', 'Marilee', 'Marilla',
'Marilou', 'Marilyn', 'Marilynn', 'Marin', 'Marina', 'Marinda',
'Marion', 'Marisa', 'Marisela', 'Marisol', 'Marissa', 'Marita',
'Maritza', 'Mariyah', 'Marjorie', 'Marjory', 'Markita', 'Marla',
'Marlana', 'Marlee', 'Marleen', 'Marleigh', 'Marlen', 'Marlena',
'Marlene', 'Marley', 'Marlie', 'Marlo', 'Marlyn', 'Marlys', 'Marni',
'Marnie', 'Marnita', 'Marolyn', 'Marquita', 'Marry', 'Marsha', 'Marta',
'Martha', 'Marti', 'Martika', 'Martina', 'Martine', 'Marty', 'Marva',
'Marvel', 'Mary', 'Maryam', 'Maryann', 'Maryanne', 'Marybelle',
'Marybeth', 'Maryellen', 'Maryjane', 'Maryjo', 'Marylee', 'Marylin',
'Marylou', 'Marylouise', 'Marylyn', 'Masako', 'Mathilda', 'Mathilde',
'Matie', 'Matilda', 'Matilde', 'Mattie', 'Mattye', 'Maud', 'Maude',
'Maudie', 'Maura', 'Maureen', 'Maurine', 'Mavis', 'Maxie', 'Maxine',
'May', 'Maya', 'Maybell', 'Maybelle', 'Maye', 'Mayme', 'Maymie',
'Mayra', 'Mazie', 'Mckayla', 'Mckenna', 'Mckenzie', 'Mckinley',
'Meadow', 'Meagan', 'Meaghan', 'Mechelle', 'Meda', 'Media', 'Medora',
'Meg', 'Megan', 'Meggan', 'Meghan', 'Meghann', 'Melanie', 'Melany',
'Melba', 'Melina', 'Melinda', 'Melisa', 'Melissa', 'Melissia', 'Mell',
'Mellie', 'Mellisa', 'Mellissa', 'Melodee', 'Melodie', 'Melody',
'Melonie', 'Melony', 'Melva', 'Melvina', 'Mena', 'Mendy', 'Mercedes',
'Mercy', 'Meredith', 'Merilyn', 'Merle', 'Merlene', 'Merna', 'Merri',
'Merrie', 'Merrilee', 'Merrily', 'Merry', 'Mertie', 'Meryl', 'Meta',
'Metha', 'Metta', 'Mettie', 'Mia', 'Miah', 'Micaela', 'Micah',
'Micayla', 'Michaela', 'Michaele', 'Michal', 'Michele', 'Michelina',
'Michell', 'Michelle', 'Mickey', 'Mickie', 'Miesha', 'Migdalia',
'Mignon', 'Mikaela', 'Mikaila', 'Mikala', 'Mikalah', 'Mikayla', 'Mila',
'Milagros', 'Milan', 'Milda', 'Mildred', 'Miley', 'Milissa',
'Millicent', 'Millie', 'Milly', 'Mima', 'Mimi', 'Mina', 'Minda',
'Mindi', 'Mindy', 'Minerva', 'Minervia', 'Minna', 'Minnie', 'Minta',
'Mintie', 'Mira', 'Miracle', 'Miranda', 'Mireya', 'Miriah', 'Miriam',
'Mirna', 'Mirtie', 'Missie', 'Missouri', 'Missy', 'Misti', 'Mistie',
'Misty', 'Mittie', 'Mitzi', 'Miya', 'Modena', 'Moesha', 'Moira',
'Mollie', 'Molly', 'Mona', 'Monica', 'Monika', 'Monique', 'Monna',
'Monnie', 'Monserrat', 'Montana', 'Montie', 'Mora', 'Morgan', 'Moriah',
'Mossie', 'Mozell', 'Mozella', 'Mozelle', 'Muriel', 'Murl', 'Mya',
'Myah', 'Myla', 'Mylee', 'Mylie', 'Myra', 'Myranda', 'Myrl', 'Myrle',
'Myrna', 'Myrta', 'Myrtice', 'Myrtie', 'Myrtis', 'Myrtle', 'Nada',
'Nadia', 'Nadine', 'Naima', 'Nakia', 'Nakisha', 'Nakita', 'Nallely',
'Nan', 'Nana', 'Nanci', 'Nancie', 'Nancy', 'Nanette', 'Nanie', 'Nanna',
'Nannette', 'Nannie', 'Naoma', 'Naomi', 'Narcissus', 'Natalee',
'Natalia', 'Natalie', 'Nataly', 'Natalya', 'Natasha', 'Nathalia',
'Nathalie', 'Nathaly', 'Natosha', 'Nautica', 'Nayeli', 'Nayely',
'Nealie', 'Nealy', 'Nedra', 'Neha', 'Nelda', 'Nelia', 'Nelie', 'Nell',
'Nella', 'Nelle', 'Nellie', 'Nelly', 'Nena', 'Neola', 'Neoma', 'Neppie',
'Nereida', 'Neta', 'Netta', 'Nettie', 'Neva', 'Nevada', 'Nevaeh',
'Neveah', 'Nia', 'Nichelle', 'Nichol', 'Nichole', 'Nicki', 'Nicola',
'Nicole', 'Nicolette', 'Nicolle', 'Niki', 'Nikia', 'Nikita', 'Nikki',
'Nikole', 'Nila', 'Nilda', 'Nina', 'Ninnie', 'Nira', 'Nita', 'Nobie',
'Noel', 'Noelia', 'Noelle', 'Noemi', 'Noemie', 'Nohely', 'Nola',
'Nolia', 'Nolie', 'Noma', 'Nona', 'Nonie', 'Nora', 'Norah', 'Noreen',
'Norene', 'Noreta', 'Noretta', 'Norine', 'Norita', 'Norma', 'Nova',
'Novella', 'Nya', 'Nyah', 'Nyasia', 'Nyla', 'Nylah', 'Nyree', 'Ocie',
'Octa', 'Octavia', 'Octavie', 'Oda', 'Odalis', 'Odalys', 'Odelia',
'Odell', 'Odessa', 'Odette', 'Odie', 'Odile', 'Ofelia', 'Ola', 'Olar',
'Olena', 'Olene', 'Oleta', 'Olevia', 'Olga', 'Olie', 'Olinda', 'Oline',
'Oliva', 'Olive', 'Olivia', 'Olivine', 'Ollie', 'Olympia', 'Oma',
'Omie', 'Ona', 'Oneida', 'Oneta', 'Oney', 'Onie', 'Onnie', 'Opal',
'Opha', 'Ophelia', 'Ora', 'Orah', 'Oral', 'Oralia', 'Orelia', 'Orene',
'Orilla', 'Orlena', 'Orma', 'Orpha', 'Orra', 'Orrie', 'Osa', 'Osie',
'Ossie', 'Ota', 'Otelia', 'Otha', 'Ottie', 'Ottilia', 'Ottilie',
'Ouida', 'Ova', 'Ozell', 'Ozella', 'Ozie', 'Paige', 'Pairlee',
'Paisley', 'Paityn', 'Pallie', 'Palma', 'Paloma', 'Pam', 'Pamala',
'Pamela', 'Pamelia', 'Pamella', 'Pandora', 'Pansy', 'Paola', 'Paralee',
'Paris', 'Parker', 'Parlee', 'Parthenia', 'Pat', 'Patience', 'Patrica',
'Patrice', 'Patricia', 'Patsy', 'Patti', 'Pattie', 'Patty', 'Paula',
'Pauletta', 'Paulette', 'Paulina', 'Pauline', 'Payten', 'Payton',
'Pearl', 'Pearla', 'Pearle', 'Pearlene', 'Pearlie', 'Pearline',
'Pearly', 'Peggie', 'Peggy', 'Penelope', 'Penni', 'Pennie', 'Penny',
'Pepper', 'Perla', 'Permelia', 'Perri', 'Petra', 'Peyton', 'Phebe',
'Pheobe', 'Phillis', 'Philomena', 'Philomene', 'Phoebe', 'Phoenix',
'Phylicia', 'Phylis', 'Phyliss', 'Phyllis', 'Pink', 'Pinkey', 'Pinkie',
'Piper', 'Pluma', 'Pollie', 'Polly', 'Porsche', 'Porsha', 'Portia',
'Precious', 'Presley', 'Pricilla', 'Princess', 'Priscila', 'Priscilla',
'Prudence', 'Prudie', 'Qiana', 'Queen', 'Queenie', 'Quiana', 'Quinn',
'Rachael', 'Racheal', 'Rachel', 'Rachelle', 'Racquel', 'Rae', 'Raegan',
'Raelyn', 'Raelynn', 'Rafaela', 'Ragna', 'Raina', 'Ramona', 'Randi',
'Raquel', 'Rashida', 'Raven', 'Rayna', 'Rayne', 'Reagan', 'Reanna',
'Reatha', 'Reba', 'Rebeca', 'Rebecca', 'Rebekah', 'Reece', 'Reese',
'Regan', 'Regena', 'Regenia', 'Regina', 'Reilly', 'Reina', 'Rella',
'Rena', 'Renada', 'Renae', 'Renata', 'Rene', 'Renea', 'Renee', 'Renita',
'Rennie', 'Ressie', 'Reta', 'Retha', 'Retta', 'Rettie', 'Reva', 'Reyna',
'Rhea', 'Rheta', 'Rhianna', 'Rhiannon', 'Rhoda', 'Rhona', 'Rhonda',
'Rianna', 'Richelle', 'Ricki', 'Rihanna', 'Rikki', 'Riley', 'Rilla',
'Rillie', 'Rinda', 'Risa', 'Rita', 'River', 'Riya', 'Robbie', 'Robbin',
'Roberta', 'Robin', 'Robyn', 'Rochelle', 'Rocio', 'Roena', 'Rolanda',
'Roma', 'Romaine', 'Romona', 'Rona', 'Ronda', 'Roni', 'Ronna', 'Ronnie',
'Rory', 'Rosa', 'Rosabelle', 'Rosalee', 'Rosalia', 'Rosalie',
'Rosalind', 'Rosalinda', 'Rosaline', 'Rosalyn', 'Rosamond', 'Rosann',
'Rosanna', 'Rosanne', 'Rosaria', 'Rosario', 'Rose', 'Roseann',
'Roseanna', 'Roseanne', 'Rosella', 'Roselyn', 'Rosemarie', 'Rosemary',
'Rosena', 'Rosetta', 'Rosey', 'Rosia', 'Rosie', 'Rosina', 'Rosita',
'Roslyn', 'Rossie', 'Rosy', 'Rowan', 'Rowena', 'Roxana', 'Roxane',
'Roxann', 'Roxanna', 'Roxanne', 'Roxie', 'Roxy', 'Rozanne', 'Rozella',
'Rubi', 'Rubie', 'Ruby', 'Rubye', 'Ruie', 'Ruth', 'Rutha', 'Ruthann',
'Ruthanne', 'Ruthe', 'Ruthie', 'Ryann', 'Rylan', 'Rylee', 'Ryleigh',
'Rylie', 'Sabina', 'Sable', 'Sabra', 'Sabrina', 'Sada', 'Sade', 'Sadie',
'Sadye', 'Sage', 'Saige', 'Salena', 'Salina', 'Sallie', 'Sally',
'Salma', 'Salome', 'Samantha', 'Samara', 'Samatha', 'Samira', 'Samiyah',
'Sammie', 'Sanaa', 'Sanai', 'Sandi', 'Sandie', 'Sandra', 'Sandy',
'Saniya', 'Saniyah', 'Sanjuana', 'Sanjuanita', 'Sannie', 'Santa',
'Santana', 'Santina', 'Santos', 'Sara', 'Sarah', 'Sarahi', 'Sarai',
'Sariah', 'Sarina', 'Sarita', 'Sarrah', 'Sasha', 'Saundra', 'Savana',
'Savanah', 'Savanna', 'Savannah', 'Savilla', 'Scarlet', 'Scarlett',
'Sebrina', 'Selah', 'Selena', 'Selene', 'Selina', 'Selma', 'Sena',
'Senora', 'Serena', 'Serenity', 'Serina', 'Shae', 'Shaina', 'Shakira',
'Shalon', 'Shalonda', 'Shameka', 'Shamika', 'Shana', 'Shanae', 'Shanda',
'Shandra', 'Shane', 'Shaneka', 'Shanell', 'Shanelle', 'Shanequa',
'Shani', 'Shania', 'Shanice', 'Shaniece', 'Shanika', 'Shaniqua',
'Shanita', 'Shaniya', 'Shanna', 'Shannan', 'Shannen', 'Shannon',
'Shanon', 'Shanta', 'Shante', 'Shantel', 'Shantell', 'Shaquana',
'Shaquita', 'Shara', 'Shardae', 'Sharday', 'Sharde', 'Sharee', 'Sharen',
'Shari', 'Sharita', 'Sharla', 'Sharleen', 'Sharlene', 'Sharman',
'Sharon', 'Sharonda', 'Sharron', 'Sharyl', 'Sharyn', 'Shasta',
'Shatara', 'Shauna', 'Shaunna', 'Shavon', 'Shavonne', 'Shawanda',
'Shawna', 'Shawnda', 'Shawnee', 'Shawnna', 'Shawnte', 'Shay', 'Shayla',
'Shaylee', 'Shayna', 'Shea', 'Sheena', 'Sheila', 'Sheilah', 'Shelba',
'Shelbi', 'Shelbie', 'Shelby', 'Shelia', 'Shelley', 'Shelli', 'Shellie',
'Shelly', 'Shelva', 'Shelvia', 'Shelvie', 'Shena', 'Shenna', 'Sheree',
'Sheri', 'Sheridan', 'Sherie', 'Sherilyn', 'Sherita', 'Sherlyn',
'Sheron', 'Sherree', 'Sherri', 'Sherrie', 'Sherrill', 'Sherron',
'Sherry', 'Sherryl', 'Sheryl', 'Sheryll', 'Sheyla', 'Shianne', 'Shiela',
'Shiloh', 'Shira', 'Shirl', 'Shirlee', 'Shirleen', 'Shirlene',
'Shirley', 'Shirleyann', 'Shirlie', 'Shona', 'Shonda', 'Shonna',
'Shreya', 'Shyann', 'Shyanne', 'Shyla', 'Sibbie', 'Sibyl', 'Siddie',
'Sidney', 'Siena', 'Sienna', 'Sierra', 'Signa', 'Signe', 'Sigrid',
'Silvia', 'Simona', 'Simone', 'Sina', 'Sinda', 'Siobhan', 'Sister',
'Sky', 'Skye', 'Skyla', 'Skylar', 'Skyler', 'Sloane', 'Socorro',
'Sofia', 'Soledad', 'Somer', 'Sommer', 'Sondra', 'Sonia', 'Sonja',
'Sonji', 'Sonya', 'Sophia', 'Sophie', 'Sophronia', 'Spring', 'Stacey',
'Staci', 'Stacia', 'Stacie', 'Stacy', 'Star', 'Starla', 'Starr',
'Stasia', 'Stefani', 'Stefanie', 'Stella', 'Stephaine', 'Stephani',
'Stephania', 'Stephanie', 'Stephany', 'Stephenie', 'Stevie', 'Stormy',
'Sudie', 'Sue', 'Suellen', 'Sula', 'Summer', 'Sunday', 'Sunny',
'Sunshine', 'Susan', 'Susana', 'Susann', 'Susanna', 'Susannah',
'Susanne', 'Susie', 'Sussie', 'Suzan', 'Suzann', 'Suzanna', 'Suzanne',
'Suzette', 'Suzie', 'Suzy', 'Sybil', 'Sybilla', 'Syble', 'Sydell',
'Sydnee', 'Sydney', 'Sydni', 'Sydnie', 'Sylva', 'Sylvania', 'Sylvia',
'Symone', 'Syreeta', 'Tabatha', 'Tabetha', 'Tabitha', 'Tai', 'Taina',
'Taja', 'Takisha', 'Talia', 'Taliyah', 'Tamala', 'Tamara', 'Tamatha',
'Tambra', 'Tameka', 'Tamekia', 'Tamela', 'Tamera', 'Tami', 'Tamia',
'Tamica', 'Tamie', 'Tamika', 'Tamiko', 'Tamisha', 'Tammi', 'Tammie',
'Tammy', 'Tamra', 'Tamya', 'Tana', 'Tanesha', 'Tangela', 'Tania',
'Tanika', 'Tanisha', 'Taniya', 'Taniyah', 'Tanja', 'Tanya', 'Tara',
'Tarah', 'Taraji', 'Tari', 'Tarsha', 'Taryn', 'Tasha', 'Tashina',
'Tasia', 'Tatia', 'Tatiana', 'Tatianna', 'Tatum', 'Tatyana', 'Tatyanna',
'Tawana', 'Tawanda', 'Tawanna', 'Tawny', 'Tawnya', 'Taya', 'Tayla',
'Tayler', 'Taylor', 'Tea', 'Teagan', 'Teela', 'Teena', 'Tella',
'Tempie', 'Tena', 'Tenika', 'Tenisha', 'Tennessee', 'Tennie',
'Tennille', 'Tera', 'Teresa', 'Terese', 'Teressa', 'Teri', 'Terra',
'Terri', 'Terrie', 'Terry', 'Tess', 'Tessa', 'Tessie', 'Texanna',
'Texas', 'Texie', 'Thalia', 'Thea', 'Theda', 'Thekla', 'Thelma',
'Theodocia', 'Theodora', 'Theodosia', 'Theola', 'Theresa', 'Therese',
'Theresia', 'Theta', 'Thomasina', 'Thora', 'Thresa', 'Thursa', 'Thyra',
'Tia', 'Tiana', 'Tianna', 'Tiara', 'Tiarra', 'Tiera', 'Tierra',
'Tiesha', 'Tiffani', 'Tiffanie', 'Tiffany', 'Tilda', 'Tilla', 'Tillie',
'Tina', 'Tiney', 'Tinie', 'Tinnie', 'Tiny', 'Tisa', 'Tisha', 'Tishie',
'Tobi', 'Toby', 'Toccara', 'Tomasa', 'Tomeka', 'Tomika', 'Tommie',
'Tonda', 'Toni', 'Tonia', 'Tonja', 'Tonya', 'Tori', 'Torie', 'Torrie',
'Tory', 'Tosha', 'Toshiko', 'Towanda', 'Toya', 'Tracee', 'Tracey',
'Traci', 'Tracie', 'Tracy', 'Treasure', 'Treena', 'Trena', 'Tresa',
'Tressa', 'Tressie', 'Treva', 'Tricia', 'Trilby', 'Trina', 'Trinidad',
'Trinity', 'Trish', 'Trisha', 'Trista', 'Tristan', 'Tristen', 'Trudi',
'Trudie', 'Trudy', 'Trula', 'Tula', 'Twila', 'Twyla', 'Tyesha', 'Tyra',
'Ula', 'Una', 'Unique', 'Unknown', 'Ura', 'Ursula', 'Vada', 'Val',
'Valarie', 'Valencia', 'Valentina', 'Valentine', 'Valeria', 'Valerie',
'Valery', 'Valinda', 'Vallie', 'Valorie', 'Vanesa', 'Vanessa', 'Vannie',
'Vara', 'Vashti', 'Vassie', 'Veda', 'Vela', 'Velda', 'Velia', 'Vella',
'Velma', 'Velva', 'Velvet', 'Vena', 'Venessa', 'Venice', 'Venie',
'Venita', 'Vennie', 'Venus', 'Veola', 'Vera', 'Verda', 'Verdell',
'Verdie', 'Verena', 'Vergie', 'Verla', 'Verlene', 'Verlie', 'Verna',
'Verne', 'Vernell', 'Vernelle', 'Vernetta', 'Vernia', 'Vernice',
'Vernie', 'Vernita', 'Verona', 'Veronica', 'Versa', 'Versie', 'Vertie',
'Vessie', 'Vesta', 'Veta', 'Veva', 'Vicie', 'Vickey', 'Vicki', 'Vickie',
'Vicky', 'Victoria', 'Victorine', 'Victory', 'Vicy', 'Vida', 'Vikki',
'Villa', 'Vilma', 'Vina', 'Vincenza', 'Viney', 'Vinie', 'Vinnie',
'Viola', 'Violet', 'Violeta', 'Violetta', 'Violette', 'Vira', 'Virdie',
'Virgia', 'Virgie', 'Virginia', 'Viridiana', 'Vita', 'Viva', 'Vivian',
'Viviana', 'Vivien', 'Vivienne', 'Vlasta', 'Vonda', 'Vonetta', 'Vonnie',
'Wanda', 'Waneta', 'Wanita', 'Wava', 'Wende', 'Wendi', 'Wendy',
'Whitley', 'Whitney', 'Wilda', 'Wilhelmina', 'Wilhelmine', 'Willa',
'Willene', 'Willia', 'Willie', 'Williemae', 'Willodean', 'Willow',
'Wilma', 'Windy', 'Winifred', 'Winnie', 'Winnifred', 'Winona', 'Winter',
'Wynona', 'Xena', 'Ximena', 'Xiomara', 'Yadira', 'Yahaira', 'Yajaira',
'Yamilet', 'Yamilex', 'Yareli', 'Yaretzi', 'Yaritza', 'Yasmeen',
'Yasmin', 'Yasmine', 'Yazmin', 'Yesenia', 'Yessenia', 'Yetta',
'Yolanda', 'Yolonda', 'Yoselin', 'Yoshiko', 'Yuliana', 'Yulisa',
'Yulissa', 'Yuridia', 'Yvette', 'Yvonne', 'Zada', 'Zadie', 'Zaida',
'Zana', 'Zandra', 'Zaniyah', 'Zara', 'Zaria', 'Zariah', 'Zela', 'Zelda',
'Zelia', 'Zella', 'Zelma', 'Zelpha', 'Zena', 'Zenobia', 'Zeta', 'Zetta',
'Zettie', 'Zhane', 'Zillah', 'Zilpah', 'Zilpha', 'Zina', 'Zion', 'Zita',
'Zoa', 'Zoe', 'Zoey', 'Zoie', 'Zola', 'Zona', 'Zora', 'Zula',
)
first_names_male = (
'Aaden', 'Aarav', 'Aaron', 'Ab', 'Abb', 'Abbott', 'Abdiel', 'Abdul',
'Abdullah', 'Abe', 'Abel', 'Abelardo', 'Abie', 'Abner', 'Abraham',
'Abram', 'Ace', 'Acey', 'Acie', 'Acy', 'Adalberto', 'Adam', 'Adams',
'Adan', 'Add', 'Adelard', 'Adelbert', 'Aden', 'Adin', 'Aditya', 'Adlai',
'Admiral', 'Adolf', 'Adolfo', 'Adolph', 'Adolphus', 'Adonis', 'Adrain',
'Adrian', 'Adriel', 'Adrien', 'Adron', 'Aedan', 'Agustin', 'Agustus',
'Ah', 'Ahmad', 'Ahmed', 'Aidan', 'Aiden', 'Aidyn', 'Aime', 'Akeem',
'Al', 'Alan', 'Alanzo', 'Albert', 'Alberto', 'Albertus', 'Albin',
'Albion', 'Alby', 'Alcee', 'Alcide', 'Alden', 'Aldo', 'Alec', 'Aleck',
'Alejandro', 'Alek', 'Alessandro', 'Alex', 'Alexande', 'Alexander',
'Alexandre', 'Alexandro', 'Alexis', 'Alexzander', 'Alf', 'Alferd',
'Alfie', 'Alfonse', 'Alfonso', 'Alfonzo', 'Alford', 'Alfred', 'Alfredo',
'Alger', 'Algernon', 'Algie', 'Algot', 'Ali', 'Alijah', 'Allan',
'Allen', 'Allyn', 'Almer', 'Almon', 'Almond', 'Almus', 'Alois',
'Alonso', 'Alonza', 'Alonzo', 'Aloys', 'Aloysius', 'Alpheus', 'Alphons',
'Alphonse', 'Alphonso', 'Alphonsus', 'Alston', 'Alto', 'Alton', 'Alva',
'Alvah', 'Alvan', 'Alvaro', 'Alver', 'Alvia', 'Alvie', 'Alvin', 'Alvis',
'Alvy', 'Alwin', 'Amado', 'Amare', 'Amari', 'Amarion', 'Amasa',
'Ambers', 'Ambrose', 'Americo', 'Amerigo', 'Amil', 'Amin', 'Amir',
'Amit', 'Ammon', 'Amon', 'Amos', 'Ananias', 'Anastacio', 'Anatole',
'Ancel', 'Ancil', 'Anders', 'Anderson', 'Andon', 'Andra', 'Andrae',
'Andre', 'Andreas', 'Andres', 'Andrew', 'Andy', 'Anfernee', 'Angel',
'Angelo', 'Angus', 'Anibal', 'Ansel', 'Anson', 'Anthoney', 'Anthony',
'Antione', 'Antoine', 'Anton', 'Antone', 'Antonio', 'Antony', 'Antwain',
'Antwan', 'Antwon', 'Anwar', 'Arba', 'Arbie', 'Arch', 'Archer',
'Archibald', 'Archie', 'Ardell', 'Arden', 'Ari', 'Aric', 'Arjun',
'Arlan', 'Arland', 'Arlen', 'Arley', 'Arlie', 'Arlin', 'Arlington',
'Arlis', 'Arlo', 'Arlyn', 'Arman', 'Armand', 'Armando', 'Armani',
'Armin', 'Armond', 'Armstead', 'Arnav', 'Arne', 'Arnett', 'Arnie',
'Arno', 'Arnold', 'Arnoldo', 'Arnulfo', 'Aron', 'Arron', 'Arsenio',
'Art', 'Arther', 'Arthor', 'Arthur', 'Artie', 'Artis', 'Arturo',
'Arvel', 'Arvid', 'Arvil', 'Arvin', 'Arvo', 'Aryan', 'Asa', 'Asberry',
'Asbury', 'Ashby', 'Asher', 'Ashton', 'Atha', 'Atlas', 'Atticus',
'Attilio', 'Aubra', 'Aubrey', 'Audie', 'Audley', 'Audy', 'August',
'Auguste', 'Augustin', 'Augustine', 'Augustus', 'Aurelio', 'Aurthur',
'Austen', 'Austin', 'Auston', 'Austyn', 'Auther', 'Author', 'Authur',
'Autry', 'Avery', 'Avon', 'Axel', 'Ayaan', 'Aydan', 'Ayden', 'Aydin',
'Babe', 'Babyboy', 'Bailey', 'Baker', 'Baldwin', 'Ballard', 'Banks',
'Barnard', 'Barnett', 'Barney', 'Barnie', 'Baron', 'Barrett', 'Barrie',
'Barron', 'Barry', 'Bart', 'Bartholomew', 'Bartley', 'Barton', 'Bascom',
'Basil', 'Baxter', 'Bayard', 'Beau', 'Beckett', 'Beckham', 'Bedford',
'Beecher', 'Bell', 'Belton', 'Ben', 'Benard', 'Benedict', 'Benito',
'Benjaman', 'Benjamen', 'Benjamin', 'Benjamine', 'Benji', 'Benjiman',
'Benjman', 'Bennett', 'Bennie', 'Benny', 'Benson', 'Bentley', 'Benton',
'Berkley', 'Berlin', 'Bernard', 'Bernardo', 'Bernhard', 'Bernie',
'Berry', 'Bert', 'Bertie', 'Berton', 'Bertram', 'Bertrand', 'Beryl',
'Bethel', 'Bilal', 'Bill', 'Billie', 'Billy', 'Bird', 'Birt', 'Bishop',
'Bjorn', 'Blain', 'Blaine', 'Blair', 'Blaise', 'Blake', 'Blanchard',
'Blane', 'Blas', 'Blaze', 'Bliss', 'Bluford', 'Bo', 'Bob', 'Bobbie',
'Bobby', 'Bode', 'Bolden', 'Booker', 'Boone', 'Boris', 'Bose', 'Boss',
'Boston', 'Bowman', 'Boyce', 'Boyd', 'Boysie', 'Brad', 'Braden',
'Bradford', 'Bradley', 'Bradly', 'Brady', 'Bradyn', 'Braeden',
'Braedon', 'Braiden', 'Brain', 'Branch', 'Brandan', 'Branden',
'Brandin', 'Brandon', 'Brandt', 'Brandy', 'Brandyn', 'Brannon',
'Branson', 'Brant', 'Brantley', 'Braulio', 'Braxton', 'Brayan',
'Brayden', 'Braydon', 'Braylen', 'Braylon', 'Brendan', 'Brenden',
'Brendon', 'Brennan', 'Brennen', 'Brennon', 'Brent', 'Brenton', 'Bret',
'Brett', 'Brian', 'Brice', 'Bridger', 'Brien', 'Brion', 'Britt',
'Brittany', 'Britton', 'Brock', 'Broderick', 'Brodie', 'Brody',
'Brogan', 'Bronson', 'Brook', 'Brooks', 'Brown', 'Bruce', 'Bruno',
'Bryan', 'Bryant', 'Bryce', 'Brycen', 'Bryon', 'Bryson', 'Bryton',
'Buck', 'Bud', 'Budd', 'Buddie', 'Buddy', 'Buel', 'Buell', 'Buford',
'Bunk', 'Burdette', 'Buren', 'Burgess', 'Burk', 'Burke', 'Burl',
'Burleigh', 'Burley', 'Burnell', 'Burnett', 'Burney', 'Burnice',
'Burnie', 'Burns', 'Burr', 'Burrel', 'Burrell', 'Burt', 'Burton',
'Bush', 'Buster', 'Butch', 'Butler', 'Bynum', 'Byrd', 'Byron', 'Cade',
'Caden', 'Cael', 'Caesar', 'Caiden', 'Cain', 'Cal', 'Cale', 'Caleb',
'Calhoun', 'Callie', 'Callum', 'Calvin', 'Cam', 'Camden', 'Cameron',
'Camilo', 'Campbell', 'Camren', 'Camron', 'Camryn', 'Candido', 'Cannon',
'Canyon', 'Cap', 'Captain', 'Carey', 'Carl', 'Carleton', 'Carlie',
'Carlisle', 'Carlo', 'Carlos', 'Carlton', 'Carlyle', 'Carmel',
'Carmelo', 'Carmen', 'Carmine', 'Carnell', 'Carrie', 'Carrol',
'Carroll', 'Carsen', 'Carson', 'Carter', 'Cary', 'Cas', 'Case', 'Casen',
'Casey', 'Cash', 'Casimer', 'Casimir', 'Casimiro', 'Cason', 'Casper',
'Cass', 'Cassidy', 'Cassie', 'Cassius', 'Caswell', 'Cato', 'Cayden',
'Ceasar', 'Cecil', 'Cedric', 'Cedrick', 'Celestino', 'Cephus', 'Cesar',
'Ceylon', 'Chace', 'Chad', 'Chadd', 'Chadrick', 'Chadwick', 'Chaim',
'Chalmer', 'Chalmers', 'Champ', 'Chance', 'Chancey', 'Chancy',
'Chandler', 'Channing', 'Charle', 'Charles', 'Charley', 'Charlie',
'Charls', 'Charlton', 'Charly', 'Chas', 'Chase', 'Chauncey', 'Chauncy',
'Chaz', 'Che', 'Chesley', 'Chester', 'Chet', 'Cheyenne', 'Chin', 'Chip',
'Chris', 'Christ', 'Christian', 'Christina', 'Christion', 'Christop',
'Christoper', 'Christophe', 'Christopher', 'Chuck', 'Cicero', 'Clabe',
'Claiborne', 'Clair', 'Clarance', 'Clare', 'Clarence', 'Clark',
'Clarke', 'Clarnce', 'Claud', 'Claude', 'Claudie', 'Claudio',
'Claudius', 'Claus', 'Clay', 'Clayton', 'Clearence', 'Cleave', 'Clell',
'Clem', 'Clemence', 'Clemens', 'Clement', 'Clemente', 'Clemmie',
'Clemon', 'Cleo', 'Cleon', 'Cletus', 'Cleve', 'Cleveland', 'Clide',
'Cliff', 'Clifford', 'Clifton', 'Clint', 'Clinton', 'Clive', 'Clovis',
'Cloyd', 'Clyde', 'Coby', 'Codey', 'Codi', 'Codie', 'Cody', 'Coen',
'Cohen', 'Colbert', 'Colby', 'Cole', 'Coleman', 'Coleton', 'Coley',
'Colie', 'Colin', 'Collie', 'Collier', 'Collin', 'Collins', 'Collis',
'Colon', 'Colonel', 'Colt', 'Colten', 'Colter', 'Colton', 'Columbus',
'Colvin', 'Commodore', 'Con', 'Conard', 'Conley', 'Conner', 'Connie',
'Connor', 'Conor', 'Conrad', 'Constantine', 'Conway', 'Coolidge',
'Cooper', 'Corbett', 'Corbin', 'Cordaro', 'Cordell', 'Cordero', 'Corey',
'Cornel', 'Cornelious', 'Cornelius', 'Cornell', 'Corry', 'Cortez',
'Cortney', 'Corwin', 'Cory', 'Cosmo', 'Coty', 'Council', 'Courtland',
'Courtney', 'Coy', 'Craig', 'Crawford', 'Creed', 'Cris', 'Cristian',
'Cristobal', 'Cristofer', 'Cristopher', 'Crockett', 'Cruz', 'Cullen',
'Curley', 'Curt', 'Curtis', 'Curtiss', 'Cyril', 'Cyrus', 'Dabney',
'Dakoda', 'Dakota', 'Dakotah', 'Dale', 'Dallas', 'Dallin', 'Dalton',
'Dalvin', 'Damarcus', 'Damari', 'Damarion', 'Dameon', 'Damian',
'Damien', 'Damion', 'Damon', 'Damond', 'Dan', 'Dana', 'Dandre', 'Dane',
'Dangelo', 'Danial', 'Daniel', 'Dann', 'Dannie', 'Danniel', 'Danny',
'Dante', 'Daquan', 'Darby', 'Darcy', 'Darell', 'Daren', 'Darian',
'Darien', 'Darin', 'Dario', 'Darion', 'Darius', 'Darl', 'Darnell',
'Darold', 'Daron', 'Darrel', 'Darrell', 'Darren', 'Darrian', 'Darrick',
'Darrien', 'Darrin', 'Darrion', 'Darrius', 'Darron', 'Darry', 'Darryl',
'Darryle', 'Darryll', 'Darryn', 'Darvin', 'Darwin', 'Darwyn', 'Daryl',
'Daryle', 'Daryn', 'Dashawn', 'Daulton', 'Daunte', 'Davante', 'Dave',
'Davey', 'Davian', 'David', 'Davie', 'Davin', 'Davion', 'Davis',
'Davon', 'Davonta', 'Davonte', 'Davy', 'Dawson', 'Dax', 'Daxton',
'Dayne', 'Dayton', 'Deacon', 'Dean', 'Deandre', 'Deane', 'Deangelo',
'Deante', 'Declan', 'Dedric', 'Dedrick', 'Deegan', 'Deforest', 'Deion',
'Dejon', 'Dejuan', 'Del', 'Delano', 'Delbert', 'Dell', 'Della', 'Delma',
'Delmar', 'Delmas', 'Delmer', 'Delmus', 'Delos', 'Delphin', 'Delton',
'Delvin', 'Delwin', 'Demarco', 'Demarcus', 'Demario', 'Demarion',
'Demetri', 'Demetric', 'Demetrios', 'Demetrius', 'Demian', 'Demond',
'Demonte', 'Dempsey', 'Denis', 'Dennie', 'Dennis', 'Denny', 'Denton',
'Denver', 'Denzel', 'Denzell', 'Denzil', 'Deon', 'Deondre', 'Deonta',
'Deontae', 'Deonte', 'Dequan', 'Derald', 'Dereck', 'Derek', 'Dereon',
'Deric', 'Derick', 'Derik', 'Derl', 'Deron', 'Derrek', 'Derrell',
'Derrick', 'Derwin', 'Deryl', 'Desean', 'Deshaun', 'Deshawn', 'Desi',
'Desmond', 'Dessie', 'Destin', 'Destry', 'Devan', 'Devante', 'Devaughn',
'Deven', 'Devin', 'Devon', 'Devonta', 'Devontae', 'Devonte', 'Devyn',
'Deward', 'Dewayne', 'Dewey', 'Dewitt', 'Dexter', 'Diallo', 'Diamond',
'Diane', 'Dickie', 'Diego', 'Dijon', 'Dilan', 'Dillan', 'Dillard',
'Dillion', 'Dillon', 'Dimitri', 'Dimitrios', 'Dink', 'Dino', 'Dion',
'Dionicio', 'Dionte', 'Dirk', 'Dixon', 'Doc', 'Dock', 'Doctor', 'Doll',
'Dolph', 'Dolphus', 'Domenic', 'Domenick', 'Domenico', 'Domingo',
'Dominic', 'Dominick', 'Dominik', 'Don', 'Donaciano', 'Donal', 'Donald',
'Donat', 'Donato', 'Donavan', 'Donavon', 'Dondre', 'Donell', 'Donn',
'Donnell', 'Donnie', 'Donny', 'Donovan', 'Donta', 'Dontae', 'Donte',
'Dora', 'Dorian', 'Dorman', 'Dorr', 'Dorris', 'Dorsey', 'Doss', 'Doug',
'Douglas', 'Douglass', 'Dow', 'Doyle', 'Dozier', 'Drake', 'Draven',
'Drew', 'Drury', 'Duane', 'Duard', 'Dudley', 'Duff', 'Duke', 'Duncan',
'Durell', 'Durrell', 'Durward', 'Durwood', 'Dustan', 'Dustin', 'Dusty',
'Dustyn', 'Duwayne', 'Dwain', 'Dwaine', 'Dwane', 'Dwayne', 'Dwight',
'Dwyane', 'Dylan', 'Dyllan', 'Dylon', 'Ean', 'Earl', 'Earle', 'Earley',
'Earlie', 'Early', 'Earnest', 'Easton', 'Ebb', 'Ebbie', 'Eben',
'Ebenezer', 'Eber', 'Ebert', 'Ed', 'Edd', 'Eddie', 'Eddy', 'Eden',
'Edgar', 'Edgardo', 'Edie', 'Edison', 'Edmon', 'Edmond', 'Edmund',
'Edsel', 'Edson', 'Eduardo', 'Edw', 'Edward', 'Edwardo', 'Edwin',
'Effie', 'Efrain', 'Efrem', 'Efren', 'Egbert', 'Einar', 'Eino', 'Elam',
'Elbert', 'Elbridge', 'Elby', 'Elden', 'Elder', 'Eldon', 'Eldred',
'Eldridge', 'Elex', 'Elgie', 'Elgin', 'Eli', 'Elian', 'Elias', 'Elick',
'Elie', 'Eliezer', 'Eliga', 'Eligah', 'Elige', 'Elihu', 'Elijah',
'Eliot', 'Eliseo', 'Elisha', 'Elizah', 'Ell', 'Ellery', 'Elliot',
'Elliott', 'Ellis', 'Ellison', 'Ellsworth', 'Ellwood', 'Elmer', 'Elmo',
'Elmore', 'Elon', 'Elonzo', 'Eloy', 'Elroy', 'Elsworth', 'Elton',
'Elvin', 'Elvis', 'Elwin', 'Elwood', 'Elwyn', 'Ely', 'Elza', 'Elzie',
'Elzy', 'Emanuel', 'Emerson', 'Emery', 'Emett', 'Emil', 'Emile',
'Emiliano', 'Emilio', 'Emit', 'Emma', 'Emmanuel', 'Emmet', 'Emmett',
'Emmit', 'Emmitt', 'Emmons', 'Emory', 'Emry', 'Encarnacion', 'Ennis',
'Enoch', 'Enos', 'Enrico', 'Enrique', 'Enzo', 'Ephraim', 'Ephram',
'Ephriam', 'Epifanio', 'Erasmo', 'Erasmus', 'Erastus', 'Erby', 'Eric',
'Erich', 'Erick', 'Erie', 'Erik', 'Erin', 'Erland', 'Erle', 'Erling',
'Ernest', 'Ernesto', 'Ernie', 'Ernst', 'Errol', 'Ervin', 'Erving',
'Erwin', 'Esau', 'Esco', 'Esequiel', 'Esker', 'Esley', 'Essex',
'Esteban', 'Estel', 'Estes', 'Estevan', 'Estill', 'Eston', 'Ethan',
'Ethelbert', 'Ethen', 'Eugene', 'Eugenio', 'Eusebio', 'Eustace', 'Evan',
'Evander', 'Evans', 'Evelyn', 'Everet', 'Everett', 'Everette', 'Evert',
'Evertt', 'Ewald', 'Ewart', 'Ewell', 'Ewin', 'Ewing', 'Ezekiel',
'Ezell', 'Ezequiel', 'Ezra', 'Ezzard', 'Fabian', 'Faron', 'Farrell',
'Farris', 'Fate', 'Faustino', 'Fayette', 'Fed', 'Federico', 'Felipe',
'Felix', 'Felton', 'Fenton', 'Ferd', 'Ferdinand', 'Ferman', 'Fernand',
'Fernando', 'Ferrell', 'Ferris', 'Festus', 'Fidel', 'Fidencio',
'Fielding', 'Finis', 'Finley', 'Finn', 'Finnegan', 'Firman', 'Fisher',
'Fitzgerald', 'Fitzhugh', 'Fleet', 'Flem', 'Fleming', 'Fletcher',
'Flint', 'Florencio', 'Florentino', 'Florian', 'Floy', 'Floyd', 'Foch',
'Ford', 'Forest', 'Forrest', 'Foster', 'Fount', 'Foy', 'Frances',
'Francesco', 'Francis', 'Francisco', 'Franco', 'Frank', 'Frankie',
'Franklin', 'Franklyn', 'Franz', 'Frazier', 'Fred', 'Freddie', 'Freddy',
'Frederic', 'Frederick', 'Fredie', 'Fredric', 'Fredrick', 'Fredy',
'Freeman', 'Fremont', 'French', 'Friend', 'Fritz', 'Fuller', 'Fulton',
'Furman', 'Gabe', 'Gabriel', 'Gael', 'Gaetano', 'Gage', 'Gaige', 'Gail',
'Gaines', 'Gaither', 'Gale', 'Galen', 'Gannon', 'Gardner', 'Garett',
'Garey', 'Garfield', 'Garland', 'Garner', 'Garnet', 'Garnett', 'Garold',
'Garret', 'Garrett', 'Garrick', 'Garrison', 'Garry', 'Garth', 'Garvin',
'Gary', 'Gasper', 'Gaston', 'Gauge', 'Gaven', 'Gavin', 'Gavyn', 'Gay',
'Gayle', 'Gaylen', 'Gaylon', 'Gaylord', 'Gearld', 'Geary', 'Gee',
'Genaro', 'Gene', 'General', 'Genie', 'Gennaro', 'Geno', 'Geo', 'Geoff',
'Geoffrey', 'George', 'Georgie', 'Geovanni', 'Gerald', 'Geraldo',
'Gerard', 'Gerardo', 'Gerhard', 'Gerhardt', 'Germaine', 'German',
'Gerold', 'Gerrit', 'Gerry', 'Giancarlo', 'Gianni', 'Gibson', 'Gideon',
'Gifford', 'Gil', 'Gilbert', 'Gilberto', 'Giles', 'Gilford', 'Gilman',
'Gilmer', 'Gilmore', 'Gino', 'Giovani', 'Giovanni', 'Giovanny',
'Giuseppe', 'Gladstone', 'Glen', 'Glendon', 'Glenn', 'Glenwood',
'Glover', 'Glynn', 'Godfrey', 'Goebel', 'Golden', 'Gonzalo', 'Gorden',
'Gordon', 'Gorge', 'Gottlieb', 'Governor', 'Grady', 'Grafton', 'Graham',
'Grant', 'Granville', 'Graves', 'Gray', 'Graydon', 'Grayling',
'Grayson', 'Green', 'Greene', 'Greg', 'Gregg', 'Greggory', 'Gregorio',
'Gregory', 'Greyson', 'Griffin', 'Griffith', 'Grove', 'Grover', 'Guido',
'Guilford', 'Guillermo', 'Gunnar', 'Gunner', 'Gurney', 'Gus', 'Guss',
'Gussie', 'Gust', 'Gustaf', 'Gustav', 'Gustave', 'Gustavo', 'Gustavus',
'Guthrie', 'Guy', 'Haden', 'Hadley', 'Haiden', 'Hakeem', 'Hakim', 'Hal',
'Halbert', 'Hale', 'Hall', 'Halley', 'Hallie', 'Halsey', 'Ham',
'Hamilton', 'Hamp', 'Hampton', 'Hamza', 'Handy', 'Hank', 'Hans',
'Hansel', 'Hansford', 'Hanson', 'Harden', 'Hardie', 'Hardin', 'Harding',
'Hardy', 'Harl', 'Harlan', 'Harland', 'Harlen', 'Harley', 'Harlie',
'Harlon', 'Harlow', 'Harm', 'Harman', 'Harmon', 'Harold', 'Harper',
'Harrell', 'Harrie', 'Harris', 'Harrison', 'Harrold', 'Harry', 'Hart',
'Hartley', 'Hartwell', 'Harve', 'Harvey', 'Harvie', 'Harvy', 'Hasan',
'Haskell', 'Hassan', 'Hattie', 'Haven', 'Hayden', 'Hayes', 'Hays',
'Hayward', 'Haywood', 'Hazen', 'Heath', 'Heber', 'Hebert', 'Hector',
'Helmer', 'Hence', 'Henderson', 'Henery', 'Henri', 'Henry', 'Herb',
'Herbert', 'Heriberto', 'Herman', 'Hermann', 'Hermon', 'Hernan',
'Herschel', 'Hershel', 'Hershell', 'Hervey', 'Heyward', 'Hezekiah',
'Hezzie', 'Hideo', 'Hilario', 'Hilary', 'Hilbert', 'Hill', 'Hillard',
'Hillary', 'Hillery', 'Hilliard', 'Hilmer', 'Hilton', 'Hiram',
'Hiroshi', 'Hjalmar', 'Hjalmer', 'Hobart', 'Hobert', 'Hobson', 'Hoke',
'Holden', 'Holland', 'Hollie', 'Hollis', 'Holmes', 'Homer', 'Hoover',
'Hope', 'Horace', 'Horacio', 'Horatio', 'Horton', 'Hosea', 'Hosie',
'Hosteen', 'Houston', 'Howard', 'Howell', 'Hoy', 'Hoyt', 'Hubbard',
'Hubert', 'Hudson', 'Huey', 'Hugh', 'Hughes', 'Hughey', 'Hughie',
'Hugo', 'Humberto', 'Humphrey', 'Hung', 'Hunt', 'Hunter', 'Hurbert',
'Hurley', 'Huston', 'Huy', 'Hyman', 'Hymen', 'Hyrum', 'Ian', 'Ibrahim',
'Ida', 'Ignacio', 'Ignatius', 'Ignatz', 'Ike', 'Illya', 'Imanol',
'Immanuel', 'Infant', 'Ingram', 'Ira', 'Irl', 'Irven', 'Irvin',
'Irvine', 'Irving', 'Irwin', 'Isaac', 'Isaak', 'Isadore', 'Isai',
'Isaiah', 'Isaias', 'Isam', 'Ishaan', 'Isham', 'Ishmael', 'Isiah',
'Isidor', 'Isidore', 'Isidro', 'Ismael', 'Isom', 'Israel', 'Isreal',
'Issac', 'Iva', 'Ivan', 'Iver', 'Iverson', 'Ivey', 'Ivor', 'Ivory',
'Ivy', 'Izaiah', 'Izayah', 'Jabari', 'Jabbar', 'Jabez', 'Jace', 'Jack',
'Jackson', 'Jacky', 'Jacob', 'Jacoby', 'Jacques', 'Jacquez', 'Jade',
'Jaden', 'Jadiel', 'Jadon', 'Jadyn', 'Jaeden', 'Jagger', 'Jaheem',
'Jaheim', 'Jahiem', 'Jahir', 'Jaiden', 'Jaidyn', 'Jaime', 'Jaimie',
'Jair', 'Jairo', 'Jajuan', 'Jake', 'Jakob', 'Jakobe', 'Jaleel', 'Jalen',
'Jalon', 'Jamaal', 'Jamal', 'Jamar', 'Jamarcus', 'Jamari', 'Jamarion',
'Jame', 'Jameel', 'Jamel', 'James', 'Jameson', 'Jamey', 'Jamie',
'Jamil', 'Jamin', 'Jamir', 'Jamison', 'Jammie', 'Jan', 'Jaquan',
'Jaquez', 'Jarad', 'Jared', 'Jaren', 'Jaret', 'Jarett', 'Jarod',
'Jaron', 'Jarrad', 'Jarred', 'Jarrell', 'Jarret', 'Jarrett', 'Jarrod',
'Jarvis', 'Jase', 'Jasen', 'Jasiah', 'Jason', 'Jasper', 'Javen',
'Javier', 'Javion', 'Javon', 'Javonte', 'Jax', 'Jaxen', 'Jaxon',
'Jaxson', 'Jaxton', 'Jay', 'Jayce', 'Jaycob', 'Jaydan', 'Jayden',
'Jaydin', 'Jaydon', 'Jaylan', 'Jaylen', 'Jaylin', 'Jaylon', 'Jayme',
'Jaymes', 'Jayson', 'Jayvion', 'Jayvon', 'Jean', 'Jeb', 'Jed',
'Jedediah', 'Jedidiah', 'Jeff', 'Jefferey', 'Jefferson', 'Jeffery',
'Jeffie', 'Jeffrey', 'Jeffry', 'Jelani', 'Jemal', 'Jennings', 'Jens',
'Jensen', 'Jep', 'Jeptha', 'Jerad', 'Jerald', 'Jeramiah', 'Jeramie',
'Jeramy', 'Jere', 'Jered', 'Jerel', 'Jereme', 'Jeremey', 'Jeremiah',
'Jeremie', 'Jeremy', 'Jerimiah', 'Jerimy', 'Jermain', 'Jermaine',
'Jermey', 'Jerod', 'Jerold', 'Jerome', 'Jeromy', 'Jerrad', 'Jerrel',
'Jerrell', 'Jerrod', 'Jerrold', 'Jerry', 'Jess', 'Jesse', 'Jessee',
'Jessie', 'Jessy', 'Jesus', 'Jethro', 'Jett', 'Jettie', 'Jevon',
'Jewell', 'Jiles', 'Jim', 'Jimmie', 'Jimmy', 'Joaquin', 'Job', 'Jobe',
'Joe', 'Joel', 'Joeseph', 'Joesph', 'Joey', 'Johan', 'Johathan', 'John',
'Johnathan', 'Johnathon', 'Johney', 'Johnie', 'Johnnie', 'Johnny',
'Johnpaul', 'Johnson', 'Johny', 'Jon', 'Jonah', 'Jonas', 'Jonatan',
'Jonathan', 'Jonathon', 'Jones', 'Jonnie', 'Jordan', 'Jorden', 'Jordi',
'Jordon', 'Jordy', 'Jordyn', 'Jorge', 'Jory', 'Jose', 'Josef',
'Joseluis', 'Joseph', 'Josephus', 'Josh', 'Joshua', 'Joshuah', 'Josiah',
'Josue', 'Jovan', 'Jovani', 'Jovanni', 'Jovanny', 'Jovany', 'Joy',
'Juan', 'Judah', 'Judd', 'Jude', 'Judge', 'Judson', 'Juelz', 'Jule',
'Jules', 'Julian', 'Julien', 'Julio', 'Julious', 'Julius', 'Juluis',
'Junior', 'Junious', 'Junius', 'Justen', 'Justice', 'Justin', 'Juston',
'Justus', 'Justyn', 'Juwan', 'Kade', 'Kadeem', 'Kaden', 'Kadin',
'Kadyn', 'Kaeden', 'Kael', 'Kahlil', 'Kai', 'Kaiden', 'Kale', 'Kaleb',
'Kalel', 'Kalen', 'Kalvin', 'Kamari', 'Kamden', 'Kameron', 'Kamren',
'Kamron', 'Kamryn', 'Kane', 'Kanye', 'Kareem', 'Kareen', 'Karim',
'Karl', 'Karson', 'Karter', 'Kasen', 'Kasey', 'Kash', 'Kason', 'Kavon',
'Kayden', 'Kaye', 'Kayson', 'Kazuo', 'Keagan', 'Keandre', 'Keanu',
'Keaton', 'Keegan', 'Keenan', 'Keenen', 'Kegan', 'Keifer', 'Keion',
'Keith', 'Kelan', 'Kelby', 'Kellan', 'Kellen', 'Kelley', 'Kelly',
'Kelsey', 'Kelton', 'Kelvin', 'Kem', 'Ken', 'Kenan', 'Kendal',
'Kendall', 'Kendell', 'Kendrick', 'Kenji', 'Kennard', 'Kennedy',
'Kenneth', 'Kenney', 'Kennith', 'Kennth', 'Kenny', 'Kent', 'Kenton',
'Kenya', 'Kenyatta', 'Kenyon', 'Keon', 'Kermit', 'Kerry', 'Kerwin',
'Keshaun', 'Keshawn', 'Kevan', 'Keven', 'Kevin', 'Kevon', 'Keyon',
'Keyshawn', 'Khalid', 'Khalil', 'Khari', 'Khiry', 'Kian', 'Kiara',
'Kiefer', 'Kiel', 'Kieran', 'Kieth', 'Kiley', 'Killian', 'Kim',
'Kimball', 'Kimberly', 'King', 'Kingston', 'Kinte', 'Kip', 'Kipp',
'Kirby', 'Kirk', 'Kirt', 'Kit', 'Kiyoshi', 'Knox', 'Knute', 'Kobe',
'Koby', 'Koda', 'Kody', 'Koen', 'Kolby', 'Kole', 'Kolten', 'Kolton',
'Konner', 'Konnor', 'Korbin', 'Kordell', 'Korey', 'Kory', 'Kraig',
'Kris', 'Krish', 'Kristen', 'Kristian', 'Kristin', 'Kristofer',
'Kristoffer', 'Kristopher', 'Kunta', 'Kurt', 'Kurtis', 'Kwame', 'Kyan',
'Kylan', 'Kyle', 'Kyler', 'Kymani', 'Kyree', 'Kyson', 'Lacey', 'Lacy',
'Ladarius', 'Laddie', 'Lafayette', 'Lafe', 'Lamar', 'Lamarcus',
'Lambert', 'Lamont', 'Lamonte', 'Lance', 'Landan', 'Landen', 'Landin',
'Landon', 'Landyn', 'Lane', 'Lannie', 'Lanny', 'Laquan', 'Lark',
'Larkin', 'Laron', 'Larry', 'Lars', 'Larue', 'Lary', 'Lashawn',
'Latrell', 'Laurance', 'Laurel', 'Laurence', 'Lavar', 'Lavern',
'Laverne', 'Lavon', 'Lawerence', 'Lawrance', 'Lawrence', 'Lawson',
'Lawton', 'Lawyer', 'Layne', 'Layton', 'Lazaro', 'Le', 'Lea', 'Leamon',
'Leander', 'Leandro', 'Lee', 'Leeroy', 'Leif', 'Leigh', 'Leighton',
'Leland', 'Lem', 'Lemmie', 'Lemon', 'Lemuel', 'Len', 'Lena', 'Lenard',
'Lennie', 'Lennon', 'Lenny', 'Lenon', 'Lenord', 'Lenwood', 'Leo',
'Leon', 'Leonard', 'Leonardo', 'Leonce', 'Leonel', 'Leonidas',
'Leopold', 'Leopoldo', 'Leroy', 'Les', 'Lesley', 'Leslie', 'Less',
'Lessie', 'Lester', 'Levar', 'Levern', 'Levi', 'Levie', 'Levin',
'Levon', 'Levy', 'Lew', 'Lewis', 'Lex', 'Lexie', 'Liam', 'Lige',
'Lilburn', 'Lillard', 'Lim', 'Lincoln', 'Lindbergh', 'Lindell',
'Linden', 'Lindsay', 'Lindsey', 'Lindy', 'Link', 'Linn', 'Linnie',
'Linton', 'Linus', 'Linwood', 'Linzy', 'Lionel', 'Lisandro', 'Lish',
'Lisle', 'Liston', 'Little', 'Littleton', 'Llewellyn', 'Lloyd', 'Logan',
'Lon', 'London', 'Lone', 'Loney', 'Long', 'Lonie', 'Lonnie', 'Lonny',
'Lonzo', 'Lora', 'Loran', 'Loren', 'Lorenz', 'Lorenza', 'Lorenzo',
'Lorin', 'Loring', 'Lorne', 'Lott', 'Lou', 'Louie', 'Louis', 'Love',
'Lovell', 'Lovett', 'Lovie', 'Lowell', 'Loy', 'Loyal', 'Loyd', 'Luc',
'Luca', 'Lucas', 'Lucian', 'Luciano', 'Lucien', 'Lucio', 'Lucious',
'Lucius', 'Lucky', 'Ludwig', 'Lue', 'Luigi', 'Luis', 'Luka', 'Lukas',
'Luke', 'Lula', 'Lum', 'Lupe', 'Luster', 'Lute', 'Luther', 'Luverne',
'Lydell', 'Lyle', 'Lyman', 'Lyn', 'Lyndon', 'Lynn', 'Lynwood', 'Lyric',
'Mac', 'Macarthur', 'Mace', 'Maceo', 'Mack', 'Mackenzie', 'Madden',
'Maddox', 'Maddux', 'Madison', 'Mae', 'Mahlon', 'Major', 'Makai',
'Makhi', 'Mal', 'Malachi', 'Malakai', 'Malaki', 'Malcolm', 'Malcom',
'Male', 'Malik', 'Malvin', 'Mamie', 'Manford', 'Manley', 'Manly',
'Mannie', 'Manning', 'Mansfield', 'Manson', 'Manuel', 'Marc', 'Marcel',
'Marcelino', 'Marcell', 'Marcello', 'Marcellus', 'Marcelo', 'Marchello',
'Marco', 'Marcos', 'Marcus', 'Margarito', 'Mariano', 'Mario', 'Marion',
'Marius', 'Mark', 'Markel', 'Markell', 'Markus', 'Marland', 'Marley',
'Marlin', 'Marlo', 'Marlon', 'Marlyn', 'Marques', 'Marquez', 'Marquis',
'Marquise', 'Marrion', 'Marsh', 'Marshal', 'Marshall', 'Mart',
'Martell', 'Martez', 'Martin', 'Marty', 'Marvin', 'Masao', 'Mason',
'Mat', 'Mateo', 'Math', 'Mathew', 'Mathews', 'Mathias', 'Matias',
'Matt', 'Matteo', 'Matthew', 'Matthias', 'Maurice', 'Mauricio', 'Mauro',
'Maury', 'Maverick', 'Max', 'Maxie', 'Maxim', 'Maximilian',
'Maximiliano', 'Maximillian', 'Maximo', 'Maximus', 'Maxwell', 'Maxx',
'May', 'Maynard', 'Mayo', 'Mcarthur', 'Mckinley', 'Mearl', 'Mekhi',
'Mel', 'Melbourne', 'Mell', 'Melton', 'Melville', 'Melvin', 'Melvyn',
'Memphis', 'Menachem', 'Mercer', 'Merl', 'Merle', 'Merlin', 'Merlyn',
'Merrill', 'Merritt', 'Merton', 'Mervin', 'Mervyn', 'Merwin', 'Messiah',
'Metro', 'Meyer', 'Micah', 'Michael', 'Michal', 'Michale', 'Micheal',
'Michel', 'Michial', 'Mickey', 'Micky', 'Miguel', 'Miguelangel',
'Mikal', 'Mike', 'Mikeal', 'Mikel', 'Mikhail', 'Milan', 'Milas',
'Milburn', 'Miles', 'Milford', 'Millard', 'Miller', 'Mills', 'Milo',
'Milton', 'Miner', 'Minor', 'Minoru', 'Misael', 'Mitch', 'Mitchel',
'Mitchell', 'Moe', 'Mohamed', 'Mohammad', 'Mohammed', 'Moises',
'Monroe', 'Mont', 'Montana', 'Monte', 'Montel', 'Montgomery', 'Montie',
'Montrell', 'Monty', 'Moody', 'Mordechai', 'Morgan', 'Morris',
'Mortimer', 'Morton', 'Mose', 'Moses', 'Moshe', 'Muhammad', 'Murdock',
'Murl', 'Murphy', 'Murray', 'Murry', 'Mustafa', 'Mychal', 'Myer',
'Mykel', 'Myles', 'Myrl', 'Myron', 'Myrtle', 'Najee', 'Nakia', 'Namon',
'Napoleon', 'Nash', 'Nasir', 'Nat', 'Nathan', 'Nathanael', 'Nathanial',
'Nathaniel', 'Nathen', 'Neal', 'Ned', 'Needham', 'Neely', 'Nehemiah',
'Neil', 'Nello', 'Nels', 'Nelson', 'Nery', 'Nestor', 'Nevin', 'Newell',
'Newman', 'Newt', 'Newton', 'Nicholas', 'Nicholaus', 'Nick', 'Nicklaus',
'Nickolas', 'Nicky', 'Nico', 'Nicolas', 'Nigel', 'Nikhil', 'Nikko',
'Niko', 'Nikolai', 'Nikolas', 'Nile', 'Niles', 'Nils', 'Nim', 'Noah',
'Noble', 'Noe', 'Noel', 'Nolan', 'Nolen', 'Norbert', 'Norberto',
'Norman', 'Normand', 'Norris', 'North', 'Norton', 'Norval', 'Norwood',
'Nunzio', 'Oakley', 'Obe', 'Obed', 'Obie', 'Ocie', 'Octave', 'Octavio',
'Octavius', 'Oda', 'Oddie', 'Odell', 'Odie', 'Odin', 'Odis', 'Odus',
'Offie', 'Ogden', 'Okey', 'Ola', 'Olaf', 'Olan', 'Oland', 'Ole', 'Olen',
'Oley', 'Olie', 'Olin', 'Oliver', 'Ollie', 'Olof', 'Omar', 'Omari',
'Omarion', 'Omer', 'Oneal', 'Ora', 'Oral', 'Oran', 'Orange', 'Oren',
'Orie', 'Orin', 'Orion', 'Oris', 'Orla', 'Orland', 'Orlando', 'Orley',
'Orlin', 'Orlo', 'Orren', 'Orrie', 'Orrin', 'Orris', 'Orson', 'Orval',
'Orvel', 'Orvil', 'Orville', 'Orvin', 'Orvis', 'Osbaldo', 'Osborn',
'Osborne', 'Oscar', 'Osie', 'Ossie', 'Osvaldo', 'Oswald', 'Oswaldo',
'Otha', 'Othel', 'Otho', 'Otis', 'Ott', 'Ottie', 'Ottis', 'Otto', 'Ova',
'Ovid', 'Ovila', 'Owen', 'Owens', 'Ozell', 'Ozie', 'Ozzie', 'Pablo',
'Page', 'Palmer', 'Paris', 'Park', 'Parker', 'Parley', 'Parrish',
'Pascal', 'Pasquale', 'Pat', 'Pate', 'Patric', 'Patrick', 'Paul',
'Paulo', 'Paxton', 'Payton', 'Pearley', 'Pedro', 'Percival', 'Percy',
'Perley', 'Pernell', 'Perry', 'Pershing', 'Pete', 'Peter', 'Peyton',
'Phil', 'Philip', 'Phillip', 'Philo', 'Phoenix', 'Pierce', 'Pierre',
'Pinkney', 'Pleas', 'Pleasant', 'Ples', 'Plummer', 'Polk', 'Porfirio',
'Porter', 'Posey', 'Powell', 'Pranav', 'Pratt', 'Prentice', 'Prentiss',
'Presley', 'Press', 'Preston', 'Price', 'Primus', 'Prince', 'Prosper',
'Pryor', 'Purl', 'Quentin', 'Quincy', 'Quinn', 'Quint', 'Quinten',
'Quintin', 'Quinton', 'Rae', 'Raekwon', 'Rafael', 'Rafe', 'Raheem',
'Rahn', 'Rahsaan', 'Rahul', 'Raiden', 'Rakeem', 'Raleigh', 'Ralph',
'Ramiro', 'Ramon', 'Ramsey', 'Rance', 'Rand', 'Randal', 'Randall',
'Randel', 'Randell', 'Randle', 'Randolf', 'Randolph', 'Randy', 'Ransom',
'Raoul', 'Raphael', 'Raquan', 'Ras', 'Rashaad', 'Rashaan', 'Rashad',
'Rashawn', 'Rasheed', 'Raul', 'Raven', 'Ray', 'Rayan', 'Rayburn',
'Rayfield', 'Rayford', 'Raymon', 'Raymond', 'Raymundo', 'Raynard',
'Rayshawn', 'Reagan', 'Reason', 'Red', 'Redden', 'Redmond', 'Reece',
'Reed', 'Reese', 'Refugio', 'Regan', 'Reggie', 'Reginal', 'Reginald',
'Regis', 'Reid', 'Reilly', 'Reinaldo', 'Reinhold', 'Reino', 'Remington',
'Remy', 'Renaldo', 'Renard', 'Rene', 'Reno', 'Reuben', 'Reubin', 'Rex',
'Rexford', 'Rey', 'Reyes', 'Reynaldo', 'Reynold', 'Reynolds', 'Rhett',
'Rhoda', 'Rhys', 'Rian', 'Ricardo', 'Ricci', 'Rice', 'Rich', 'Richard',
'Richie', 'Richmond', 'Rick', 'Rickey', 'Ricki', 'Rickie', 'Ricky',
'Rico', 'Ridge', 'Rigoberto', 'Riley', 'Rishi', 'Ritchie', 'River',
'Rob', 'Robb', 'Robbie', 'Robbin', 'Robby', 'Robert', 'Roberto',
'Robin', 'Robley', 'Robt', 'Roby', 'Rocco', 'Rock', 'Rocky', 'Rod',
'Roddy', 'Roderic', 'Roderick', 'Rodger', 'Rodney', 'Rodolfo',
'Rodrick', 'Rodrigo', 'Roe', 'Roel', 'Rogelio', 'Roger', 'Rogers',
'Rohan', 'Roland', 'Rolando', 'Rolf', 'Roll', 'Rolla', 'Rolland',
'Rollie', 'Rollin', 'Rollo', 'Roma', 'Roman', 'Rome', 'Romello',
'Romeo', 'Romie', 'Ron', 'Ronal', 'Ronald', 'Ronaldo', 'Ronan',
'Rondal', 'Ronin', 'Ronnie', 'Ronny', 'Roosevelt', 'Rory', 'Rosario',
'Rosco', 'Roscoe', 'Rosendo', 'Rosevelt', 'Ross', 'Rossie', 'Roswell',
'Rowan', 'Rowland', 'Roy', 'Royal', 'Royce', 'Rube', 'Ruben', 'Rubin',
'Ruby', 'Rudolf', 'Rudolfo', 'Rudolph', 'Rudy', 'Rueben', 'Ruel',
'Ruffin', 'Ruffus', 'Rufus', 'Rupert', 'Rush', 'Russ', 'Russel',
'Russell', 'Rustin', 'Rusty', 'Rutherford', 'Ryan', 'Ryder', 'Ryker',
'Rylan', 'Ryland', 'Rylee', 'Ryley', 'Ryne', 'Sabastian', 'Sage',
'Saint', 'Sal', 'Salomon', 'Salvador', 'Salvatore', 'Sam', 'Samie',
'Samir', 'Sammie', 'Sammy', 'Sampson', 'Samson', 'Samual', 'Samuel',
'Sanders', 'Sandy', 'Sanford', 'Santana', 'Santiago', 'Santino',
'Santo', 'Santos', 'Saul', 'Saverio', 'Savion', 'Savon', 'Sawyer',
'Schley', 'Schuyler', 'Scot', 'Scott', 'Scottie', 'Scotty', 'Seaborn',
'Seamus', 'Sean', 'Sebastian', 'Sedrick', 'Seldon', 'Selmer', 'Semaj',
'Seneca', 'Sergio', 'Seth', 'Severo', 'Severt', 'Seward', 'Seymour',
'Shad', 'Shade', 'Shafter', 'Shamar', 'Shan', 'Shane', 'Shannon',
'Shanon', 'Shaquan', 'Shaquille', 'Sharif', 'Sharon', 'Shaun', 'Shawn',
'Shay', 'Shayne', 'Shea', 'Shedrick', 'Shelby', 'Sheldon', 'Shelley',
'Shellie', 'Shelly', 'Shelton', 'Shemar', 'Shep', 'Shepherd',
'Sheridan', 'Sherman', 'Sherrill', 'Sherwin', 'Sherwood', 'Shirley',
'Shoji', 'Shon', 'Shyheim', 'Sid', 'Sidney', 'Sie', 'Sigmund', 'Sigurd',
'Silas', 'Silver', 'Silvester', 'Silvio', 'Sim', 'Simeon', 'Simmie',
'Simon', 'Simpson', 'Sincere', 'Sing', 'Skip', 'Skylar', 'Skyler',
'Slade', 'Smith', 'Sol', 'Soloman', 'Solomon', 'Solon', 'Son', 'Sonny',
'Soren', 'Spencer', 'Spenser', 'Spurgeon', 'Squire', 'Stacey', 'Stacy',
'Stafford', 'Stan', 'Stanford', 'Stanislaus', 'Stanley', 'Stanton',
'Starling', 'Stefan', 'Stephan', 'Stephanie', 'Stephen', 'Stephon',
'Sterling', 'Stetson', 'Stevan', 'Steve', 'Steven', 'Stevie', 'Steward',
'Stewart', 'Stone', 'Stonewall', 'Stoney', 'Storm', 'Stuart',
'Sullivan', 'Sumner', 'Susie', 'Sydney', 'Syed', 'Sylas', 'Sylvan',
'Sylvanus', 'Sylvester', 'Tab', 'Tad', 'Taft', 'Tahj', 'Taj', 'Tal',
'Talan', 'Talen', 'Tallie', 'Talmadge', 'Talmage', 'Talon', 'Tandy',
'Tanner', 'Tarik', 'Tariq', 'Tate', 'Tatsuo', 'Taurean', 'Taurus',
'Tavares', 'Tavaris', 'Tavian', 'Tavion', 'Tavon', 'Tayler', 'Taylor',
'Tayshaun', 'Teagan', 'Ted', 'Teddie', 'Teddy', 'Tegan', 'Telly',
'Terance', 'Terell', 'Terence', 'Terrance', 'Terrell', 'Terrence',
'Terrill', 'Terry', 'Tevin', 'Tex', 'Thad', 'Thaddeus', 'Theadore',
'Thedore', 'Theo', 'Theodis', 'Theodore', 'Theophile', 'Therman',
'Theron', 'Thomas', 'Thompson', 'Thor', 'Thornton', 'Thorwald', 'Thos',
'Thurlow', 'Thurman', 'Thurston', 'Tilden', 'Tillman', 'Tilman', 'Tim',
'Timmie', 'Timmothy', 'Timmy', 'Timothy', 'Tito', 'Titus', 'Tobe',
'Tobias', 'Tobie', 'Tobin', 'Toby', 'Tod', 'Todd', 'Toivo', 'Tolbert',
'Tollie', 'Tom', 'Toma', 'Tomas', 'Tomie', 'Tommie', 'Tommy', 'Toney',
'Tony', 'Torey', 'Toriano', 'Torrance', 'Torrence', 'Torrey', 'Torry',
'Tory', 'Toshio', 'Toy', 'Trace', 'Tracey', 'Tracy', 'Trae', 'Travis',
'Travon', 'Trayvon', 'Tre', 'Tremaine', 'Tremayne', 'Trent', 'Trenten',
'Trenton', 'Trever', 'Trevin', 'Trevion', 'Trevon', 'Trevor', 'Trey',
'Treyton', 'Treyvon', 'Trinidad', 'Trinity', 'Tripp', 'Tristan',
'Tristen', 'Tristian', 'Tristin', 'Triston', 'Troy', 'True', 'Trumaine',
'Truman', 'Trystan', 'Tuan', 'Tucker', 'Turner', 'Ty', 'Tye', 'Tyler',
'Tylor', 'Tyquan', 'Tyree', 'Tyreek', 'Tyreese', 'Tyrek', 'Tyreke',
'Tyrel', 'Tyrell', 'Tyrese', 'Tyrik', 'Tyrin', 'Tyriq', 'Tyrique',
'Tyron', 'Tyrone', 'Tyrus', 'Tyshawn', 'Tyson', 'Ulises', 'Ulysses',
'Unknown', 'Unnamed', 'Urban', 'Uriah', 'Uriel', 'Urijah', 'Val',
'Valentin', 'Valentine', 'Valentino', 'Van', 'Vance', 'Vander',
'Vashon', 'Vaughn', 'Vera', 'Vere', 'Vergil', 'Verl', 'Verle', 'Verlin',
'Verlon', 'Verlyn', 'Vern', 'Verna', 'Vernal', 'Verne', 'Vernell',
'Verner', 'Vernie', 'Vernon', 'Vester', 'Vic', 'Vicente', 'Vick',
'Victor', 'Victoriano', 'Vidal', 'Vince', 'Vincent', 'Vincenzo',
'Vinson', 'Vinton', 'Virge', 'Virgel', 'Virgie', 'Virgil', 'Virgle',
'Vito', 'Vollie', 'Volney', 'Von', 'Wade', 'Waino', 'Waldemar', 'Waldo',
'Walker', 'Wallace', 'Wally', 'Walt', 'Walter', 'Walton', 'Ward',
'Wardell', 'Warner', 'Warren', 'Wash', 'Washington', 'Watson', 'Watt',
'Waverly', 'Wayde', 'Wayland', 'Waylon', 'Wayman', 'Waymon', 'Wayne',
'Weaver', 'Webb', 'Webster', 'Weldon', 'Wellington', 'Wells', 'Welton',
'Wendel', 'Wendell', 'Wenzel', 'Werner', 'Wes', 'Wesley', 'Wess',
'West', 'Westin', 'Westley', 'Weston', 'Wheeler', 'Whit', 'Whitney',
'Wilber', 'Wilbert', 'Wilbur', 'Wilburn', 'Wiley', 'Wilford', 'Wilfred',
'Wilfredo', 'Wilfrid', 'Wilhelm', 'Wiliam', 'Wilkie', 'Will', 'Willaim',
'Willam', 'Willard', 'William', 'Williams', 'Willian', 'Williard',
'Willie', 'Willis', 'Willy', 'Wilmer', 'Wilson', 'Wilton', 'Windell',
'Winfield', 'Winford', 'Winfred', 'Wing', 'Winifred', 'Winnie',
'Winston', 'Winthrop', 'Winton', 'Wirt', 'Wm', 'Wong', 'Wood', 'Woodie',
'Woodroe', 'Woodrow', 'Woodson', 'Woody', 'Worley', 'Worth', 'Wright',
'Wyatt', 'Wylie', 'Wyman', 'Xander', 'Xavier', 'Xzavier', 'Yaakov',
'Yadiel', 'Yael', 'Yahir', 'Yair', 'Yancy', 'Yandel', 'Yee', 'Yehuda',
'Yoel', 'York', 'Yosef', 'Yoshio', 'Young', 'Yurem', 'Yusuf',
'Zachariah', 'Zachary', 'Zachery', 'Zack', 'Zackary', 'Zackery', 'Zaid',
'Zaiden', 'Zain', 'Zaire', 'Zakary', 'Zander', 'Zane', 'Zavier',
'Zavion', 'Zayden', 'Zayne', 'Zeb', 'Zebulon', 'Zechariah', 'Zed',
'Zeke', 'Zenas', 'Zeno', 'Zigmund', 'Zion', 'Zollie',
)
first_names = first_names_male + first_names_female
last_names = (
'Abbott', 'Abernathy', 'Abshire', 'Adams', 'Altenwerth', 'Anderson',
'Ankunding', 'Armstrong', 'Auer', 'Aufderhar', 'Bahringer', 'Bailey',
'Balistreri', 'Barrows', 'Bartell', 'Bartoletti', 'Barton', 'Bashirian',
'Batz', 'Bauch', 'Baumbach', 'Bayer', 'Beahan', 'Beatty', 'Bechtelar',
'Becker', 'Bednar', 'Beer', 'Beier', 'Berge', 'Bergnaum', 'Bergstrom',
'Bernhard', 'Bernier', 'Bins', 'Blanda', 'Blick', 'Block', 'Bode',
'Boehm', 'Bogan', 'Bogisich', 'Borer', 'Bosco', 'Botsford', 'Boyer',
'Boyle', 'Bradtke', 'Brakus', 'Braun', 'Breitenberg', 'Brekke', 'Brown',
'Bruen', 'Buckridge', 'Carroll', 'Carter', 'Cartwright', 'Casper',
'Cassin', 'Champlin', 'Christiansen', 'Cole', 'Collier', 'Collins',
'Conn', 'Connelly', 'Conroy', 'Considine', 'Corkery', 'Cormier',
'Corwin', 'Cremin', 'Crist', 'Crona', 'Cronin', 'Crooks', 'Cruickshank',
'Cummerata', 'Cummings', 'Dach', 'D\'Amore', 'Daniel', 'Dare',
'Daugherty', 'Davis', 'Deckow', 'Denesik', 'Dibbert', 'Dickens',
'Dicki', 'Dickinson', 'Dietrich', 'Donnelly', 'Dooley', 'Douglas',
'Doyle', 'DuBuque', 'Durgan', 'Ebert', 'Effertz', 'Eichmann', 'Emard',
'Emmerich', 'Erdman', 'Ernser', 'Fadel', 'Fahey', 'Farrell', 'Fay',
'Feeney', 'Feest', 'Feil', 'Ferry', 'Fisher', 'Flatley', 'Frami',
'Franecki', 'Friesen', 'Fritsch', 'Funk', 'Gaylord', 'Gerhold',
'Gerlach', 'Gibson', 'Gislason', 'Gleason', 'Gleichner', 'Glover',
'Goldner', 'Goodwin', 'Gorczany', 'Gottlieb', 'Goyette', 'Grady',
'Graham', 'Grant', 'Green', 'Greenfelder', 'Greenholt', 'Grimes',
'Gulgowski', 'Gusikowski', 'Gutkowski', 'Gutmann', 'Haag', 'Hackett',
'Hagenes', 'Hahn', 'Haley', 'Halvorson', 'Hamill', 'Hammes', 'Hand',
'Hane', 'Hansen', 'Harber', 'Harris', 'Hartmann', 'Harvey', 'Hauck',
'Hayes', 'Heaney', 'Heathcote', 'Hegmann', 'Heidenreich', 'Heller',
'Herman', 'Hermann', 'Hermiston', 'Herzog', 'Hessel', 'Hettinger',
'Hickle', 'Hilll', 'Hills', 'Hilpert', 'Hintz', 'Hirthe', 'Hodkiewicz',
'Hoeger', 'Homenick', 'Hoppe', 'Howe', 'Howell', 'Hudson', 'Huel',
'Huels', 'Hyatt', 'Jacobi', 'Jacobs', 'Jacobson', 'Jakubowski',
'Jaskolski', 'Jast', 'Jenkins', 'Jerde', 'Johns', 'Johnson', 'Johnston',
'Jones', 'Kassulke', 'Kautzer', 'Keebler', 'Keeling', 'Kemmer',
'Kerluke', 'Kertzmann', 'Kessler', 'Kiehn', 'Kihn', 'Kilback', 'King',
'Kirlin', 'Klein', 'Kling', 'Klocko', 'Koch', 'Koelpin', 'Koepp',
'Kohler', 'Konopelski', 'Koss', 'Kovacek', 'Kozey', 'Krajcik',
'Kreiger', 'Kris', 'Kshlerin', 'Kub', 'Kuhic', 'Kuhlman', 'Kuhn',
'Kulas', 'Kunde', 'Kunze', 'Kuphal', 'Kutch', 'Kuvalis', 'Labadie',
'Lakin', 'Lang', 'Langosh', 'Langworth', 'Larkin', 'Larson', 'Leannon',
'Lebsack', 'Ledner', 'Leffler', 'Legros', 'Lehner', 'Lemke', 'Lesch',
'Leuschke', 'Lind', 'Lindgren', 'Littel', 'Little', 'Lockman', 'Lowe',
'Lubowitz', 'Lueilwitz', 'Luettgen', 'Lynch', 'Macejkovic', 'Maggio',
'Mann', 'Mante', 'Marks', 'Marquardt', 'Marvin', 'Mayer', 'Mayert',
'McClure', 'McCullough', 'McDermott', 'McGlynn', 'McKenzie',
'McLaughlin', 'Medhurst', 'Mertz', 'Metz', 'Miller', 'Mills',
'Mitchell', 'Moen', 'Mohr', 'Monahan', 'Moore', 'Morar', 'Morissette',
'Mosciski', 'Mraz', 'Mueller', 'Muller', 'Murazik', 'Murphy', 'Murray',
'Nader', 'Nicolas', 'Nienow', 'Nikolaus', 'Nitzsche', 'Nolan',
'Oberbrunner', 'O\'Connell', 'O\'Conner', 'O\'Hara', 'O\'Keefe',
'O\'Kon', 'Okuneva', 'Olson', 'Ondricka', 'O\'Reilly', 'Orn', 'Ortiz',
'Osinski', 'Pacocha', 'Padberg', 'Pagac', 'Parisian', 'Parker',
'Paucek', 'Pfannerstill', 'Pfeffer', 'Pollich', 'Pouros', 'Powlowski',
'Predovic', 'Price', 'Prohaska', 'Prosacco', 'Purdy', 'Quigley',
'Quitzon', 'Rath', 'Ratke', 'Rau', 'Raynor', 'Reichel', 'Reichert',
'Reilly', 'Reinger', 'Rempel', 'Renner', 'Reynolds', 'Rice', 'Rippin',
'Ritchie', 'Robel', 'Roberts', 'Rodriguez', 'Rogahn', 'Rohan',
'Rolfson', 'Romaguera', 'Roob', 'Rosenbaum', 'Rowe', 'Ruecker',
'Runolfsdottir', 'Runolfsson', 'Runte', 'Russel', 'Rutherford', 'Ryan',
'Sanford', 'Satterfield', 'Sauer', 'Sawayn', 'Schaden', 'Schaefer',
'Schamberger', 'Schiller', 'Schimmel', 'Schinner', 'Schmeler',
'Schmidt', 'Schmitt', 'Schneider', 'Schoen', 'Schowalter', 'Schroeder',
'Schulist', 'Schultz', 'Schumm', 'Schuppe', 'Schuster', 'Senger',
'Shanahan', 'Shields', 'Simonis', 'Sipes', 'Skiles', 'Smith', 'Smitham',
'Spencer', 'Spinka', 'Sporer', 'Stamm', 'Stanton', 'Stark', 'Stehr',
'Steuber', 'Stiedemann', 'Stokes', 'Stoltenberg', 'Stracke', 'Streich',
'Stroman', 'Strosin', 'Swaniawski', 'Swift', 'Terry', 'Thiel',
'Thompson', 'Tillman', 'Torp', 'Torphy', 'Towne', 'Toy', 'Trantow',
'Tremblay', 'Treutel', 'Tromp', 'Turcotte', 'Turner', 'Ullrich',
'Upton', 'Vandervort', 'Veum', 'Volkman', 'Von', 'VonRueden', 'Waelchi',
'Walker', 'Walsh', 'Walter', 'Ward', 'Waters', 'Watsica', 'Weber',
'Wehner', 'Weimann', 'Weissnat', 'Welch', 'West', 'White', 'Wiegand',
'Wilderman', 'Wilkinson', 'Will', 'Williamson', 'Willms', 'Windler',
'Wintheiser', 'Wisoky', 'Wisozk', 'Witting', 'Wiza', 'Wolf', 'Wolff',
'Wuckert', 'Wunsch', 'Wyman', 'Yost', 'Yundt', 'Zboncak', 'Zemlak',
'Ziemann', 'Zieme', 'Zulauf',
)
prefixes_female = ('Mrs.', 'Ms.', 'Miss', 'Dr.')
prefixes_male = ('Mr.', 'Dr.')
suffixes_female = ('MD', 'DDS', 'PhD', 'DVM')
suffixes_male = ('Jr.', 'Sr.', 'I', 'II', 'III',
'IV', 'V', 'MD', 'DDS', 'PhD', 'DVM')
|
danhuss/faker
|
faker/providers/person/en/__init__.py
|
Python
|
mit
| 86,729
|
[
"Amber",
"Brian",
"COLUMBUS",
"CRYSTAL",
"Dalton",
"Desmond",
"MOE"
] |
e079965bbc80bb3e25cc4efc17e35660d25522f769a6c95a6398ced8b9450bcb
|
"""Calculator for General Energy-based Fragmentation"""
from sys import stdout
from ase.units import Ha
from ase.calculators.nwchem import NWChem
from numpy import zeros, append, array, transpose, savetxt, abs
#maybe later we can let it inherit from the Calculator base class
class GEBF(object):
"""Initializes the General Energy-based Fragmentation object
Parameters:
atoms: atoms (object)
fragments: list of lists containing indices defining molecules or, more generally, fragments; optional, and if omitted
molecular fragments will be generated based on the covalent radii
of the atoms in the atoms object
if a single number is provided it is used as a fudge factor for
the molecule generation
gradients: boolean determining whether gradients are to be calculated
fullgrad: boolean determining whether fully correct gradients are to be
calculated or only the ones based on molecule by molecule embedding
"""
#these are currently just the same as for NWChem except for the included bq
# default_parameters = dict(
# xc='LDA',
# smearing=None,
# charge=None,
# task='gradient',
# geometry='nocenter noautosym',
# convergence={'energy': None,
# 'density': None,
# 'gradient': None,
# 'lshift': None,
# # set lshift to 0.0 for nolevelshifting
# },
# basis='3-21G',
# basispar=None,
# ecp=None,
# so=None,
# spinorbit=False,
# odft=False,
# bq=None,
# raw='') # additional outside of dft block control string
#
# implemented_properties = ['energy'] #, forces]
def __init__(self, atoms, fragments=None, gradients=False, fullgrad=False):
self.fragments = fragments
#check if GEBF is implemented for the calculator attached to the atoms object
#also check if there is a calculator object attached
#FU| that is probably the wrong way of doing it
self.atoms = atoms
#just for now, to make it work below
#is there an ASE function to get distance matrix?
#does that yield the same distance matrix as our method?
#self.distmat = None
self.distmat = self.atoms.get_distances_all()
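#NOTE (editor's comment): stock ASE provides Atoms.get_all_distances(); get_distances_all()
#appears to be a helper specific to this fork, so keep that in mind when porting this
#calculator to an unpatched ASE installation.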
if atoms.get_calculator() is None:
printf("Atoms object has no Calculator attached, cannot continue\n")
exit ( 1 )
if not any(isinstance(atoms.get_calculator(), e) for e in [NWChem]):
printf("GEBF calculator is currently only supported with NWChem\n")
exit ( 1 )
if fragments is None:
self.fudge = 1.0
self.generate_molecules(fudge=self.fudge)
elif isinstance(fragments, float):
self.fudge = fragments
self.generate_molecules(fudge=self.fudge)
else:
self.fragments = fragments
def generate_molecules(self, fudge=1.0):
"""Generate molecules based on distance comparison to covalent radii
Todo: add consistency check with database molecules
"""
self.fragments = []
def generate_fragments(self):
"""Generate fragments according to original GEBF literature.
Not yet clear how exactly but can do later
"""
#and maybe, but only maybe, we should here also use self.molecules, nah always call it fragments
self.fragments = []
#or add atoms to GEBF, i.e., GEBF.atoms
#do we really need the fragments here a second time, after the GEBF has already been initialized with fragments argument?
def run(self):
if (self.fragments is None) or (self.fragments == []):
printf("Something is wrong, no fragments defined\n")
exit ( 1 )
#get initial guess on charges
#there also exists a function atoms.set_initial_charges, but I don't know what it actually does... (need to check that, I mean ;-)
charges = zeros(len(self.atoms))
energies = zeros(len(self.fragments))
calc = self.atoms.get_calculator()
nfrg = len(self.fragments)
#unit trouble, for now we'll do everything in atomic units, i.e., Ha
for i, frag in enumerate(self.fragments):
stdout.flush()
stdout.write('working on %i / %i\r' %(i, nfrg))
tmpat = self.atoms[frag].copy()
tmpat.set_calculator(calc)
#tmpat.get_potential_energy()
charges[frag] = tmpat.get_charges()
energies[i] = tmpat.get_potential_energy() / Ha
self.atoms.set_array('charges', charges)
savetxt('initial-charges.dat', charges)
savetxt('initial-energies.dat', energies)
#basically something like this:
converged = False
totnr = [energies.sum()]
itr = 0
while not converged:
#keep running
stdout.write('\n')
nchg, nenr = self.one_iteration()
nenr /= Ha
#here, I guess, we should use the newly acquired charges. They should be the correct thing to remove...
slfnr = self.get_mm_self_energy(self.atoms, nchg)
stdout.write('self energy of point charges: %21.10f\n' %slfnr)
totnr.append(nenr.sum() + slfnr)
totdel = totnr[-1] - totnr[itr]
stdout.write('total energy in iteration %i: %21.10f\n' %(itr, totnr[-1]))
dchg = nchg - charges
denr = nenr - energies
#perform additional checks here, whether change in all energies and in all charges is smaller than convergence criterion
energies = nenr
charges = nchg
self.atoms.set_array('charges', charges)
savetxt('charges-%i.dat' %itr, charges)
savetxt('energies-%i.dat' %itr, energies)
if abs(totdel) < 1.e-6:
converged = True
itr += 1
continue
#subtract self-energy of charges
#check here, we need to reset distmat if we are doing more than only one GEBF run, e.g., for an MD run
def one_iteration(self):
calc = self.atoms.get_calculator()
ochg = self.atoms.get_array('charges')
charges = zeros(len(self.atoms))
energies = zeros(len(self.fragments))
nfrg = len(self.fragments)
for i, frag in enumerate(self.fragments):
#print frag
bqs = [k for k in range(len(self.atoms)) if k not in frag]
calc.parameters.bq = append(self.atoms.positions[bqs], transpose(array(ochg[bqs], ndmin=2)), axis=1)
tmpat = self.atoms[frag].copy()
tmpat.set_calculator(calc)
#print tmpat.get_calculator()
#tmpat.get_potential_energy()
charges[frag] = tmpat.get_charges()
chgtmp = ochg.copy()
chgtmp[frag] = charges[frag]
slfnr = self.get_mm_self_energy(self.atoms, chgtmp)
energies[i] = tmpat.get_potential_energy() - slfnr
stdout.flush()
stdout.write(' %i / %i done\r' %(i, nfrg))
#self.atoms.set_array('charges', charges)
return charges, energies
def get_mm_self_energy(self, atoms, charges):
"""calculate self-energy of the mm charges"""
#we could do this here also with the atoms object
numchg = len(charges)
numatm = len(atoms)
if numatm != numchg:
printf("Number of charges and atoms not the same, cannot continue\n")
exit ( 1 )
slfnr = 0.
#check here, we need to reset distmat if we are doing more than only one GEBF run, e.g., for an MD run
if self.distmat is None:
self.distmat = zeros((numchg, numchg))
for i in range(numchg):
for j in range(i+1, numchg):
self.distmat[i,j] = atoms.get_distance(i, j)
#distmat[j,i] = distmat[i,j]
for i in range(numchg):
for j in range(i+1, numchg):
rij = self.distmat[i, j]
#rij = atoms.get_distance(i, j)
#print charges[i], charges[j], rij
slfnr += ( charges[i] * charges[j] ) / rij
return slfnr
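#----------------------------------------------------------------------------
#Editor's usage sketch (not part of the original file): a rough outline of how this
#class is meant to be driven, assuming an Atoms object that already has an NWChem
#calculator attached; the fragment indices below are hypothetical.
#
#   from ase.calculators.nwchem import NWChem
#   atoms.set_calculator(NWChem(xc='B3LYP', basis='3-21G', task='gradient'))
#   gebf = GEBF(atoms, fragments=[[0, 1, 2], [3, 4, 5]])
#   gebf.run()   # iterates embedded fragment calculations until the total energy converges
#----------------------------------------------------------------------------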
|
PHOTOX/fuase
|
ase/ase/calculators/gebf-separate-calc.py
|
Python
|
gpl-2.0
| 8,324
|
[
"ASE",
"NWChem"
] |
a6516ffa73acf8dc3658b4983380563196cfdead1fe15cd3e4c3e6f9e0ffd98d
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import easy_thumbnails.fields
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
('main', '0035_auto_20141204_1708'),
]
operations = [
migrations.AlterField(
model_name='user',
name='how_found',
field=multiselectfield.db.fields.MultiSelectField(choices=[('internet', 'The Internet'), ('show', 'A presentation, brochure, flyer,... '), ('branch', 'The local branch'), ('member', 'Another member'), ('friends', 'Friends or family'), ('other', 'Other ...')], max_length=41, verbose_name='How did you hear about care4care ?'),
preserve_default=True,
),
migrations.AlterField(
model_name='user',
name='photo',
field=easy_thumbnails.fields.ThumbnailerImageField(upload_to='photos/', default='photos/default_avatar.png'),
preserve_default=True,
),
migrations.AlterField(
model_name='verifieduser',
name='offered_job',
field=multiselectfield.db.fields.MultiSelectField(choices=[('1', 'Visit home'), ('2', 'Companionship'), ('3', 'Transport by car'), ('4', 'Shopping'), ('5', 'House sitting'), ('6', 'Manual jobs'), ('7', 'Gardening'), ('8', 'Pet sitting'), ('9', 'Personal care'), ('a', 'Administrative'), ('b', 'Other ...')], max_length=21, verbose_name='What jobs you want to do?', blank=True),
preserve_default=True,
),
]
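# Note (editor's comment): max_length on these MultiSelectFields equals the length of all
# choice keys stored as a comma-separated string -- 'internet', 'show', 'branch', 'member',
# 'friends', 'other' joined by commas is 41 characters, and the eleven single-character job
# keys joined by commas is 21 -- i.e. the longest value the column ever needs to hold.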
|
MaximeBiset/care4care
|
main/migrations/0036_auto_20141204_1818.py
|
Python
|
agpl-3.0
| 1,581
|
[
"VisIt"
] |
c809e115024d018ab4664f37837b2cffe98284e10544a8dd513b081dea2ab4c5
|
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SketchRNN RNN definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def orthogonal(shape):
"""Orthogonal initilaizer."""
flat_shape = (shape[0], np.prod(shape[1:]))
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v
return q.reshape(shape)
def orthogonal_initializer(scale=1.0):
"""Orthogonal initializer."""
def _initializer(shape, dtype=tf.float32,
partition_info=None): # pylint: disable=unused-argument
return tf.constant(orthogonal(shape) * scale, dtype)
return _initializer
def lstm_ortho_initializer(scale=1.0):
"""LSTM orthogonal initializer."""
def _initializer(shape, dtype=tf.float32,
partition_info=None): # pylint: disable=unused-argument
size_x = shape[0]
size_h = shape[1] // 4 # assumes lstm.
t = np.zeros(shape)
t[:, :size_h] = orthogonal([size_x, size_h]) * scale
t[:, size_h:size_h * 2] = orthogonal([size_x, size_h]) * scale
t[:, size_h * 2:size_h * 3] = orthogonal([size_x, size_h]) * scale
t[:, size_h * 3:] = orthogonal([size_x, size_h]) * scale
return tf.constant(t, dtype)
return _initializer
class LSTMCell(tf.contrib.rnn.RNNCell):
"""Vanilla LSTM cell.
Uses ortho initializer, and also recurrent dropout without memory loss
(https://arxiv.org/abs/1603.05118)
"""
def __init__(self,
num_units,
forget_bias=1.0,
use_recurrent_dropout=False,
dropout_keep_prob=0.9):
self.num_units = num_units
self.forget_bias = forget_bias
self.use_recurrent_dropout = use_recurrent_dropout
self.dropout_keep_prob = dropout_keep_prob
@property
def state_size(self):
return 2 * self.num_units
@property
def output_size(self):
return self.num_units
def get_output(self, state):
unused_c, h = tf.split(state, 2, 1)
return h
def __call__(self, x, state, scope=None):
with tf.variable_scope(scope or type(self).__name__):
c, h = tf.split(state, 2, 1)
x_size = x.get_shape().as_list()[1]
w_init = None # uniform
h_init = lstm_ortho_initializer(1.0)
# Keep W_xh and W_hh separate here as well to use different init methods.
w_xh = tf.get_variable(
'W_xh', [x_size, 4 * self.num_units], initializer=w_init)
w_hh = tf.get_variable(
'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init)
bias = tf.get_variable(
'bias', [4 * self.num_units],
initializer=tf.constant_initializer(0.0))
concat = tf.concat([x, h], 1)
w_full = tf.concat([w_xh, w_hh], 0)
hidden = tf.matmul(concat, w_full) + bias
i, j, f, o = tf.split(hidden, 4, 1)
if self.use_recurrent_dropout:
g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)
else:
g = tf.tanh(j)
new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g
new_h = tf.tanh(new_c) * tf.sigmoid(o)
return new_h, tf.concat([new_c, new_h], 1)  # state is packed into a single tensor rather than a tuple.
def layer_norm_all(h,
batch_size,
base,
num_units,
scope='layer_norm',
reuse=False,
gamma_start=1.0,
epsilon=1e-3,
use_bias=True):
"""Layer Norm (faster version, but not using defun)."""
# Performs layer norm on multiple bases at once (i.e., i, g, j, o for lstm)
# Reshapes h so that layer norm can be applied to all gate blocks in parallel
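# e.g. a pre-activation h of shape [batch, 4*num_units] is reshaped to
# [batch, 4, num_units], normalized over the last axis, scaled by gamma
# (and shifted by beta when use_bias), then reshaped back to [batch, 4*num_units].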
h_reshape = tf.reshape(h, [batch_size, base, num_units])
mean = tf.reduce_mean(h_reshape, [2], keep_dims=True)
var = tf.reduce_mean(tf.square(h_reshape - mean), [2], keep_dims=True)
epsilon = tf.constant(epsilon)
rstd = tf.rsqrt(var + epsilon)
h_reshape = (h_reshape - mean) * rstd
# reshape back to original
h = tf.reshape(h_reshape, [batch_size, base * num_units])
with tf.variable_scope(scope):
if reuse:
tf.get_variable_scope().reuse_variables()
gamma = tf.get_variable(
'ln_gamma', [4 * num_units],
initializer=tf.constant_initializer(gamma_start))
if use_bias:
beta = tf.get_variable(
'ln_beta', [4 * num_units], initializer=tf.constant_initializer(0.0))
if use_bias:
return gamma * h + beta
return gamma * h
def layer_norm(x,
num_units,
scope='layer_norm',
reuse=False,
gamma_start=1.0,
epsilon=1e-3,
use_bias=True):
"""Calculate layer norm."""
axes = [1]
mean = tf.reduce_mean(x, axes, keep_dims=True)
x_shifted = x - mean
var = tf.reduce_mean(tf.square(x_shifted), axes, keep_dims=True)
inv_std = tf.rsqrt(var + epsilon)
with tf.variable_scope(scope):
if reuse:
tf.get_variable_scope().reuse_variables()
gamma = tf.get_variable(
'ln_gamma', [num_units],
initializer=tf.constant_initializer(gamma_start))
if use_bias:
beta = tf.get_variable(
'ln_beta', [num_units], initializer=tf.constant_initializer(0.0))
output = gamma * (x_shifted) * inv_std
if use_bias:
output += beta
return output
def raw_layer_norm(x, epsilon=1e-3):
axes = [1]
mean = tf.reduce_mean(x, axes, keep_dims=True)
std = tf.sqrt(
tf.reduce_mean(tf.square(x - mean), axes, keep_dims=True) + epsilon)
output = (x - mean) / (std)
return output
def super_linear(x,
output_size,
scope=None,
reuse=False,
init_w='ortho',
weight_start=0.0,
use_bias=True,
bias_start=0.0,
input_size=None):
"""Performs linear operation. Uses ortho init defined earlier."""
shape = x.get_shape().as_list()
with tf.variable_scope(scope or 'linear'):
if reuse:
tf.get_variable_scope().reuse_variables()
w_init = None # uniform
if input_size is None:
x_size = shape[1]
else:
x_size = input_size
if init_w == 'zeros':
w_init = tf.constant_initializer(0.0)
elif init_w == 'constant':
w_init = tf.constant_initializer(weight_start)
elif init_w == 'gaussian':
w_init = tf.random_normal_initializer(stddev=weight_start)
elif init_w == 'ortho':
w_init = lstm_ortho_initializer(1.0)
w = tf.get_variable(
'super_linear_w', [x_size, output_size], tf.float32, initializer=w_init)
if use_bias:
b = tf.get_variable(
'super_linear_b', [output_size],
tf.float32,
initializer=tf.constant_initializer(bias_start))
return tf.matmul(x, w) + b
return tf.matmul(x, w)
class LayerNormLSTMCell(tf.contrib.rnn.RNNCell):
"""Layer-Norm, with Ortho Init. and Recurrent Dropout without Memory Loss.
https://arxiv.org/abs/1607.06450 - Layer Norm
https://arxiv.org/abs/1603.05118 - Recurrent Dropout without Memory Loss
"""
def __init__(self,
num_units,
forget_bias=1.0,
use_recurrent_dropout=False,
dropout_keep_prob=0.90):
"""Initialize the Layer Norm LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (default 1.0).
use_recurrent_dropout: Whether to use Recurrent Dropout (default False)
dropout_keep_prob: float, dropout keep probability (default 0.90)
"""
self.num_units = num_units
self.forget_bias = forget_bias
self.use_recurrent_dropout = use_recurrent_dropout
self.dropout_keep_prob = dropout_keep_prob
@property
def input_size(self):
return self.num_units
@property
def output_size(self):
return self.num_units
@property
def state_size(self):
return 2 * self.num_units
def get_output(self, state):
h, unused_c = tf.split(state, 2, 1)
return h
def __call__(self, x, state, timestep=0, scope=None):
with tf.variable_scope(scope or type(self).__name__):
h, c = tf.split(state, 2, 1)
h_size = self.num_units
x_size = x.get_shape().as_list()[1]
batch_size = x.get_shape().as_list()[0]
w_init = None # uniform
h_init = lstm_ortho_initializer(1.0)
w_xh = tf.get_variable(
'W_xh', [x_size, 4 * self.num_units], initializer=w_init)
w_hh = tf.get_variable(
'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init)
concat = tf.concat([x, h], 1) # concat for speed.
w_full = tf.concat([w_xh, w_hh], 0)
concat = tf.matmul(concat, w_full)  # no explicit bias here; layer norm's beta term plays that role.
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
concat = layer_norm_all(concat, batch_size, 4, h_size, 'ln_all')
i, j, f, o = tf.split(concat, 4, 1)
if self.use_recurrent_dropout:
g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)
else:
g = tf.tanh(j)
new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g
new_h = tf.tanh(layer_norm(new_c, h_size, 'ln_c')) * tf.sigmoid(o)
return new_h, tf.concat([new_h, new_c], 1)
class HyperLSTMCell(tf.contrib.rnn.RNNCell):
"""HyperLSTM with Ortho Init, Layer Norm, Recurrent Dropout, no Memory Loss.
https://arxiv.org/abs/1609.09106
http://blog.otoro.net/2016/09/28/hyper-networks/
"""
def __init__(self,
num_units,
forget_bias=1.0,
use_recurrent_dropout=False,
dropout_keep_prob=0.90,
use_layer_norm=True,
hyper_num_units=256,
hyper_embedding_size=32,
hyper_use_recurrent_dropout=False):
"""Initialize the Layer Norm HyperLSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (default 1.0).
use_recurrent_dropout: Whether to use Recurrent Dropout (default False)
dropout_keep_prob: float, dropout keep probability (default 0.90)
use_layer_norm: boolean. (default True)
Controls whether we use LayerNorm layers in main LSTM & HyperLSTM cell.
hyper_num_units: int, number of units in HyperLSTM cell.
(default is 256, recommend experimenting with larger values for bigger tasks)
hyper_embedding_size: int, size of signals emitted from HyperLSTM cell.
(default is 32, recommend trying larger values for large datasets)
hyper_use_recurrent_dropout: boolean. (default False)
Controls whether HyperLSTM cell also uses recurrent dropout.
Recommend turning this on only if hyper_num_units becomes large (>= 512)
"""
self.num_units = num_units
self.forget_bias = forget_bias
self.use_recurrent_dropout = use_recurrent_dropout
self.dropout_keep_prob = dropout_keep_prob
self.use_layer_norm = use_layer_norm
self.hyper_num_units = hyper_num_units
self.hyper_embedding_size = hyper_embedding_size
self.hyper_use_recurrent_dropout = hyper_use_recurrent_dropout
self.total_num_units = self.num_units + self.hyper_num_units
if self.use_layer_norm:
cell_fn = LayerNormLSTMCell
else:
cell_fn = LSTMCell
self.hyper_cell = cell_fn(
hyper_num_units,
use_recurrent_dropout=hyper_use_recurrent_dropout,
dropout_keep_prob=dropout_keep_prob)
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self.num_units
@property
def state_size(self):
return 2 * self.total_num_units
def get_output(self, state):
total_h, unused_total_c = tf.split(state, 2, 1)
h = total_h[:, 0:self.num_units]
return h
def hyper_norm(self, layer, scope='hyper', use_bias=True):
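"""Modulate one gate pre-activation with signals from the HyperLSTM.

Editor's note: this computes layer <- alpha(z) * layer (+ beta(z) when use_bias),
where z is a small learned embedding of self.hyper_output, so the hypernetwork
rescales and shifts each gate contribution element-wise.
"""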
num_units = self.num_units
embedding_size = self.hyper_embedding_size
# recurrent batch norm init trick from Cooijmans et al. (https://arxiv.org/abs/1603.09025).
init_gamma = 0.10
with tf.variable_scope(scope):
zw = super_linear(
self.hyper_output,
embedding_size,
init_w='constant',
weight_start=0.00,
use_bias=True,
bias_start=1.0,
scope='zw')
alpha = super_linear(
zw,
num_units,
init_w='constant',
weight_start=init_gamma / embedding_size,
use_bias=False,
scope='alpha')
result = tf.multiply(alpha, layer)
if use_bias:
zb = super_linear(
self.hyper_output,
embedding_size,
init_w='gaussian',
weight_start=0.01,
use_bias=False,
bias_start=0.0,
scope='zb')
beta = super_linear(
zb,
num_units,
init_w='constant',
weight_start=0.00,
use_bias=False,
scope='beta')
result += beta
return result
def __call__(self, x, state, timestep=0, scope=None):
with tf.variable_scope(scope or type(self).__name__):
total_h, total_c = tf.split(state, 2, 1)
h = total_h[:, 0:self.num_units]
c = total_c[:, 0:self.num_units]
self.hyper_state = tf.concat(
[total_h[:, self.num_units:], total_c[:, self.num_units:]], 1)
batch_size = x.get_shape().as_list()[0]
x_size = x.get_shape().as_list()[1]
self._input_size = x_size
w_init = None # uniform
h_init = lstm_ortho_initializer(1.0)
w_xh = tf.get_variable(
'W_xh', [x_size, 4 * self.num_units], initializer=w_init)
w_hh = tf.get_variable(
'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init)
bias = tf.get_variable(
'bias', [4 * self.num_units],
initializer=tf.constant_initializer(0.0))
# concatenate the input and hidden states for hyperlstm input
hyper_input = tf.concat([x, h], 1)
hyper_output, hyper_new_state = self.hyper_cell(hyper_input,
self.hyper_state)
self.hyper_output = hyper_output
self.hyper_state = hyper_new_state
xh = tf.matmul(x, w_xh)
hh = tf.matmul(h, w_hh)
# split Wxh contributions
ix, jx, fx, ox = tf.split(xh, 4, 1)
ix = self.hyper_norm(ix, 'hyper_ix', use_bias=False)
jx = self.hyper_norm(jx, 'hyper_jx', use_bias=False)
fx = self.hyper_norm(fx, 'hyper_fx', use_bias=False)
ox = self.hyper_norm(ox, 'hyper_ox', use_bias=False)
# split Whh contributions
ih, jh, fh, oh = tf.split(hh, 4, 1)
ih = self.hyper_norm(ih, 'hyper_ih', use_bias=True)
jh = self.hyper_norm(jh, 'hyper_jh', use_bias=True)
fh = self.hyper_norm(fh, 'hyper_fh', use_bias=True)
oh = self.hyper_norm(oh, 'hyper_oh', use_bias=True)
# split bias
ib, jb, fb, ob = tf.split(bias, 4, 0) # bias is to be broadcasted.
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i = ix + ih + ib
j = jx + jh + jb
f = fx + fh + fb
o = ox + oh + ob
if self.use_layer_norm:
concat = tf.concat([i, j, f, o], 1)
concat = layer_norm_all(concat, batch_size, 4, self.num_units, 'ln_all')
i, j, f, o = tf.split(concat, 4, 1)
if self.use_recurrent_dropout:
g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)
else:
g = tf.tanh(j)
new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g
new_h = tf.tanh(layer_norm(new_c, self.num_units, 'ln_c')) * tf.sigmoid(o)
hyper_h, hyper_c = tf.split(hyper_new_state, 2, 1)
new_total_h = tf.concat([new_h, hyper_h], 1)
new_total_c = tf.concat([new_c, hyper_c], 1)
new_total_state = tf.concat([new_total_h, new_total_c], 1)
return new_h, new_total_state
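# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original file). With the TF1 / tf.contrib
# API these cells were unrolled like any other RNNCell; the tensor names below are
# hypothetical:
#
#   cell = HyperLSTMCell(num_units=512, use_recurrent_dropout=True)
#   outputs, last_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
#   last_h = cell.get_output(last_state)   # hidden part of the packed state
# ---------------------------------------------------------------------------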
|
jesseengel/magenta
|
magenta/models/sketch_rnn/rnn.py
|
Python
|
apache-2.0
| 16,509
|
[
"Gaussian"
] |
175a0b305042af5e77e52fa7047caf0f23a21a4014b43e63b0faafe7caf09f4a
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import espressomd
from espressomd.interactions import HarmonicBond
from espressomd.interactions import FeneBond
from espressomd.observables import StressTensor
from tests_common import fene_force2
import numpy as np
# allowed deviation from analytical results
tol = 1.0e-13
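# Editor's note: the bonded/non-bonded helpers below accumulate the pair virial
#   sigma = (1/V) * sum_over_pairs f_ij (outer product) r_ij
# i.e. the outer product of the pair force and the pair separation divided by the box
# volume, while stress_kinetic is the ideal-gas term sum_i v_i (outer product) v_i / V
# (unit masses); these are the contributions system.analysis.stress_tensor() reports.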
# analytical result for convective stress
def stress_kinetic(vel, box_l):
return np.einsum('ij,ik->jk', vel, vel) / np.prod(system.box_l)
# analytical result for stress originating from bond force
def stress_bonded(pos, box_l):
stress = np.zeros([3, 3])
for p1, p2 in zip(pos[0::2], pos[1::2]):
r = p1 - p2
f = -1.0e4 * r
stress += np.einsum('i,j', f, r) / np.prod(system.box_l)
return stress
# analytical result for stress originating from non-bonded force
def stress_nonbonded(particle_pairs, box_l):
stress = np.zeros([3, 3])
for p1, p2 in particle_pairs:
if (p1.type == 0 and p2.type == 0) or (p1.type == 1 and p2.type == 2):
d = p1.pos - p2.pos
r = np.sqrt(np.sum(d**2))
r_hat = d / r
f = (24.0 * 1.0 * (2.0 * 1.0**12 / r**13 - 1.0**6 / r**7)) * r_hat
stress += np.einsum('i,j', f, d) / np.prod(system.box_l)
return stress
def stress_nonbonded_inter(particle_pairs, box_l):
stress = np.zeros([3, 3])
for p1, p2 in particle_pairs:
if p1.type == 1 and p2.type == 2 and p1.mol_id != p2.mol_id:
r = p1.pos - p2.pos
d = np.sqrt(np.sum(r**2))
r_hat = r / d
f = (24.0 * 1.0 * (2.0 * 1.0**12 / d**13 - 1.0**6 / d**7)) * r_hat
stress += np.einsum('i,j', f, r) / np.prod(system.box_l)
return stress
def stress_nonbonded_intra(particle_pairs, box_l):
stress = np.zeros([3, 3])
for p1, p2 in particle_pairs:
if p1.type == 0 and p2.type == 0 and p1.mol_id == p2.mol_id:
r = p1.pos - p2.pos
d = np.sqrt(np.sum(r**2))
r_hat = r / d
f = (24.0 * 1.0 * (2.0 * 1.0**12 / d**13 - 1.0**6 / d**7)) * r_hat
stress += np.einsum('i,j', f, r) / np.prod(system.box_l)
return stress
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
@utx.skipIfMissingFeatures(['LENNARD_JONES'])
class Stress(ut.TestCase):
def test(self):
# system parameters
box_l = 10.0
system.box_l = [box_l, box_l, box_l]
skin = 0.4
time_step = 0.01
system.time_step = time_step
# thermostat and cell system
system.thermostat.set_langevin(kT=0.0, gamma=1.0, seed=41)
system.cell_system.skin = skin
system.periodicity = [1, 1, 1]
# particles and bond
system.part.add(id=0, pos=[9.9, 9.75, 9.9], type=0, mol_id=0)
system.part.add(id=1, pos=[9.9, 10.25, 9.9], type=0, mol_id=0)
system.part.add(id=2, pos=[0.1, 9.7, 0.1], type=1, mol_id=1)
system.part.add(id=3, pos=[0.1, 10.3, 0.1], type=2, mol_id=2)
harmonic = HarmonicBond(k=1e4, r_0=0)
system.bonded_inter.add(harmonic)
system.part[0].add_bond((harmonic, 1))
system.part[2].add_bond((harmonic, 3))
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
system.non_bonded_inter[1, 2].lennard_jones.set_params(
epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
system.integrator.run(steps=0)
system.part[0].v = [10.0, 20.0, 30.0]
system.part[1].v = [-15, -25, -35]
system.part[2].v = [27.0, 23.0, 17.0]
system.part[3].v = [13.0, 11.0, 19.0]
pos = system.part[:].pos
vel = system.part[:].v
sim_stress_kinetic = system.analysis.stress_tensor()['kinetic']
sim_stress_bonded = system.analysis.stress_tensor()['bonded']
sim_stress_bonded_harmonic = system.analysis.stress_tensor()[
'bonded', len(system.bonded_inter) - 1]
sim_stress_nonbonded = system.analysis.stress_tensor()['non_bonded']
sim_stress_nonbonded_inter = system.analysis.stress_tensor()[
'non_bonded_inter']
sim_stress_nonbonded_inter12 = system.analysis.stress_tensor()[
'non_bonded_inter', 1, 2]
sim_stress_nonbonded_intra = system.analysis.stress_tensor()[
'non_bonded_intra']
sim_stress_nonbonded_intra00 = system.analysis.stress_tensor()[
'non_bonded_intra', 0, 0]
sim_stress_total = system.analysis.stress_tensor()['total']
sim_pressure_kinetic = system.analysis.pressure()['kinetic']
sim_pressure_bonded = system.analysis.pressure()['bonded']
sim_pressure_bonded_harmonic = system.analysis.pressure()[
'bonded', len(system.bonded_inter) - 1]
sim_pressure_nonbonded = system.analysis.pressure()['non_bonded']
sim_pressure_nonbonded_inter = system.analysis.pressure()[
'non_bonded_inter']
sim_pressure_nonbonded_inter12 = system.analysis.pressure()[
'non_bonded_inter', 1, 2]
sim_pressure_nonbonded_intra = system.analysis.pressure()[
'non_bonded_intra']
sim_pressure_nonbonded_intra00 = system.analysis.pressure()[
'non_bonded_intra', 0, 0]
sim_pressure_total = system.analysis.pressure()['total']
anal_stress_kinetic = stress_kinetic(vel, box_l)
anal_stress_bonded = stress_bonded(pos, box_l)
anal_stress_nonbonded = stress_nonbonded(system.part.pairs(), box_l)
anal_stress_nonbonded_inter = stress_nonbonded_inter(
system.part.pairs(), box_l)
anal_stress_nonbonded_intra = stress_nonbonded_intra(
system.part.pairs(), box_l)
anal_stress_total = anal_stress_kinetic + \
anal_stress_bonded + anal_stress_nonbonded
anal_pressure_kinetic = np.einsum('ii', anal_stress_kinetic) / 3.0
anal_pressure_bonded = np.einsum('ii', anal_stress_bonded) / 3.0
anal_pressure_nonbonded = np.einsum('ii', anal_stress_nonbonded) / 3.0
anal_pressure_nonbonded_inter = np.einsum(
'ii', anal_stress_nonbonded_inter) / 3.0
anal_pressure_nonbonded_intra = np.einsum(
'ii', anal_stress_nonbonded_intra) / 3.0
anal_pressure_total = anal_pressure_kinetic + \
anal_pressure_bonded + anal_pressure_nonbonded
system.part.clear()
self.assertTrue(np.max(np.abs(sim_stress_kinetic - anal_stress_kinetic))
< tol, 'kinetic stress does not match analytical result')
self.assertTrue(np.max(np.abs(sim_stress_bonded - anal_stress_bonded))
< tol, 'bonded stress does not match analytical result')
self.assertTrue(np.max(np.abs(sim_stress_bonded_harmonic - anal_stress_bonded))
< tol, 'bonded stress harmonic bond does not match analytical result')
self.assertTrue(np.max(np.abs(sim_stress_nonbonded - anal_stress_nonbonded))
< tol, 'non-bonded stress does not match analytical result')
self.assertTrue(np.max(np.abs(sim_stress_nonbonded_inter - anal_stress_nonbonded_inter))
< tol, 'non-bonded intermolecular stress does not match analytical result')
self.assertTrue(np.max(np.abs(sim_stress_nonbonded_inter12 - anal_stress_nonbonded_inter)) <
tol, 'non-bonded intermolecular stress molecules 1 and 2 does not match analytical result')
self.assertTrue(np.max(np.abs(sim_stress_nonbonded_intra - anal_stress_nonbonded_intra))
< tol, 'non-bonded intramolecular stress does not match analytical result')
self.assertTrue(np.max(np.abs(sim_stress_nonbonded_intra00 - anal_stress_nonbonded_intra))
< tol, 'non-bonded intramolecular stress molecule 0 does not match analytical result')
self.assertTrue(np.max(np.abs(sim_stress_total - anal_stress_total))
< tol, 'total stress does not match analytical result')
self.assertTrue(np.max(np.abs(sim_stress_total - sim_stress_kinetic - sim_stress_bonded - sim_stress_nonbonded))
< tol, 'total stress is not given as the sum of all major stress components')
self.assertTrue(np.abs(sim_pressure_kinetic - anal_pressure_kinetic)
< tol, 'kinetic pressure does not match analytical result')
self.assertTrue(np.abs(sim_pressure_bonded - anal_pressure_bonded)
< tol, 'bonded pressure does not match analytical result')
self.assertTrue(np.abs(sim_pressure_bonded_harmonic - anal_pressure_bonded)
< tol, 'bonded pressure harmonic bond does not match analytical result')
self.assertTrue(np.abs(sim_pressure_nonbonded - anal_pressure_nonbonded)
< tol, 'non-bonded pressure does not match analytical result')
self.assertTrue(np.abs(sim_pressure_nonbonded_inter - anal_pressure_nonbonded_inter)
< tol, 'non-bonded intermolecular pressure does not match analytical result')
self.assertTrue(
np.abs(sim_pressure_nonbonded_inter12 -
anal_pressure_nonbonded_inter) < tol,
'non-bonded intermolecular pressure molecule 1 and 2 does not match analytical result')
self.assertTrue(np.abs(sim_pressure_nonbonded_intra - anal_pressure_nonbonded_intra)
< tol, 'non-bonded intramolecular pressure does not match analytical result')
self.assertTrue(np.abs(sim_pressure_nonbonded_intra00 - anal_pressure_nonbonded_intra) <
tol, 'non-bonded intramolecular pressure molecule 0 does not match analytical result')
self.assertTrue(np.abs(sim_pressure_total - anal_pressure_total)
< tol, 'total pressure does not match analytical result')
self.assertTrue(np.max(np.abs(sim_pressure_total - sim_pressure_kinetic - sim_pressure_bonded - sim_pressure_nonbonded))
< tol, 'total pressure is not given as the sum of all major pressure components')
# Compare stress tensor observable to stress tensor from analysis
np.testing.assert_allclose(
StressTensor().calculate(),
system.analysis.stress_tensor()["total"].reshape(9),
atol=1E-10)
@utx.skipIfMissingFeatures(['EXTERNAL_FORCES'])
class StressFENE(ut.TestCase):
def get_anal_stress_fene(self, pos_1, pos_2, k, d_r_max, r_0):
stress = np.zeros([3, 3])
vec_r = pos_1 - pos_2
f = -fene_force2(vec_r, k, d_r_max, r_0)
stress += np.einsum('i,j', f, vec_r) / np.prod(system.box_l)
return stress
def test_fene(self):
# system parameters
box_l = 10.0
system.box_l = [box_l, box_l, box_l]
skin = 0.4
time_step = 0.01
system.time_step = time_step
# thermostat and cell system
system.cell_system.skin = skin
system.periodicity = [1, 1, 1]
# particles and bond
system.part.add(
id=0, pos=[9.9, 9.75, 9.9], type=0, mol_id=0, fix=[1, 1, 1])
system.part.add(
id=1, pos=[9.9, 10.25, 9.9], type=0, mol_id=0, fix=[1, 1, 1])
k = 1e4
d_r_max = 1.5
r_0 = 0.1
fene = FeneBond(k=k, d_r_max=d_r_max, r_0=r_0)
system.bonded_inter.add(fene)
system.part[0].add_bond((fene, 1))
system.integrator.run(steps=0)
sim_stress_bonded = system.analysis.stress_tensor()['bonded']
sim_stress_fene = system.analysis.stress_tensor()[
'bonded', len(system.bonded_inter) - 1]
total_bonded_stresses = np.zeros([3, 3])
for i in range(len(system.bonded_inter)):
total_bonded_stresses = np.add(
total_bonded_stresses, system.analysis.stress_tensor()['bonded', i])
anal_stress_fene = self.get_anal_stress_fene(
system.part[0].pos, system.part[1].pos, k, d_r_max, r_0)
self.assertTrue(np.max(np.abs(sim_stress_bonded - anal_stress_fene))
< tol, 'bonded stress does not match analytical result')
self.assertTrue(np.max(np.abs(sim_stress_fene - anal_stress_fene))
< tol, 'bonded stress for fene does not match analytical result')
self.assertTrue(np.max(np.abs(sim_stress_bonded - total_bonded_stresses))
< tol, 'bonded stresses do not sum up to the total value')
sim_pressure_fene = system.analysis.pressure()[
'bonded', len(system.bonded_inter) - 1]
anal_pressure_fene = np.einsum("ii", anal_stress_fene) / 3.0
self.assertTrue(np.max(np.abs(sim_pressure_fene - anal_pressure_fene))
< tol, 'bonded pressure for fene does not match analytical result')
# Compare stress tensor observable to stress tensor from analysis
np.testing.assert_allclose(
StressTensor().calculate(),
system.analysis.stress_tensor()["total"].reshape(9),
atol=1E-10)
system.part.clear()
if __name__ == "__main__":
ut.main()
|
mkuron/espresso
|
testsuite/python/stress.py
|
Python
|
gpl-3.0
| 14,010
|
[
"ESPResSo"
] |
427cc4afe60320ed508ccd75dccb3e97810a05b17fee98c4328627664189ff41
|
"""
A simple Biopython converter to move alignments from phylip format to clustal format
"""
import os
import sys
import argparse
from Bio import SeqIO
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Convert an alignment file from phylip format to clustal format")
parser.add_argument('-i', help='Alignment input file', required=True)
parser.add_argument('-o', help='Output file name (optional: default = input file with clustal appended)')
args = parser.parse_args()
outfile = args.i + ".clustal"
if args.o:
outfile = args.o
records=SeqIO.parse(args.i, 'phylip')
with open(outfile, 'w') as out:
SeqIO.write(records, out, 'clustal')
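# Note (editor's sketch): for a single alignment, Biopython's AlignIO can do essentially
# the same job in one call:
#
#   from Bio import AlignIO
#   AlignIO.convert(args.i, 'phylip', outfile, 'clustal')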
|
linsalrob/EdwardsLab
|
crAssphage/phylip2clustal.py
|
Python
|
mit
| 713
|
[
"Biopython"
] |
eccf35c49793f139455d1b2403344c3a8fd81bf143c1af59265f1f1cd1ce501a
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*************************************
espressopp.interaction.HarmonicUnique
*************************************
.. math::
U = K (d - d_{cur})^2;
.. function:: espressopp.interaction.HarmonicUnique(K)
:param K: (default: 1.0)
:type K: real
.. function:: espressopp.interaction.FixedPairDistListHarmonicUnique(system, fpl, potential)
:param system:
:param fpl:
:param potential:
:type system:
:type fpl:
:type potential:
.. function:: espressopp.interaction.FixedPairDistListHarmonicUnique.getFixedPairList()
:rtype: A Python list of lists.
.. function:: espressopp.interaction.FixedPairDistListHarmonicUnique.setFixedPairList(fixedpairlist)
:param fixedpairlist:
:type fixedpairlist:
.. function:: espressopp.interaction.FixedPairDistListHarmonicUnique.setPotential(potential)
:param potential:
:type potential:
"""
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.PotentialUniqueDist import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_HarmonicUnique, \
interaction_FixedPairDistListHarmonicUnique
class HarmonicUniqueLocal(PotentialUniqueDistLocal, interaction_HarmonicUnique):
def __init__(self, K=1.0):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_HarmonicUnique, K)
class FixedPairDistListHarmonicUniqueLocal(InteractionLocal, interaction_FixedPairDistListHarmonicUnique):
def __init__(self, system, fpl, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedPairDistListHarmonicUnique, system, fpl, potential)
def setPotential(self, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, potential)
def setFixedPairList(self, fixedpairlist):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setFixedPairList(self, fixedpairlist)
def getFixedPairList(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getFixedPairList(self)
if pmi.isController:
class HarmonicUnique(PotentialUniqueDist):
'The HarmonicUnique potential.'
pmiproxydefs = dict(
cls = 'espressopp.interaction.HarmonicUniqueLocal',
pmiproperty = ['K']
)
class FixedPairDistListHarmonicUnique(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedPairDistListHarmonicUniqueLocal',
pmicall = ['setPotential','setFixedPairList','getFixedPairList']
)
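# Hypothetical usage sketch: the espressopp ``system`` object and the fixed
# pair list ``fpl`` are assumed to have been set up by application code and
# are not defined in this module.
#
#     potential = espressopp.interaction.HarmonicUnique(K=30.0)
#     interaction = espressopp.interaction.FixedPairDistListHarmonicUnique(
#         system, fpl, potential)
#     system.addInteraction(interaction)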
|
kkreis/espressopp
|
src/interaction/HarmonicUnique.py
|
Python
|
gpl-3.0
| 3,865
|
[
"ESPResSo"
] |
c01d34318bda13bd8dba864478129d475451acb394c1e4486f62fdc7efca51d4
|
#!/usr/bin/env python
import re,urllib2
class Get_public_ip:
def getip(self):
try:
myip = self.visit("http://ipv4.icanhazip.com/")
except:
try:
myip = self.visit("http://www.whereismyip.com/")
except:
myip = "So sorry!!!"
return myip
def visit(self,url):
opener = urllib2.urlopen(url)
if url == opener.geturl():
str = opener.read()
return re.search('\d+\.\d+\.\d+\.\d+',str).group(0)
if __name__ == "__main__":
getmyip = Get_public_ip()
print getmyip.getip()
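# Rough Python 3 equivalent of the visit() helper above, shown only as a
# hedged sketch (urllib2 was replaced by urllib.request in Python 3; the
# fallback logic in getip() would stay the same):
#
#     import re
#     from urllib.request import urlopen
#
#     def visit(url):
#         with urlopen(url) as response:
#             if url == response.geturl():
#                 return re.search(r'\d+\.\d+\.\d+\.\d+',
#                                  response.read().decode()).group(0)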
|
youprofit/lnmp
|
include/get_public_ipaddr.py
|
Python
|
apache-2.0
| 604
|
[
"VisIt"
] |
7a7b1e2d513014662a497b2b1bbb36c2b9d4a2b350b7613b297db190536cf2a4
|
# Authors: Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
# Many of the computations in this code were derived from Matti Hämäläinen's
# C code.
from copy import deepcopy
from functools import partial
from gzip import GzipFile
import os
import os.path as op
import numpy as np
from .io.constants import FIFF
from .io.meas_info import create_info, Info, read_fiducials
from .io.tree import dir_tree_find
from .io.tag import find_tag, read_tag
from .io.open import fiff_open
from .io.write import (start_block, end_block, write_int,
write_float_sparse_rcs, write_string,
write_float_matrix, write_int_matrix,
write_coord_trans, start_file, end_file, write_id)
from .io.pick import channel_type, _picks_to_idx
from .bem import read_bem_surfaces
from .fixes import _get_img_fdata
from .surface import (read_surface, _create_surf_spacing, _get_ico_surface,
_tessellate_sphere_surf, _get_surf_neighbors,
_normalize_vectors, _triangle_neighbors, mesh_dist,
complete_surface_info, _compute_nearest, fast_cross_3d,
_CheckInside)
from .utils import (get_subjects_dir, check_fname, logger, verbose, fill_doc,
_ensure_int, check_version, _get_call_line, warn,
_check_fname, _check_path_like, has_nibabel, _check_sphere,
_validate_type, _check_option, _is_numeric, _pl, _suggest,
object_size, sizeof_fmt)
from .parallel import parallel_func, check_n_jobs
from .transforms import (invert_transform, apply_trans, _print_coord_trans,
combine_transforms, _get_trans,
_coord_frame_name, Transform, _str_to_frame,
_ensure_trans, read_ras_mni_t)
def read_freesurfer_lut(fname=None):
"""Read a Freesurfer-formatted LUT.
Parameters
----------
fname : str | None
The filename. Can be None to read the standard Freesurfer LUT.
Returns
-------
atlas_ids : dict
Mapping from label names to IDs.
colors : dict
Mapping from label names to colors.
"""
lut = _get_lut(fname)
names, ids = lut['name'], lut['id']
colors = np.array([lut['R'], lut['G'], lut['B'], lut['A']], float).T
atlas_ids = dict(zip(names, ids))
colors = dict(zip(names, colors))
return atlas_ids, colors
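# Example usage (a hypothetical sketch; the region name is assumed to exist
# in the standard FreeSurfer lookup table bundled with this module):
#
#     atlas_ids, colors = read_freesurfer_lut()
#     region_id = atlas_ids['Left-Cerebral-Cortex']    # integer label id
#     region_rgba = colors['Left-Cerebral-Cortex']     # RGBA values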
def _get_lut(fname=None):
"""Get a FreeSurfer LUT."""
_validate_type(fname, ('path-like', None), 'fname')
if fname is None:
fname = op.join(op.dirname(__file__), 'data', 'FreeSurferColorLUT.txt')
_check_fname(fname, 'read', must_exist=True)
dtype = [('id', '<i8'), ('name', 'U'),
('R', '<i8'), ('G', '<i8'), ('B', '<i8'), ('A', '<i8')]
lut = {d[0]: list() for d in dtype}
with open(fname, 'r') as fid:
for line in fid:
line = line.strip()
if line.startswith('#') or not line:
continue
line = line.split()
if len(line) != len(dtype):
raise RuntimeError(f'LUT is improperly formatted: {fname}')
for d, part in zip(dtype, line):
lut[d[0]].append(part)
lut = {d[0]: np.array(lut[d[0]], dtype=d[1]) for d in dtype}
assert len(lut['name']) > 0
return lut
def _get_lut_id(lut, label):
"""Convert a label to a LUT ID number."""
assert isinstance(label, str)
mask = (lut['name'] == label)
assert mask.sum() == 1
return lut['id'][mask]
_src_kind_dict = {
'vol': 'volume',
'surf': 'surface',
'discrete': 'discrete',
}
class SourceSpaces(list):
"""Represent a list of source space.
Currently implemented as a list of dictionaries containing the source
space information
Parameters
----------
source_spaces : list
A list of dictionaries containing the source space information.
info : dict
Dictionary with information about the creation of the source space
file. Has keys 'working_dir' and 'command_line'.
Attributes
----------
info : dict
Dictionary with information about the creation of the source space
file. Has keys 'working_dir' and 'command_line'.
"""
def __init__(self, source_spaces, info=None): # noqa: D102
# First check the types is actually a valid config
_validate_type(source_spaces, list, 'source_spaces')
super(SourceSpaces, self).__init__(source_spaces) # list
self.kind # will raise an error if there is a problem
if info is None:
self.info = dict()
else:
self.info = dict(info)
@property
def kind(self):
types = list()
for si, s in enumerate(self):
_validate_type(s, dict, 'source_spaces[%d]' % (si,))
types.append(s.get('type', None))
_check_option('source_spaces[%d]["type"]' % (si,),
types[-1], ('surf', 'discrete', 'vol'))
if all(k == 'surf' for k in types[:2]):
surf_check = 2
if len(types) == 2:
kind = 'surface'
else:
kind = 'mixed'
else:
surf_check = 0
if all(k == 'discrete' for k in types):
kind = 'discrete'
else:
kind = 'volume'
if any(k == 'surf' for k in types[surf_check:]):
raise RuntimeError('Invalid source space with kinds %s' % (types,))
return kind
@verbose
def plot(self, head=False, brain=None, skull=None, subjects_dir=None,
trans=None, verbose=None):
"""Plot the source space.
Parameters
----------
head : bool
If True, show head surface.
brain : bool | str
If True, show the brain surfaces. Can also be a str for
surface type (e.g., 'pial', same as True). Default is None,
which means 'white' for surface source spaces and False otherwise.
skull : bool | str | list of str | list of dict | None
Whether to plot skull surface. If string, common choices would be
'inner_skull', or 'outer_skull'. Can also be a list to plot
multiple skull surfaces. If a list of dicts, each dict must
contain the complete surface info (such as you get from
:func:`mne.make_bem_model`). True is an alias of 'outer_skull'.
            The subject's bem and bem/flash folders are searched for the 'surf'
files. Defaults to None, which is False for surface source spaces,
and True otherwise.
subjects_dir : str | None
Path to SUBJECTS_DIR if it is not set in the environment.
trans : str | 'auto' | dict | None
The full path to the head<->MRI transform ``*-trans.fif`` file
produced during coregistration. If trans is None, an identity
matrix is assumed. This is only needed when the source space is in
head coordinates.
%(verbose_meth)s
Returns
-------
fig : instance of mayavi.mlab.Figure
The figure.
"""
from .viz import plot_alignment
surfaces = list()
bem = None
if brain is None:
brain = 'white' if any(ss['type'] == 'surf'
for ss in self) else False
if isinstance(brain, str):
surfaces.append(brain)
elif brain:
surfaces.append('brain')
if skull is None:
skull = False if self.kind == 'surface' else True
if isinstance(skull, str):
surfaces.append(skull)
elif skull is True:
surfaces.append('outer_skull')
elif skull is not False: # list
if isinstance(skull[0], dict): # bem
skull_map = {FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'inner_skull',
FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer_skull',
FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer_skin'}
for this_skull in skull:
surfaces.append(skull_map[this_skull['id']])
bem = skull
else: # list of str
for surf in skull:
surfaces.append(surf)
if head:
surfaces.append('head')
if self[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
coord_frame = 'head'
if trans is None:
raise ValueError('Source space is in head coordinates, but no '
'head<->MRI transform was given. Please '
'specify the full path to the appropriate '
'*-trans.fif file as the "trans" parameter.')
else:
coord_frame = 'mri'
info = create_info(0, 1000., 'eeg')
return plot_alignment(
info, trans=trans, subject=self._subject,
subjects_dir=subjects_dir, surfaces=surfaces,
coord_frame=coord_frame, meg=(), eeg=False, dig=False, ecog=False,
bem=bem, src=self
)
def __getitem__(self, *args, **kwargs):
"""Get an item."""
out = super().__getitem__(*args, **kwargs)
if isinstance(out, list):
out = SourceSpaces(out)
return out
def __repr__(self): # noqa: D105
ss_repr = []
extra = []
for si, ss in enumerate(self):
ss_type = ss['type']
r = _src_kind_dict[ss_type]
if ss_type == 'vol':
if 'seg_name' in ss:
r += " (%s)" % (ss['seg_name'],)
else:
r += ", shape=%s" % (ss['shape'],)
elif ss_type == 'surf':
r += (" (%s), n_vertices=%i" % (_get_hemi(ss)[0], ss['np']))
r += ', n_used=%i' % (ss['nuse'],)
if si == 0:
extra += ['%s coords'
% (_coord_frame_name(int(ss['coord_frame'])))]
ss_repr.append('<%s>' % r)
subj = self._subject
if subj is not None:
extra += ['subject %r' % (subj,)]
sz = object_size(self)
if sz is not None:
extra += [f'~{sizeof_fmt(sz)}']
return "<SourceSpaces: [%s] %s>" % (
', '.join(ss_repr), ', '.join(extra))
@property
def _subject(self):
return self[0].get('subject_his_id', None)
def __add__(self, other):
"""Combine source spaces."""
out = self.copy()
out += other
return SourceSpaces(out)
def copy(self):
"""Make a copy of the source spaces.
Returns
-------
src : instance of SourceSpaces
The copied source spaces.
"""
return deepcopy(self)
def __deepcopy__(self, memodict):
"""Make a deepcopy."""
# don't copy read-only views (saves a ton of mem for split-vol src)
info = deepcopy(self.info, memodict)
ss = list()
for s in self:
for key in ('rr', 'nn'):
if key in s:
arr = s[key]
id_ = id(arr)
if id_ not in memodict:
if not arr.flags.writeable:
memodict[id_] = arr
ss.append(deepcopy(s, memodict))
return SourceSpaces(ss, info)
@verbose
def save(self, fname, overwrite=False, *, verbose=None):
"""Save the source spaces to a fif file.
Parameters
----------
fname : str
File to write.
%(overwrite)s
%(verbose_meth)s
"""
write_source_spaces(fname, self, overwrite)
@verbose
def export_volume(self, fname, include_surfaces=True,
include_discrete=True, dest='mri', trans=None,
mri_resolution=False, use_lut=True, overwrite=False,
verbose=None):
"""Export source spaces to nifti or mgz file.
Parameters
----------
fname : str
Name of nifti or mgz file to write.
include_surfaces : bool
If True, include surface source spaces.
include_discrete : bool
If True, include discrete source spaces.
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of the
original T1 image. If 'surf' the coordinate system of the
FreeSurfer surface is used (Surface RAS).
trans : dict, str, or None
Either a transformation filename (usually made using mne_analyze)
or an info dict (usually opened using read_trans()). If string, an
ending of ``.fif`` or ``.fif.gz`` will be assumed to be in FIF
format, any other ending will be assumed to be a text file with a
            4x4 transformation matrix (like the ``--trans`` MNE-C option).
Must be provided if source spaces are in head coordinates and
include_surfaces and mri_resolution are True.
mri_resolution : bool | str
If True, the image is saved in MRI resolution
(e.g. 256 x 256 x 256), and each source region (surface or
segmentation volume) filled in completely. If "sparse", only a
single voxel in the high-resolution MRI is filled in for each
source point.
.. versionchanged:: 0.21.0
Support for "sparse" was added.
use_lut : bool
If True, assigns a numeric value to each source space that
corresponds to a color on the freesurfer lookup table.
%(overwrite)s
.. versionadded:: 0.19
%(verbose_meth)s
Notes
-----
This method requires nibabel.
"""
_check_fname(fname, overwrite)
_validate_type(mri_resolution, (bool, str), 'mri_resolution')
if isinstance(mri_resolution, str):
_check_option('mri_resolution', mri_resolution, ["sparse"],
extra='when mri_resolution is a string')
else:
mri_resolution = bool(mri_resolution)
fname = str(fname)
# import nibabel or raise error
try:
import nibabel as nib
except ImportError:
raise ImportError('This function requires nibabel.')
# Check coordinate frames of each source space
coord_frames = np.array([s['coord_frame'] for s in self])
# Raise error if trans is not provided when head coordinates are used
# and mri_resolution and include_surfaces are true
if (coord_frames == FIFF.FIFFV_COORD_HEAD).all():
coords = 'head' # all sources in head coordinates
if mri_resolution and include_surfaces:
if trans is None:
raise ValueError('trans containing mri to head transform '
'must be provided if mri_resolution and '
'include_surfaces are true and surfaces '
'are in head coordinates')
elif trans is not None:
logger.info('trans is not needed and will not be used unless '
'include_surfaces and mri_resolution are True.')
elif (coord_frames == FIFF.FIFFV_COORD_MRI).all():
coords = 'mri' # all sources in mri coordinates
if trans is not None:
logger.info('trans is not needed and will not be used unless '
'sources are in head coordinates.')
# Raise error if all sources are not in the same space, or sources are
# not in mri or head coordinates
else:
raise ValueError('All sources must be in head coordinates or all '
'sources must be in mri coordinates.')
# use lookup table to assign values to source spaces
logger.info('Reading FreeSurfer lookup table')
# read the lookup table
lut = _get_lut()
# Setup a dictionary of source types
src_types = dict(volume=[], surface_discrete=[])
# Populate dictionary of source types
for src in self:
# volume sources
if src['type'] == 'vol':
src_types['volume'].append(src)
# surface and discrete sources
elif src['type'] in ('surf', 'discrete'):
src_types['surface_discrete'].append(src)
else:
raise ValueError('Unrecognized source type: %s.' % src['type'])
# Raise error if there are no volume source spaces
if len(src_types['volume']) == 0:
raise ValueError('Source spaces must contain at least one volume.')
# Get shape, inuse array and interpolation matrix from volume sources
src = src_types['volume'][0]
aseg_data = None
if mri_resolution:
# read the mri file used to generate volumes
if mri_resolution is True:
aseg_data = _get_img_fdata(nib.load(src['mri_file']))
# get the voxel space shape
shape3d = (src['mri_width'], src['mri_depth'],
src['mri_height'])
else:
# get the volume source space shape
# read the shape in reverse order
# (otherwise results are scrambled)
shape3d = src['shape']
# calculate affine transform for image (MRI_VOXEL to RAS)
if mri_resolution:
# MRI_VOXEL to MRI transform
transform = src['vox_mri_t']
else:
# MRI_VOXEL to MRI transform
# NOTE: 'src' indicates downsampled version of MRI_VOXEL
transform = src['src_mri_t']
# Figure out how to get from our input source space to output voxels
fro_dst_t = invert_transform(transform)
dest = transform['to']
if coords == 'head':
head_mri_t = _get_trans(trans, 'head', 'mri')[0]
fro_dst_t = combine_transforms(head_mri_t, fro_dst_t, 'head', dest)
else:
fro_dst_t = fro_dst_t
# Fill in the volumes
img = np.zeros(shape3d)
for ii, vs in enumerate(src_types['volume']):
# read the lookup table value for segmented volume
if 'seg_name' not in vs:
raise ValueError('Volume sources should be segments, '
'not the entire volume.')
# find the color value for this volume
use_id = 1.
if mri_resolution is True or use_lut:
id_ = _get_lut_id(lut, vs['seg_name'])
if use_lut:
use_id = id_
if mri_resolution == 'sparse':
idx = apply_trans(fro_dst_t, vs['rr'][vs['vertno']])
idx = tuple(idx.round().astype(int).T)
elif mri_resolution is True: # fill the represented vol
# get the values for this volume
idx = (aseg_data == id_)
else:
assert mri_resolution is False
idx = vs['inuse'].reshape(shape3d, order='F').astype(bool)
img[idx] = use_id
# loop through the surface and discrete source spaces
        # get the surface names (assumes left, right order; may want
        # to add these names during source space generation)
for src in src_types['surface_discrete']:
val = 1
if src['type'] == 'surf':
if not include_surfaces:
continue
if use_lut:
surf_name = {
FIFF.FIFFV_MNE_SURF_LEFT_HEMI: 'Left',
FIFF.FIFFV_MNE_SURF_RIGHT_HEMI: 'Right',
}[src['id']] + '-Cerebral-Cortex'
val = _get_lut_id(lut, surf_name)
else:
assert src['type'] == 'discrete'
if not include_discrete:
continue
if use_lut:
logger.info('Discrete sources do not have values on '
'the lookup table. Defaulting to 1.')
# convert vertex positions from their native space
# (either HEAD or MRI) to MRI_VOXEL space
if mri_resolution is True:
use_rr = src['rr']
else:
assert mri_resolution is False or mri_resolution == 'sparse'
use_rr = src['rr'][src['vertno']]
srf_vox = apply_trans(fro_dst_t['trans'], use_rr)
# convert to numeric indices
ix_, iy_, iz_ = srf_vox.T.round().astype(int)
# clip indices outside of volume space
            ix = np.clip(ix_, 0, shape3d[0] - 1)
iy = np.clip(iy_, 0, shape3d[1] - 1)
iz = np.clip(iz_, 0, shape3d[2] - 1)
# compare original and clipped indices
n_diff = ((ix_ != ix) | (iy_ != iy) | (iz_ != iz)).sum()
# generate use warnings for clipping
if n_diff > 0:
warn(f'{n_diff} {src["type"]} vertices lay outside of volume '
f'space. Consider using a larger volume space.')
# get surface id or use default value
# update image to include surface voxels
img[ix, iy, iz] = val
if dest == 'mri':
# combine with MRI to RAS transform
transform = combine_transforms(
transform, vs['mri_ras_t'],
transform['from'], vs['mri_ras_t']['to'])
# now setup the affine for volume image
affine = transform['trans'].copy()
# make sure affine converts from m to mm
affine[:3] *= 1e3
# setup image for file
        if fname.endswith(('.nii', '.nii.gz')):  # save as nifti
# setup the nifti header
hdr = nib.Nifti1Header()
hdr.set_xyzt_units('mm')
# save the nifti image
img = nib.Nifti1Image(img, affine, header=hdr)
elif fname.endswith('.mgz'): # save as mgh
# convert to float32 (float64 not currently supported)
img = img.astype('float32')
# save the mgh image
img = nib.freesurfer.mghformat.MGHImage(img, affine)
        else:
            raise ValueError('Unrecognized file extension')
# write image to file
nib.save(img, fname)
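# Hypothetical usage sketch of export_volume (file name and options are
# illustrative; ``src`` is assumed to contain at least one volume source
# space, as required above):
#
#     src.export_volume('src_labels.mgz', mri_resolution=True, use_lut=True)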
def _add_patch_info(s):
"""Patch information in a source space.
Generate the patch information from the 'nearest' vector in
a source space. For vertex in the source space it provides
the list of neighboring vertices in the high resolution
triangulation.
Parameters
----------
s : dict
The source space.
"""
nearest = s['nearest']
if nearest is None:
s['pinfo'] = None
s['patch_inds'] = None
return
logger.info(' Computing patch statistics...')
indn = np.argsort(nearest)
nearest_sorted = nearest[indn]
steps = np.where(nearest_sorted[1:] != nearest_sorted[:-1])[0] + 1
starti = np.r_[[0], steps]
stopi = np.r_[steps, [len(nearest)]]
pinfo = list()
for start, stop in zip(starti, stopi):
pinfo.append(np.sort(indn[start:stop]))
s['pinfo'] = pinfo
# compute patch indices of the in-use source space vertices
patch_verts = nearest_sorted[steps - 1]
s['patch_inds'] = np.searchsorted(patch_verts, s['vertno'])
logger.info(' Patch information added...')
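# Worked example of the grouping above, with illustrative values: for
# nearest = [2, 0, 2, 0, 1], np.argsort gives indn = [1, 3, 4, 0, 2], the
# sorted values are [0, 0, 1, 2, 2], steps = [2, 3], and the patches become
# pinfo = [array([1, 3]), array([4]), array([0, 2])], i.e. the source-space
# vertices mapped to high-resolution vertices 0, 1 and 2 respectively.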
@verbose
def _read_source_spaces_from_tree(fid, tree, patch_stats=False, verbose=None):
"""Read the source spaces from a FIF file.
Parameters
----------
fid : file descriptor
An open file descriptor.
tree : dict
The FIF tree structure if source is a file id.
patch_stats : bool, optional (default False)
Calculate and add cortical patch statistics to the surfaces.
%(verbose)s
Returns
-------
src : SourceSpaces
The source spaces.
"""
# Find all source spaces
spaces = dir_tree_find(tree, FIFF.FIFFB_MNE_SOURCE_SPACE)
if len(spaces) == 0:
raise ValueError('No source spaces found')
src = list()
for s in spaces:
logger.info(' Reading a source space...')
this = _read_one_source_space(fid, s)
logger.info(' [done]')
if patch_stats:
_complete_source_space_info(this)
src.append(this)
logger.info(' %d source spaces read' % len(spaces))
return SourceSpaces(src)
@verbose
def read_source_spaces(fname, patch_stats=False, verbose=None):
"""Read the source spaces from a FIF file.
Parameters
----------
fname : str
The name of the file, which should end with -src.fif or
-src.fif.gz.
patch_stats : bool, optional (default False)
Calculate and add cortical patch statistics to the surfaces.
%(verbose)s
Returns
-------
src : SourceSpaces
The source spaces.
See Also
--------
write_source_spaces, setup_source_space, setup_volume_source_space
"""
# be more permissive on read than write (fwd/inv can contain src)
check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz',
'_src.fif', '_src.fif.gz',
'-fwd.fif', '-fwd.fif.gz',
'_fwd.fif', '_fwd.fif.gz',
'-inv.fif', '-inv.fif.gz',
'_inv.fif', '_inv.fif.gz'))
ff, tree, _ = fiff_open(fname)
with ff as fid:
src = _read_source_spaces_from_tree(fid, tree, patch_stats=patch_stats,
verbose=verbose)
src.info['fname'] = fname
node = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
if node:
node = node[0]
for p in range(node['nent']):
kind = node['directory'][p].kind
pos = node['directory'][p].pos
tag = read_tag(fid, pos)
if kind == FIFF.FIFF_MNE_ENV_WORKING_DIR:
src.info['working_dir'] = tag.data
elif kind == FIFF.FIFF_MNE_ENV_COMMAND_LINE:
src.info['command_line'] = tag.data
return src
def _read_one_source_space(fid, this):
"""Read one source space."""
res = dict()
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_ID)
if tag is None:
res['id'] = int(FIFF.FIFFV_MNE_SURF_UNKNOWN)
else:
res['id'] = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE)
if tag is None:
raise ValueError('Unknown source space type')
else:
src_type = int(tag.data)
if src_type == FIFF.FIFFV_MNE_SPACE_SURFACE:
res['type'] = 'surf'
elif src_type == FIFF.FIFFV_MNE_SPACE_VOLUME:
res['type'] = 'vol'
elif src_type == FIFF.FIFFV_MNE_SPACE_DISCRETE:
res['type'] = 'discrete'
else:
raise ValueError('Unknown source space type (%d)' % src_type)
if res['type'] == 'vol':
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS)
if tag is not None:
res['shape'] = tuple(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_COORD_TRANS)
if tag is not None:
res['src_mri_t'] = tag.data
parent_mri = dir_tree_find(this, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
if len(parent_mri) == 0:
# MNE 2.7.3 (and earlier) didn't store necessary information
# about volume coordinate translations. Although there is a
            # FIFF_COORD_TRANS in the higher level of the FIFF file, this
# doesn't contain all the info we need. Safer to return an
# error unless a user really wants us to add backward compat.
raise ValueError('Can not find parent MRI location. The volume '
'source space may have been made with an MNE '
'version that is too old (<= 2.7.3). Consider '
'updating and regenerating the inverse.')
mri = parent_mri[0]
for d in mri['directory']:
if d.kind == FIFF.FIFF_COORD_TRANS:
tag = read_tag(fid, d.pos)
trans = tag.data
if trans['from'] == FIFF.FIFFV_MNE_COORD_MRI_VOXEL:
res['vox_mri_t'] = tag.data
if trans['to'] == FIFF.FIFFV_MNE_COORD_RAS:
res['mri_ras_t'] = tag.data
tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR)
if tag is not None:
res['interpolator'] = tag.data
if tag.data.data.size == 0:
del res['interpolator']
else:
logger.info("Interpolation matrix for MRI not found.")
tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE)
if tag is not None:
res['mri_file'] = tag.data
tag = find_tag(fid, mri, FIFF.FIFF_MRI_WIDTH)
if tag is not None:
res['mri_width'] = int(tag.data)
tag = find_tag(fid, mri, FIFF.FIFF_MRI_HEIGHT)
if tag is not None:
res['mri_height'] = int(tag.data)
tag = find_tag(fid, mri, FIFF.FIFF_MRI_DEPTH)
if tag is not None:
res['mri_depth'] = int(tag.data)
tag = find_tag(fid, mri, FIFF.FIFF_MNE_FILE_NAME)
if tag is not None:
res['mri_volume_name'] = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS)
if tag is not None:
nneighbors = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS)
offset = 0
neighbors = []
for n in nneighbors:
neighbors.append(tag.data[offset:offset + n])
offset += n
res['neighbor_vert'] = neighbors
tag = find_tag(fid, this, FIFF.FIFF_COMMENT)
if tag is not None:
res['seg_name'] = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
if tag is None:
raise ValueError('Number of vertices not found')
res['np'] = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NTRI)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI)
if tag is None:
res['ntri'] = 0
else:
res['ntri'] = int(tag.data)
else:
res['ntri'] = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)
if tag is None:
raise ValueError('Coordinate frame information not found')
res['coord_frame'] = tag.data[0]
# Vertices, normals, and triangles
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS)
if tag is None:
raise ValueError('Vertex data not found')
res['rr'] = tag.data.astype(np.float64) # double precision for mayavi
if res['rr'].shape[0] != res['np']:
raise ValueError('Vertex information is incorrect')
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
if tag is None:
raise ValueError('Vertex normals not found')
res['nn'] = tag.data.copy()
if res['nn'].shape[0] != res['np']:
raise ValueError('Vertex normal information is incorrect')
if res['ntri'] > 0:
tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_TRIANGLES)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES)
if tag is None:
raise ValueError('Triangulation not found')
else:
res['tris'] = tag.data - 1 # index start at 0 in Python
else:
res['tris'] = tag.data - 1 # index start at 0 in Python
if res['tris'].shape[0] != res['ntri']:
raise ValueError('Triangulation information is incorrect')
else:
res['tris'] = None
# Which vertices are active
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE)
if tag is None:
res['nuse'] = 0
res['inuse'] = np.zeros(res['nuse'], dtype=np.int64)
res['vertno'] = None
else:
res['nuse'] = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION)
if tag is None:
raise ValueError('Source selection information missing')
res['inuse'] = tag.data.astype(np.int64).T
if len(res['inuse']) != res['np']:
raise ValueError('Incorrect number of entries in source space '
'selection')
res['vertno'] = np.where(res['inuse'])[0]
# Use triangulation
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES)
if tag1 is None or tag2 is None:
res['nuse_tri'] = 0
res['use_tris'] = None
else:
res['nuse_tri'] = tag1.data
res['use_tris'] = tag2.data - 1 # index start at 0 in Python
# Patch-related information
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST)
if tag1 is None or tag2 is None:
res['nearest'] = None
res['nearest_dist'] = None
else:
res['nearest'] = tag1.data
res['nearest_dist'] = tag2.data.T
_add_patch_info(res)
# Distances
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT)
if tag1 is None or tag2 is None:
res['dist'] = None
res['dist_limit'] = None
else:
res['dist'] = tag1.data
res['dist_limit'] = tag2.data
# Add the upper triangle
res['dist'] = res['dist'] + res['dist'].T
if (res['dist'] is not None):
logger.info(' Distance information added...')
tag = find_tag(fid, this, FIFF.FIFF_SUBJ_HIS_ID)
if tag is None:
res['subject_his_id'] = None
else:
res['subject_his_id'] = tag.data
return res
@verbose
def _complete_source_space_info(this, verbose=None):
"""Add more info on surface."""
# Main triangulation
logger.info(' Completing triangulation info...')
this['tri_area'] = np.zeros(this['ntri'])
r1 = this['rr'][this['tris'][:, 0], :]
r2 = this['rr'][this['tris'][:, 1], :]
r3 = this['rr'][this['tris'][:, 2], :]
this['tri_cent'] = (r1 + r2 + r3) / 3.0
this['tri_nn'] = fast_cross_3d((r2 - r1), (r3 - r1))
this['tri_area'] = _normalize_vectors(this['tri_nn']) / 2.0
logger.info('[done]')
# Selected triangles
logger.info(' Completing selection triangulation info...')
if this['nuse_tri'] > 0:
r1 = this['rr'][this['use_tris'][:, 0], :]
r2 = this['rr'][this['use_tris'][:, 1], :]
r3 = this['rr'][this['use_tris'][:, 2], :]
this['use_tri_cent'] = (r1 + r2 + r3) / 3.0
this['use_tri_nn'] = fast_cross_3d((r2 - r1), (r3 - r1))
this['use_tri_area'] = np.linalg.norm(this['use_tri_nn'], axis=1) / 2.
logger.info('[done]')
def find_source_space_hemi(src):
"""Return the hemisphere id for a source space.
Parameters
----------
src : dict
The source space to investigate.
Returns
-------
hemi : int
Deduced hemisphere id.
"""
xave = src['rr'][:, 0].sum()
if xave < 0:
hemi = int(FIFF.FIFFV_MNE_SURF_LEFT_HEMI)
else:
hemi = int(FIFF.FIFFV_MNE_SURF_RIGHT_HEMI)
return hemi
def label_src_vertno_sel(label, src):
"""Find vertex numbers and indices from label.
Parameters
----------
label : Label
Source space label.
src : dict
Source space.
Returns
-------
vertices : list of length 2
Vertex numbers for lh and rh.
src_sel : array of int (len(idx) = len(vertices[0]) + len(vertices[1]))
        Indices of the selected vertices in source space.
"""
    if src[0]['type'] != 'surf':
        raise Exception('Labels are only supported with surface source '
                        'spaces')
vertno = [src[0]['vertno'], src[1]['vertno']]
if label.hemi == 'lh':
vertno_sel = np.intersect1d(vertno[0], label.vertices)
src_sel = np.searchsorted(vertno[0], vertno_sel)
vertno[0] = vertno_sel
vertno[1] = np.array([], int)
elif label.hemi == 'rh':
vertno_sel = np.intersect1d(vertno[1], label.vertices)
src_sel = np.searchsorted(vertno[1], vertno_sel) + len(vertno[0])
vertno[0] = np.array([], int)
vertno[1] = vertno_sel
elif label.hemi == 'both':
vertno_sel_lh = np.intersect1d(vertno[0], label.lh.vertices)
src_sel_lh = np.searchsorted(vertno[0], vertno_sel_lh)
vertno_sel_rh = np.intersect1d(vertno[1], label.rh.vertices)
src_sel_rh = np.searchsorted(vertno[1], vertno_sel_rh) + len(vertno[0])
src_sel = np.hstack((src_sel_lh, src_sel_rh))
vertno = [vertno_sel_lh, vertno_sel_rh]
else:
raise Exception("Unknown hemisphere type")
return vertno, src_sel
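# Illustrative example: if src[0]['vertno'] = [2, 5, 7, 9] and a left-
# hemisphere label covers vertices [5, 9, 11], then vertno_sel = [5, 9],
# src_sel = [1, 3] (positions of the selected vertices within the lh source
# space), and the rh entry of the returned vertices list is empty.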
def _get_vertno(src):
return [s['vertno'] for s in src]
###############################################################################
# Write routines
@verbose
def _write_source_spaces_to_fid(fid, src, verbose=None):
"""Write the source spaces to a FIF file.
Parameters
----------
fid : file descriptor
An open file descriptor.
src : list
The list of source spaces.
%(verbose)s
"""
for s in src:
logger.info(' Write a source space...')
start_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
_write_one_source_space(fid, s, verbose)
end_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
logger.info(' [done]')
logger.info(' %d source spaces written' % len(src))
@verbose
def write_source_spaces(fname, src, overwrite=False, verbose=None):
"""Write source spaces to a file.
Parameters
----------
fname : str
The name of the file, which should end with -src.fif or
-src.fif.gz.
src : SourceSpaces
The source spaces (as returned by read_source_spaces).
%(overwrite)s
%(verbose)s
See Also
--------
read_source_spaces
"""
check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz',
'_src.fif', '_src.fif.gz'))
_check_fname(fname, overwrite=overwrite)
fid = start_file(fname)
start_block(fid, FIFF.FIFFB_MNE)
if src.info:
start_block(fid, FIFF.FIFFB_MNE_ENV)
write_id(fid, FIFF.FIFF_BLOCK_ID)
data = src.info.get('working_dir', None)
if data:
write_string(fid, FIFF.FIFF_MNE_ENV_WORKING_DIR, data)
data = src.info.get('command_line', None)
if data:
write_string(fid, FIFF.FIFF_MNE_ENV_COMMAND_LINE, data)
end_block(fid, FIFF.FIFFB_MNE_ENV)
_write_source_spaces_to_fid(fid, src, verbose)
end_block(fid, FIFF.FIFFB_MNE)
end_file(fid)
def _write_one_source_space(fid, this, verbose=None):
"""Write one source space."""
from scipy import sparse
if this['type'] == 'surf':
src_type = FIFF.FIFFV_MNE_SPACE_SURFACE
elif this['type'] == 'vol':
src_type = FIFF.FIFFV_MNE_SPACE_VOLUME
elif this['type'] == 'discrete':
src_type = FIFF.FIFFV_MNE_SPACE_DISCRETE
else:
raise ValueError('Unknown source space type (%s)' % this['type'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE, src_type)
if this['id'] >= 0:
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_ID, this['id'])
data = this.get('subject_his_id', None)
if data:
write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, data)
write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, this['coord_frame'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, this['np'])
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS, this['rr'])
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS, this['nn'])
# Which vertices are active
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION, this['inuse'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE, this['nuse'])
if this['ntri'] > 0:
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI, this['ntri'])
write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES,
this['tris'] + 1)
if this['type'] != 'vol' and this['use_tris'] is not None:
# Use triangulation
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI, this['nuse_tri'])
write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES,
this['use_tris'] + 1)
if this['type'] == 'vol':
neighbor_vert = this.get('neighbor_vert', None)
if neighbor_vert is not None:
nneighbors = np.array([len(n) for n in neighbor_vert])
neighbors = np.concatenate(neighbor_vert)
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS, nneighbors)
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS, neighbors)
write_coord_trans(fid, this['src_mri_t'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS, this['shape'])
start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
write_coord_trans(fid, this['mri_ras_t'])
write_coord_trans(fid, this['vox_mri_t'])
mri_volume_name = this.get('mri_volume_name', None)
if mri_volume_name is not None:
write_string(fid, FIFF.FIFF_MNE_FILE_NAME, mri_volume_name)
mri_width, mri_height, mri_depth, nvox = _src_vol_dims(this)
interpolator = this.get('interpolator')
if interpolator is None:
interpolator = sparse.csr_matrix((nvox, this['np']))
write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR,
interpolator)
if 'mri_file' in this and this['mri_file'] is not None:
write_string(fid, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE,
this['mri_file'])
write_int(fid, FIFF.FIFF_MRI_WIDTH, mri_width)
write_int(fid, FIFF.FIFF_MRI_HEIGHT, mri_height)
write_int(fid, FIFF.FIFF_MRI_DEPTH, mri_depth)
end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
# Patch-related information
if this['nearest'] is not None:
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST, this['nearest'])
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST,
this['nearest_dist'])
# Distances
if this['dist'] is not None:
# Save only upper triangular portion of the matrix
dists = this['dist'].copy()
dists = sparse.triu(dists, format=dists.format)
write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST, dists)
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT,
this['dist_limit'])
# Segmentation data
if this['type'] == 'vol' and ('seg_name' in this):
# Save the name of the segment
write_string(fid, FIFF.FIFF_COMMENT, this['seg_name'])
##############################################################################
# Head to MRI volume conversion
@verbose
def head_to_mri(pos, subject, mri_head_t, subjects_dir=None,
verbose=None):
"""Convert pos from head coordinate system to MRI ones.
This function converts to MRI RAS coordinates and not to surface
RAS.
Parameters
----------
pos : array, shape (n_pos, 3)
The coordinates (in m) in head coordinate system.
%(subject)s
mri_head_t : instance of Transform
MRI<->Head coordinate transformation.
%(subjects_dir)s
%(verbose)s
Returns
-------
coordinates : array, shape (n_pos, 3)
The MRI RAS coordinates (in mm) of pos.
Notes
-----
This function requires nibabel.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
t1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
head_mri_t = _ensure_trans(mri_head_t, 'head', 'mri')
_, _, mri_ras_t, _, _ = _read_mri_info(t1_fname)
head_ras_t = combine_transforms(head_mri_t, mri_ras_t, 'head', 'ras')
return 1e3 * apply_trans(head_ras_t, pos) # mm
##############################################################################
# Surface to MNI conversion
@verbose
def vertex_to_mni(vertices, hemis, subject, subjects_dir=None, verbose=None):
"""Convert the array of vertices for a hemisphere to MNI coordinates.
Parameters
----------
vertices : int, or list of int
Vertex number(s) to convert.
hemis : int, or list of int
Hemisphere(s) the vertices belong to.
%(subject)s
subjects_dir : str, or None
Path to SUBJECTS_DIR if it is not set in the environment.
%(verbose)s
Returns
-------
coordinates : array, shape (n_vertices, 3)
The MNI coordinates (in mm) of the vertices.
"""
singleton = False
if not isinstance(vertices, list) and not isinstance(vertices, np.ndarray):
singleton = True
vertices = [vertices]
if not isinstance(hemis, list) and not isinstance(hemis, np.ndarray):
hemis = [hemis] * len(vertices)
if not len(hemis) == len(vertices):
raise ValueError('hemi and vertices must match in length')
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
surfs = [op.join(subjects_dir, subject, 'surf', '%s.white' % h)
for h in ['lh', 'rh']]
# read surface locations in MRI space
rr = [read_surface(s)[0] for s in surfs]
# take point locations in MRI space and convert to MNI coordinates
xfm = read_talxfm(subject, subjects_dir)
xfm['trans'][:3, 3] *= 1000. # m->mm
data = np.array([rr[h][v, :] for h, v in zip(hemis, vertices)])
if singleton:
data = data[0]
return apply_trans(xfm['trans'], data)
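# Example call (hypothetical subject name and vertex number):
#
#     coords = vertex_to_mni(2094, hemis=0, subject='sample',
#                            subjects_dir=subjects_dir)
#
# hemis=0 selects the left hemisphere; the result is the MNI position in mm.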
##############################################################################
# Volume to MNI conversion
@verbose
def head_to_mni(pos, subject, mri_head_t, subjects_dir=None,
verbose=None):
"""Convert pos from head coordinate system to MNI ones.
Parameters
----------
pos : array, shape (n_pos, 3)
The coordinates (in m) in head coordinate system.
%(subject)s
mri_head_t : instance of Transform
MRI<->Head coordinate transformation.
%(subjects_dir)s
%(verbose)s
Returns
-------
coordinates : array, shape (n_pos, 3)
The MNI coordinates (in mm) of pos.
Notes
-----
    This function requires nibabel.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
# before we go from head to MRI (surface RAS)
head_mni_t = combine_transforms(
_ensure_trans(mri_head_t, 'head', 'mri'),
read_talxfm(subject, subjects_dir), 'head', 'mni_tal')
return apply_trans(head_mni_t, pos) * 1000.
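# Hypothetical sketch: converting one position from head coordinates (m) to
# MNI space (mm); ``trans`` is assumed to be a head<->MRI Transform obtained
# elsewhere (e.g. from a -trans.fif file).
#
#     pos_head = np.array([[0.01, 0.02, 0.05]])
#     pos_mni = head_to_mni(pos_head, 'sample', trans,
#                           subjects_dir=subjects_dir)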
@verbose
def read_talxfm(subject, subjects_dir=None, verbose=None):
"""Compute MRI-to-MNI transform from FreeSurfer talairach.xfm file.
Parameters
----------
%(subject)s
%(subjects_dir)s
%(verbose)s
Returns
-------
mri_mni_t : instance of Transform
The affine transformation from MRI to MNI space for the subject.
"""
# Adapted from freesurfer m-files. Altered to deal with Norig
# and Torig correctly
subjects_dir = get_subjects_dir(subjects_dir)
# Setup the RAS to MNI transform
ras_mni_t = read_ras_mni_t(subject, subjects_dir)
ras_mni_t['trans'][:3, 3] /= 1000. # mm->m
# We want to get from Freesurfer surface RAS ('mri') to MNI ('mni_tal').
# This file only gives us RAS (non-zero origin) ('ras') to MNI ('mni_tal').
    # So we need to get the ras->mri transform from the MRI headers.
# To do this, we get Norig and Torig
# (i.e. vox_ras_t and vox_mri_t, respectively)
path = op.join(subjects_dir, subject, 'mri', 'orig.mgz')
if not op.isfile(path):
path = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
if not op.isfile(path):
raise IOError('mri not found: %s' % path)
_, _, mri_ras_t, _, _ = _read_mri_info(path)
mri_mni_t = combine_transforms(mri_ras_t, ras_mni_t, 'mri', 'mni_tal')
return mri_mni_t
def _read_mri_info(path, units='m', return_img=False):
if has_nibabel():
import nibabel
mgz = nibabel.load(path)
hdr = mgz.header
n_orig = hdr.get_vox2ras()
t_orig = hdr.get_vox2ras_tkr()
dims = hdr.get_data_shape()
zooms = hdr.get_zooms()[:3]
else:
mgz = None
hdr = _get_mgz_header(path)
n_orig = hdr['vox2ras']
t_orig = hdr['vox2ras_tkr']
dims = hdr['dims']
zooms = hdr['zooms']
# extract the MRI_VOXEL to RAS (non-zero origin) transform
vox_ras_t = Transform('mri_voxel', 'ras', n_orig)
# extract the MRI_VOXEL to MRI transform
vox_mri_t = Transform('mri_voxel', 'mri', t_orig)
# construct the MRI to RAS (non-zero origin) transform
mri_ras_t = combine_transforms(
invert_transform(vox_mri_t), vox_ras_t, 'mri', 'ras')
assert units in ('m', 'mm')
if units == 'm':
conv = np.array([[1e-3, 1e-3, 1e-3, 1]]).T
# scaling and translation terms
vox_ras_t['trans'] *= conv
vox_mri_t['trans'] *= conv
# just the translation term
mri_ras_t['trans'][:, 3:4] *= conv
out = (vox_ras_t, vox_mri_t, mri_ras_t, dims, zooms)
if return_img:
out += (mgz,)
return out
###############################################################################
# Creation and decimation
@verbose
def _check_spacing(spacing, verbose=None):
"""Check spacing parameter."""
# check to make sure our parameters are good, parse 'spacing'
types = ('a string with values "ico#", "oct#", "all", or an int >= 2')
space_err = ('"spacing" must be %s, got type %s (%r)'
% (types, type(spacing), spacing))
if isinstance(spacing, str):
if spacing == 'all':
stype = 'all'
sval = ''
        elif spacing[:3] in ('ico', 'oct'):
stype = spacing[:3]
sval = spacing[3:]
try:
sval = int(sval)
except Exception:
raise ValueError('%s subdivision must be an integer, got %r'
% (stype, sval))
lim = 0 if stype == 'ico' else 1
if sval < lim:
raise ValueError('%s subdivision must be >= %s, got %s'
% (stype, lim, sval))
else:
raise ValueError(space_err)
else:
stype = 'spacing'
sval = _ensure_int(spacing, 'spacing', types)
if sval < 2:
raise ValueError('spacing must be >= 2, got %d' % (sval,))
if stype == 'all':
logger.info('Include all vertices')
ico_surf = None
src_type_str = 'all'
else:
src_type_str = '%s = %s' % (stype, sval)
if stype == 'ico':
logger.info('Icosahedron subdivision grade %s' % sval)
ico_surf = _get_ico_surface(sval)
elif stype == 'oct':
logger.info('Octahedron subdivision grade %s' % sval)
ico_surf = _tessellate_sphere_surf(sval)
else:
assert stype == 'spacing'
logger.info('Approximate spacing %s mm' % sval)
ico_surf = sval
return stype, sval, ico_surf, src_type_str
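# Examples of the parsing above (values are what the helper returns):
#     'oct6' -> stype='oct', sval=6, ico_surf from _tessellate_sphere_surf(6)
#     'ico4' -> stype='ico', sval=4, ico_surf from _get_ico_surface(4)
#     'all'  -> stype='all', sval='', ico_surf=None
#     5      -> stype='spacing', sval=5, ico_surf=5 (approx. 5 mm spacing)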
@verbose
def setup_source_space(subject, spacing='oct6', surface='white',
subjects_dir=None, add_dist=True, n_jobs=1,
verbose=None):
"""Set up bilateral hemisphere surface-based source space with subsampling.
Parameters
----------
%(subject)s
spacing : str
The spacing to use. Can be ``'ico#'`` for a recursively subdivided
icosahedron, ``'oct#'`` for a recursively subdivided octahedron,
``'all'`` for all points, or an integer to use approximate
distance-based spacing (in mm).
.. versionchanged:: 0.18
Support for integers for distance-based spacing.
surface : str
The surface to use.
%(subjects_dir)s
add_dist : bool | str
Add distance and patch information to the source space. This takes some
time so precomputing it is recommended. Can also be 'patch' to only
compute patch information (requires SciPy 1.3+).
.. versionchanged:: 0.20
Support for add_dist='patch'.
%(n_jobs)s
Ignored if ``add_dist=='patch'``.
%(verbose)s
Returns
-------
src : SourceSpaces
The source space for each hemisphere.
See Also
--------
setup_volume_source_space
"""
cmd = ('setup_source_space(%s, spacing=%s, surface=%s, '
'subjects_dir=%s, add_dist=%s, verbose=%s)'
% (subject, spacing, surface, subjects_dir, add_dist, verbose))
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
surfs = [op.join(subjects_dir, subject, 'surf', hemi + surface)
for hemi in ['lh.', 'rh.']]
for surf, hemi in zip(surfs, ['LH', 'RH']):
if surf is not None and not op.isfile(surf):
raise IOError('Could not find the %s surface %s'
% (hemi, surf))
logger.info('Setting up the source space with the following parameters:\n')
logger.info('SUBJECTS_DIR = %s' % subjects_dir)
logger.info('Subject = %s' % subject)
logger.info('Surface = %s' % surface)
stype, sval, ico_surf, src_type_str = _check_spacing(spacing)
logger.info('')
del spacing
logger.info('>>> 1. Creating the source space...\n')
# mne_make_source_space ... actually make the source spaces
src = []
# pre-load ico/oct surf (once) for speed, if necessary
if stype not in ('spacing', 'all'):
logger.info('Doing the %shedral vertex picking...'
% (dict(ico='icosa', oct='octa')[stype],))
for hemi, surf in zip(['lh', 'rh'], surfs):
logger.info('Loading %s...' % surf)
# Setup the surface spacing in the MRI coord frame
if stype != 'all':
logger.info('Mapping %s %s -> %s (%d) ...'
% (hemi, subject, stype, sval))
s = _create_surf_spacing(surf, hemi, subject, stype, ico_surf,
subjects_dir)
logger.info('loaded %s %d/%d selected to source space (%s)'
% (op.split(surf)[1], s['nuse'], s['np'], src_type_str))
src.append(s)
logger.info('') # newline after both subject types are run
# Fill in source space info
hemi_ids = [FIFF.FIFFV_MNE_SURF_LEFT_HEMI, FIFF.FIFFV_MNE_SURF_RIGHT_HEMI]
for s, s_id in zip(src, hemi_ids):
# Add missing fields
s.update(dict(dist=None, dist_limit=None, nearest=None, type='surf',
nearest_dist=None, pinfo=None, patch_inds=None, id=s_id,
coord_frame=FIFF.FIFFV_COORD_MRI))
s['rr'] /= 1000.0
del s['tri_area']
del s['tri_cent']
del s['tri_nn']
del s['neighbor_tri']
# upconvert to object format from lists
src = SourceSpaces(src, dict(working_dir=os.getcwd(), command_line=cmd))
if add_dist:
dist_limit = 0. if add_dist == 'patch' else np.inf
add_source_space_distances(src, dist_limit=dist_limit,
n_jobs=n_jobs, verbose=verbose)
# write out if requested, then return the data
logger.info('You are now one step closer to computing the gain matrix')
return src
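# Hypothetical usage sketch (subject name and spacing are illustrative):
#
#     src = setup_source_space('sample', spacing='oct6', surface='white',
#                              subjects_dir=subjects_dir, add_dist=True)
#     src.save('sample-oct6-src.fif')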
def _check_mri(mri, subject, subjects_dir):
_validate_type(mri, 'path-like', 'mri')
if not op.isfile(mri):
if subject is None:
raise FileNotFoundError(
'MRI file %r not found and no subject provided' % (mri,))
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
mri = op.join(subjects_dir, subject, 'mri', mri)
if not op.isfile(mri):
raise FileNotFoundError('MRI file %r not found' % (mri,))
return mri
def _check_volume_labels(volume_label, mri, name='volume_label'):
_validate_type(mri, 'path-like', 'mri when %s is not None' % (name,))
mri = _check_fname(mri, overwrite='read', must_exist=True)
if isinstance(volume_label, str):
volume_label = [volume_label]
_validate_type(volume_label, (list, tuple, dict), name) # should be
if not isinstance(volume_label, dict):
# Turn it into a dict
if not mri.endswith('aseg.mgz'):
raise RuntimeError(
'Must use a *aseg.mgz file unless %s is a dict, got %s'
% (name, op.basename(mri)))
lut, _ = read_freesurfer_lut()
use_volume_label = dict()
for label in volume_label:
if label not in lut:
raise ValueError(
'Volume %r not found in file %s. Double check '
'FreeSurfer lookup table.%s'
% (label, mri, _suggest(label, lut)))
use_volume_label[label] = lut[label]
volume_label = use_volume_label
for label, id_ in volume_label.items():
_validate_type(label, str, 'volume_label keys')
_validate_type(id_, 'int-like', 'volume_labels[%r]' % (label,))
volume_label = {k: _ensure_int(v) for k, v in volume_label.items()}
return volume_label
@verbose
def setup_volume_source_space(subject=None, pos=5.0, mri=None,
sphere=None, bem=None,
surface=None, mindist=5.0, exclude=0.0,
subjects_dir=None, volume_label=None,
add_interpolator=True, sphere_units='m',
single_volume=False, verbose=None):
"""Set up a volume source space with grid spacing or discrete source space.
Parameters
----------
subject : str | None
Subject to process. If None, the path to the MRI volume must be
absolute to get a volume source space. If a subject name
is provided the T1.mgz file will be found automatically.
Defaults to None.
pos : float | dict
Positions to use for sources. If float, a grid will be constructed
with the spacing given by ``pos`` in mm, generating a volume source
space. If dict, pos['rr'] and pos['nn'] will be used as the source
space locations (in meters) and normals, respectively, creating a
discrete source space.
.. note:: For a discrete source space (``pos`` is a dict),
``mri`` must be None.
mri : str | None
The filename of an MRI volume (mgh or mgz) to create the
interpolation matrix over. Source estimates obtained in the
volume source space can then be morphed onto the MRI volume
using this interpolator. If pos is a dict, this cannot be None.
        If a subject name is provided, ``mri`` will default to ``aseg.mgz``
        when ``volume_label`` is given, or to ``T1.mgz`` when ``pos`` is a
        float; otherwise it will stay None.
sphere : ndarray, shape (4,) | ConductorModel | None
Define spherical source space bounds using origin and radius given
by (ox, oy, oz, rad) in ``sphere_units``.
Only used if ``bem`` and ``surface`` are both None. Can also be a
spherical ConductorModel, which will use the origin and radius.
None (the default) uses a head-digitization fit.
bem : str | None | ConductorModel
Define source space bounds using a BEM file (specifically the inner
        skull surface) or a ConductorModel for a 1-layer or 3-layer BEM.
surface : str | dict | None
Define source space bounds using a FreeSurfer surface file. Can
also be a dictionary with entries ``'rr'`` and ``'tris'``, such as
those returned by :func:`mne.read_surface`.
mindist : float
Exclude points closer than this distance (mm) to the bounding surface.
exclude : float
Exclude points closer than this distance (mm) from the center of mass
of the bounding surface.
%(subjects_dir)s
volume_label : str | dict | list | None
Region(s) of interest to use. None (default) will create a single
whole-brain source space. Otherwise, a separate source space will be
created for each entry in the list or dict (str will be turned into
a single-element list). If list of str, standard Freesurfer labels
are assumed. If dict, should be a mapping of region names to atlas
id numbers, allowing the use of other atlases.
.. versionchanged:: 0.21.0
Support for dict added.
add_interpolator : bool
If True and ``mri`` is not None, then an interpolation matrix
will be produced.
sphere_units : str
Defaults to ``"m"``.
.. versionadded:: 0.20
single_volume : bool
        If True, multiple values of ``volume_label`` will be merged into a
        single source space instead of occupying multiple source spaces
(one for each sub-volume), i.e., ``len(src)`` will be ``1`` instead of
``len(volume_label)``. This can help conserve memory and disk space
when many labels are used.
.. versionadded:: 0.21
%(verbose)s
Returns
-------
src : SourceSpaces
A :class:`SourceSpaces` object containing one source space for each
entry of ``volume_labels``, or a single source space if
``volume_labels`` was not specified.
See Also
--------
setup_source_space
Notes
-----
    Volume source spaces are related to an MRI image such as T1, which makes
    it possible to visualize source estimates overlaid on MRIs and to morph
    estimates to a template brain for group analysis. Discrete source spaces
    don't allow this. If you provide a subject name, the T1 MRI will be
    used by default.
When you work with a source space formed from a grid you need to specify
the domain in which the grid will be defined. There are three ways
of specifying this:
(i) sphere, (ii) bem model, and (iii) surface.
    The default behavior is to use a sphere model
    (``sphere=(0.0, 0.0, 0.0, 90.0)``); if ``bem`` or ``surface`` is not
    ``None``, then ``sphere`` is ignored.
    If you are going to use a BEM conductor model for the forward model,
    it is recommended to pass it here.
To create a discrete source space, ``pos`` must be a dict, ``mri`` must be
None, and ``volume_label`` must be None. To create a whole brain volume
    source space, ``pos`` must be a float and ``mri`` must be provided.
    To create a volume source space from a label, ``pos`` must be a float,
    ``volume_label`` must be provided, and ``mri`` must refer to a .mgh or
    .mgz file with values corresponding to the FreeSurfer lookup table
    (typically ``aseg.mgz``).
"""
subjects_dir = get_subjects_dir(subjects_dir)
_validate_type(
volume_label, (str, list, tuple, dict, None), 'volume_label')
if bem is not None and surface is not None:
raise ValueError('Only one of "bem" and "surface" should be '
'specified')
if mri is None and subject is not None:
if volume_label is not None:
mri = 'aseg.mgz'
elif _is_numeric(pos):
mri = 'T1.mgz'
if mri is not None:
mri = _check_mri(mri, subject, subjects_dir)
if isinstance(pos, dict):
raise ValueError('Cannot create interpolation matrix for '
'discrete source space, mri must be None if '
'pos is a dict')
if volume_label is not None:
volume_label = _check_volume_labels(volume_label, mri)
assert volume_label is None or isinstance(volume_label, dict)
sphere = _check_sphere(sphere, sphere_units=sphere_units)
# triage bounding argument
if bem is not None:
logger.info('BEM : %s', bem)
elif surface is not None:
if isinstance(surface, dict):
if not all(key in surface for key in ['rr', 'tris']):
raise KeyError('surface, if dict, must have entries "rr" '
'and "tris"')
# let's make sure we have geom info
complete_surface_info(surface, copy=False, verbose=False)
surf_extra = 'dict()'
elif isinstance(surface, str):
if not op.isfile(surface):
raise IOError('surface file "%s" not found' % surface)
surf_extra = surface
logger.info('Boundary surface file : %s', surf_extra)
else:
logger.info('Sphere : origin at (%.1f %.1f %.1f) mm'
% (1000 * sphere[0], 1000 * sphere[1], 1000 * sphere[2]))
logger.info(' radius : %.1f mm' % (1000 * sphere[3],))
# triage pos argument
if isinstance(pos, dict):
if not all(key in pos for key in ['rr', 'nn']):
raise KeyError('pos, if dict, must contain "rr" and "nn"')
pos_extra = 'dict()'
else: # pos should be float-like
try:
pos = float(pos)
except (TypeError, ValueError):
raise ValueError('pos must be a dict, or something that can be '
'cast to float()')
if not isinstance(pos, float):
logger.info('Source location file : %s', pos_extra)
logger.info('Assuming input in millimeters')
logger.info('Assuming input in MRI coordinates')
if isinstance(pos, float):
logger.info('grid : %.1f mm' % pos)
logger.info('mindist : %.1f mm' % mindist)
        pos /= 1000.0  # convert pos from mm to m
if exclude > 0.0:
logger.info('Exclude : %.1f mm' % exclude)
vol_info = dict()
if mri is not None:
logger.info('MRI volume : %s' % mri)
logger.info('')
logger.info('Reading %s...' % mri)
vol_info = _get_mri_info_data(mri, data=volume_label is not None)
    exclude /= 1000.0  # convert exclude from mm to m
logger.info('')
# Explicit list of points
if not isinstance(pos, float):
# Make the grid of sources
sp = [_make_discrete_source_space(pos)]
else:
# Load the brain surface as a template
if isinstance(bem, str):
# read bem surface in the MRI coordinate frame
surf = read_bem_surfaces(bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN,
verbose=False)
logger.info('Loaded inner skull from %s (%d nodes)'
% (bem, surf['np']))
elif bem is not None and bem.get('is_sphere') is False:
# read bem surface in the MRI coordinate frame
which = np.where([surf['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN
for surf in bem['surfs']])[0]
if len(which) != 1:
raise ValueError('Could not get inner skull surface from BEM')
surf = bem['surfs'][which[0]]
assert surf['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN
if surf['coord_frame'] != FIFF.FIFFV_COORD_MRI:
raise ValueError('BEM is not in MRI coordinates, got %s'
% (_coord_frame_name(surf['coord_frame']),))
logger.info('Taking inner skull from %s' % bem)
elif surface is not None:
if isinstance(surface, str):
# read the surface in the MRI coordinate frame
surf = read_surface(surface, return_dict=True)[-1]
else:
surf = surface
logger.info('Loaded bounding surface from %s (%d nodes)'
% (surface, surf['np']))
surf = deepcopy(surf)
surf['rr'] *= 1e-3 # must be converted to meters
else: # Load an icosahedron and use that as the surface
logger.info('Setting up the sphere...')
surf = dict(R=sphere[3], r0=sphere[:3])
# Make the grid of sources in MRI space
sp = _make_volume_source_space(
surf, pos, exclude, mindist, mri, volume_label,
vol_info=vol_info, single_volume=single_volume)
del sphere
assert isinstance(sp, list)
assert len(sp) == 1 if (volume_label is None or
single_volume) else len(volume_label)
# Compute an interpolation matrix to show data in MRI_VOXEL coord frame
if mri is not None:
if add_interpolator:
_add_interpolator(sp)
elif sp[0]['type'] == 'vol':
# If there is no interpolator, it's actually a discrete source space
sp[0]['type'] = 'discrete'
# do some cleaning
if volume_label is None and 'seg_name' in sp[0]:
del sp[0]['seg_name']
for s in sp:
if 'vol_dims' in s:
del s['vol_dims']
# Save it
sp = _complete_vol_src(sp, subject)
return sp
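# A minimal usage sketch for the volume source space setup function above
# (assuming it is MNE's ``setup_volume_source_space``; the subject name,
# atlas file, and label below are illustrative assumptions):
#
#     src = setup_volume_source_space(
#         subject='sample', pos=5.0, mri='aseg.mgz',
#         volume_label='Left-Hippocampus', subjects_dir=subjects_dir)
#
# This would build a 5 mm grid restricted to the voxels labeled
# Left-Hippocampus in the subject's aseg.mgz segmentation.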
def _complete_vol_src(sp, subject=None):
for s in sp:
s.update(dict(nearest=None, dist=None, use_tris=None, patch_inds=None,
dist_limit=None, pinfo=None, ntri=0, nearest_dist=None,
nuse_tri=0, tris=None, subject_his_id=subject))
sp = SourceSpaces(sp, dict(working_dir=os.getcwd(), command_line='None'))
return sp
def _make_voxel_ras_trans(move, ras, voxel_size):
"""Make a transformation from MRI_VOXEL to MRI surface RAS (i.e. MRI)."""
assert voxel_size.ndim == 1
assert voxel_size.size == 3
rot = ras.T * voxel_size[np.newaxis, :]
assert rot.ndim == 2
assert rot.shape[0] == 3
assert rot.shape[1] == 3
trans = np.c_[np.r_[rot, np.zeros((1, 3))], np.r_[move, 1.0]]
t = Transform('mri_voxel', 'mri', trans)
return t
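# Sketch of how the helper above composes a voxel->RAS transform (values
# are illustrative: a 5 mm isotropic grid with axes aligned to RAS and no
# translation):
#
#     move = np.zeros(3)
#     ras = np.eye(3)
#     voxel_size = np.full(3, 5e-3)  # 5 mm, in meters
#     t = _make_voxel_ras_trans(move, ras, voxel_size)
#     # t['trans'] is a 4x4 matrix that scales voxel indices by the voxel
#     # size and adds the translation ``move`` in its last column.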
def _make_discrete_source_space(pos, coord_frame='mri'):
"""Use a discrete set of source locs/oris to make src space.
Parameters
----------
pos : dict
Must have entries "rr" and "nn". Data should be in meters.
coord_frame : str
The coordinate frame in which the positions are given; default: 'mri'.
The frame must be one defined in transforms.py:_str_to_frame
Returns
-------
src : dict
The source space.
"""
# Check that coordinate frame is valid
if coord_frame not in _str_to_frame: # will fail if coord_frame not string
raise KeyError('coord_frame must be one of %s, not "%s"'
% (list(_str_to_frame.keys()), coord_frame))
coord_frame = _str_to_frame[coord_frame] # now an int
# process points (copy and cast)
rr = np.array(pos['rr'], float)
nn = np.array(pos['nn'], float)
    if not (rr.ndim == nn.ndim == 2 and rr.shape[0] == nn.shape[0] and
            rr.shape[1] == nn.shape[1]):
raise RuntimeError('"rr" and "nn" must both be 2D arrays with '
'the same number of rows and 3 columns')
npts = rr.shape[0]
_normalize_vectors(nn)
nz = np.sum(np.sum(nn * nn, axis=1) == 0)
if nz != 0:
raise RuntimeError('%d sources have zero length normal' % nz)
logger.info('Positions (in meters) and orientations')
logger.info('%d sources' % npts)
# Ready to make the source space
sp = dict(coord_frame=coord_frame, type='discrete', nuse=npts, np=npts,
inuse=np.ones(npts, int), vertno=np.arange(npts), rr=rr, nn=nn,
id=-1)
return sp
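# Illustrative input for the helper above (two dipoles oriented along +z,
# positions in meters; the values are arbitrary examples):
#
#     pos = dict(rr=np.array([[0., 0., 0.05], [0., 0.02, 0.07]]),
#                nn=np.array([[0., 0., 1.], [0., 0., 1.]]))
#     src = _make_discrete_source_space(pos)
#     # src['nuse'] == 2 and src['type'] == 'discrete'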
def _import_nibabel(why='use MRI files'):
try:
import nibabel as nib
except ImportError as exp:
msg = 'nibabel is required to %s, got:\n%s' % (why, exp)
else:
msg = ''
if msg:
raise ImportError(msg)
return nib
def _mri_orientation(img, orientation):
"""Get MRI orientation information from an image.
Parameters
----------
img : instance of SpatialImage
The MRI image.
orientation : str
        Orientation that you want. Can be "axial", "sagittal", or "coronal".
Returns
-------
xyz : tuple, shape (3,)
The dimension indices for X, Y, and Z.
flips : tuple, shape (3,)
Whether each dimension requires a flip.
order : tuple, shape (3,)
The resulting order of the data if the given ``xyz`` and ``flips``
are used.
Notes
-----
.. versionadded:: 0.21
"""
import nibabel as nib
_validate_type(img, nib.spatialimages.SpatialImage)
_check_option('orientation', orientation, ('coronal', 'axial', 'sagittal'))
axcodes = ''.join(nib.orientations.aff2axcodes(img.affine))
flips = {o: (1 if o in axcodes else -1) for o in 'RAS'}
axcodes = axcodes.replace('L', 'R').replace('P', 'A').replace('I', 'S')
order = dict(
coronal=('R', 'S', 'A'),
axial=('R', 'A', 'S'),
sagittal=('A', 'S', 'R'),
)[orientation]
xyz = tuple(axcodes.index(c) for c in order)
flips = tuple(flips[c] for c in order)
return xyz, flips, order
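# Example of querying orientation info with the helper above (assumes
# nibabel is available and ``fname_t1`` points to some T1 image; both are
# assumptions for illustration):
#
#     import nibabel as nib
#     xyz, flips, order = _mri_orientation(nib.load(fname_t1), 'axial')
#     # ``xyz`` gives the data axes corresponding to R, A, S for an axial
#     # view and ``flips`` indicates which of them must be reversed.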
def _get_mri_info_data(mri, data):
# Read the segmentation data using nibabel
if data:
_import_nibabel('load MRI atlas data')
out = dict()
_, out['vox_mri_t'], out['mri_ras_t'], dims, _, mgz = _read_mri_info(
mri, return_img=True)
out.update(
mri_width=dims[0], mri_height=dims[1],
        mri_depth=dims[2], mri_volume_name=mri)
if data:
assert mgz is not None
out['mri_vox_t'] = invert_transform(out['vox_mri_t'])
out['data'] = np.asarray(mgz.dataobj)
return out
def _get_atlas_values(vol_info, rr):
# Transform MRI coordinates (where our surfaces live) to voxels
rr_vox = apply_trans(vol_info['mri_vox_t'], rr)
good = ((rr_vox >= -.5) &
(rr_vox < np.array(vol_info['data'].shape, int) - 0.5)).all(-1)
idx = np.round(rr_vox[good].T).astype(np.int64)
values = np.full(rr.shape[0], np.nan)
values[good] = vol_info['data'][tuple(idx)]
return values
def _make_volume_source_space(surf, grid, exclude, mindist, mri=None,
volume_labels=None, do_neighbors=True, n_jobs=1,
vol_info={}, single_volume=False):
"""Make a source space which covers the volume bounded by surf."""
# Figure out the grid size in the MRI coordinate frame
if 'rr' in surf:
mins = np.min(surf['rr'], axis=0)
maxs = np.max(surf['rr'], axis=0)
cm = np.mean(surf['rr'], axis=0) # center of mass
maxdist = np.linalg.norm(surf['rr'] - cm, axis=1).max()
else:
mins = surf['r0'] - surf['R']
maxs = surf['r0'] + surf['R']
cm = surf['r0'].copy()
maxdist = surf['R']
# Define the sphere which fits the surface
logger.info('Surface CM = (%6.1f %6.1f %6.1f) mm'
% (1000 * cm[0], 1000 * cm[1], 1000 * cm[2]))
logger.info('Surface fits inside a sphere with radius %6.1f mm'
% (1000 * maxdist))
logger.info('Surface extent:')
for c, mi, ma in zip('xyz', mins, maxs):
logger.info(' %s = %6.1f ... %6.1f mm'
% (c, 1000 * mi, 1000 * ma))
maxn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else -
np.floor(np.abs(m) / grid) - 1 for m in maxs], int)
minn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else -
np.floor(np.abs(m) / grid) - 1 for m in mins], int)
logger.info('Grid extent:')
for c, mi, ma in zip('xyz', minn, maxn):
logger.info(' %s = %6.1f ... %6.1f mm'
% (c, 1000 * mi * grid, 1000 * ma * grid))
# Now make the initial grid
ns = tuple(maxn - minn + 1)
npts = np.prod(ns)
nrow = ns[0]
ncol = ns[1]
nplane = nrow * ncol
# x varies fastest, then y, then z (can use unravel to do this)
rr = np.meshgrid(np.arange(minn[2], maxn[2] + 1),
np.arange(minn[1], maxn[1] + 1),
np.arange(minn[0], maxn[0] + 1), indexing='ij')
x, y, z = rr[2].ravel(), rr[1].ravel(), rr[0].ravel()
rr = np.array([x * grid, y * grid, z * grid]).T
sp = dict(np=npts, nn=np.zeros((npts, 3)), rr=rr,
inuse=np.ones(npts, bool), type='vol', nuse=npts,
coord_frame=FIFF.FIFFV_COORD_MRI, id=-1, shape=ns)
sp['nn'][:, 2] = 1.0
assert sp['rr'].shape[0] == npts
logger.info('%d sources before omitting any.', sp['nuse'])
# Exclude infeasible points
dists = np.linalg.norm(sp['rr'] - cm, axis=1)
bads = np.where(np.logical_or(dists < exclude, dists > maxdist))[0]
sp['inuse'][bads] = False
sp['nuse'] -= len(bads)
logger.info('%d sources after omitting infeasible sources not within '
'%0.1f - %0.1f mm.',
sp['nuse'], 1000 * exclude, 1000 * maxdist)
if 'rr' in surf:
_filter_source_spaces(surf, mindist, None, [sp], n_jobs)
else: # sphere
vertno = np.where(sp['inuse'])[0]
bads = (np.linalg.norm(sp['rr'][vertno] - surf['r0'], axis=-1) >=
surf['R'] - mindist / 1000.)
sp['nuse'] -= bads.sum()
sp['inuse'][vertno[bads]] = False
sp['vertno'] = np.where(sp['inuse'])[0]
del vertno
del surf
logger.info('%d sources remaining after excluding the sources outside '
'the surface and less than %6.1f mm inside.'
% (sp['nuse'], mindist))
# Restrict sources to volume of interest
if volume_labels is None:
sp['seg_name'] = 'the whole brain'
sps = [sp]
else:
if not do_neighbors:
            raise RuntimeError('volume_label cannot be used unless '
                               'do_neighbors is True')
sps = list()
orig_sp = sp
# reduce the sizes when we deepcopy
for volume_label, id_ in volume_labels.items():
# this saves us some memory
memodict = dict()
for key in ('rr', 'nn'):
if key in orig_sp:
arr = orig_sp[key]
memodict[id(arr)] = arr
sp = deepcopy(orig_sp, memodict)
good = _get_atlas_values(vol_info, sp['rr'][sp['vertno']]) == id_
n_good = good.sum()
logger.info(' Selected %d voxel%s from %s'
% (n_good, _pl(n_good), volume_label))
# Update source info
sp['inuse'][sp['vertno'][~good]] = False
sp['vertno'] = sp['vertno'][good]
sp['nuse'] = sp['inuse'].sum()
sp['seg_name'] = volume_label
sp['mri_file'] = mri
sps.append(sp)
del orig_sp
assert len(sps) == len(volume_labels)
# This will undo some of the work above, but the calculations are
# pretty trivial so allow it
if single_volume:
for sp in sps[1:]:
sps[0]['inuse'][sp['vertno']] = True
sp = sps[0]
sp['seg_name'] = '+'.join(s['seg_name'] for s in sps)
sps = sps[:1]
sp['vertno'] = np.where(sp['inuse'])[0]
sp['nuse'] = len(sp['vertno'])
del sp, volume_labels
if not do_neighbors:
return sps
k = np.arange(npts)
neigh = np.empty((26, npts), int)
neigh.fill(-1)
# Figure out each neighborhood:
# 6-neighborhood first
idxs = [z > minn[2], x < maxn[0], y < maxn[1],
x > minn[0], y > minn[1], z < maxn[2]]
offsets = [-nplane, 1, nrow, -1, -nrow, nplane]
for n, idx, offset in zip(neigh[:6], idxs, offsets):
n[idx] = k[idx] + offset
# Then the rest to complete the 26-neighborhood
# First the plane below
idx1 = z > minn[2]
idx2 = np.logical_and(idx1, x < maxn[0])
neigh[6, idx2] = k[idx2] + 1 - nplane
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[7, idx3] = k[idx3] + 1 + nrow - nplane
idx2 = np.logical_and(idx1, y < maxn[1])
neigh[8, idx2] = k[idx2] + nrow - nplane
idx2 = np.logical_and(idx1, x > minn[0])
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[9, idx3] = k[idx3] - 1 + nrow - nplane
neigh[10, idx2] = k[idx2] - 1 - nplane
idx3 = np.logical_and(idx2, y > minn[1])
neigh[11, idx3] = k[idx3] - 1 - nrow - nplane
idx2 = np.logical_and(idx1, y > minn[1])
neigh[12, idx2] = k[idx2] - nrow - nplane
idx3 = np.logical_and(idx2, x < maxn[0])
neigh[13, idx3] = k[idx3] + 1 - nrow - nplane
# Then the same plane
idx1 = np.logical_and(x < maxn[0], y < maxn[1])
neigh[14, idx1] = k[idx1] + 1 + nrow
idx1 = x > minn[0]
idx2 = np.logical_and(idx1, y < maxn[1])
neigh[15, idx2] = k[idx2] - 1 + nrow
idx2 = np.logical_and(idx1, y > minn[1])
neigh[16, idx2] = k[idx2] - 1 - nrow
idx1 = np.logical_and(y > minn[1], x < maxn[0])
neigh[17, idx1] = k[idx1] + 1 - nrow - nplane
# Finally one plane above
idx1 = z < maxn[2]
idx2 = np.logical_and(idx1, x < maxn[0])
neigh[18, idx2] = k[idx2] + 1 + nplane
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[19, idx3] = k[idx3] + 1 + nrow + nplane
idx2 = np.logical_and(idx1, y < maxn[1])
neigh[20, idx2] = k[idx2] + nrow + nplane
idx2 = np.logical_and(idx1, x > minn[0])
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[21, idx3] = k[idx3] - 1 + nrow + nplane
neigh[22, idx2] = k[idx2] - 1 + nplane
idx3 = np.logical_and(idx2, y > minn[1])
neigh[23, idx3] = k[idx3] - 1 - nrow + nplane
idx2 = np.logical_and(idx1, y > minn[1])
neigh[24, idx2] = k[idx2] - nrow + nplane
idx3 = np.logical_and(idx2, x < maxn[0])
neigh[25, idx3] = k[idx3] + 1 - nrow + nplane
# Omit unused vertices from the neighborhoods
logger.info('Adjusting the neighborhood info.')
r0 = minn * grid
voxel_size = grid * np.ones(3)
ras = np.eye(3)
src_mri_t = _make_voxel_ras_trans(r0, ras, voxel_size)
neigh_orig = neigh
for sp in sps:
# remove non source-space points
neigh = neigh_orig.copy()
neigh[:, np.logical_not(sp['inuse'])] = -1
# remove these points from neigh
old_shape = neigh.shape
neigh = neigh.ravel()
checks = np.where(neigh >= 0)[0]
removes = np.logical_not(np.in1d(checks, sp['vertno']))
neigh[checks[removes]] = -1
neigh.shape = old_shape
neigh = neigh.T
# Thought we would need this, but C code keeps -1 vertices, so we will:
# neigh = [n[n >= 0] for n in enumerate(neigh[vertno])]
sp['neighbor_vert'] = neigh
# Set up the volume data (needed for creating the interpolation matrix)
sp['src_mri_t'] = src_mri_t
sp['vol_dims'] = maxn - minn + 1
for key in ('mri_width', 'mri_height', 'mri_depth', 'mri_volume_name',
'vox_mri_t', 'mri_ras_t'):
if key in vol_info:
sp[key] = vol_info[key]
_print_coord_trans(sps[0]['src_mri_t'], 'Source space : ')
for key in ('vox_mri_t', 'mri_ras_t'):
if key in sps[0]:
_print_coord_trans(sps[0][key], 'MRI volume : ')
return sps
def _vol_vertex(width, height, jj, kk, pp):
return jj + width * kk + pp * (width * height)
def _get_mgz_header(fname):
"""Adapted from nibabel to quickly extract header info."""
if not fname.endswith('.mgz'):
raise IOError('Filename must end with .mgz')
header_dtd = [('version', '>i4'), ('dims', '>i4', (4,)),
('type', '>i4'), ('dof', '>i4'), ('goodRASFlag', '>i2'),
('delta', '>f4', (3,)), ('Mdc', '>f4', (3, 3)),
('Pxyz_c', '>f4', (3,))]
header_dtype = np.dtype(header_dtd)
with GzipFile(fname, 'rb') as fid:
hdr_str = fid.read(header_dtype.itemsize)
header = np.ndarray(shape=(), dtype=header_dtype,
buffer=hdr_str)
# dims
dims = header['dims'].astype(int)
dims = dims[:3] if len(dims) == 4 else dims
# vox2ras_tkr
delta = header['delta']
ds = np.array(delta, float)
ns = np.array(dims * ds) / 2.0
v2rtkr = np.array([[-ds[0], 0, 0, ns[0]],
[0, 0, ds[2], -ns[2]],
[0, -ds[1], 0, ns[1]],
[0, 0, 0, 1]], dtype=np.float32)
# ras2vox
d = np.diag(delta)
pcrs_c = dims / 2.0
Mdc = header['Mdc'].T
pxyz_0 = header['Pxyz_c'] - np.dot(Mdc, np.dot(d, pcrs_c))
M = np.eye(4, 4)
M[0:3, 0:3] = np.dot(Mdc, d)
M[0:3, 3] = pxyz_0.T
header = dict(dims=dims, vox2ras_tkr=v2rtkr, vox2ras=M,
zooms=header['delta'])
return header
def _src_vol_dims(s):
w, h, d = [s[f'mri_{key}'] for key in ('width', 'height', 'depth')]
return w, h, d, np.prod([w, h, d])
def _add_interpolator(sp):
"""Compute a sparse matrix to interpolate the data into an MRI volume."""
# extract transformation information from mri
from scipy import sparse
mri_width, mri_height, mri_depth, nvox = _src_vol_dims(sp[0])
#
# Convert MRI voxels from destination (MRI volume) to source (volume
# source space subset) coordinates
#
combo_trans = combine_transforms(sp[0]['vox_mri_t'],
invert_transform(sp[0]['src_mri_t']),
'mri_voxel', 'mri_voxel')
logger.info('Setting up volume interpolation ...')
inuse = np.zeros(sp[0]['np'], bool)
for s_ in sp:
np.logical_or(inuse, s_['inuse'], out=inuse)
interp = _grid_interp(
sp[0]['vol_dims'], (mri_width, mri_height, mri_depth),
combo_trans['trans'], order=1, inuse=inuse)
assert isinstance(interp, sparse.csr_matrix)
# Compose the sparse matrices
for si, s in enumerate(sp):
if len(sp) == 1: # no need to do these gymnastics
this_interp = interp
        else:  # limit to rows that have any contribution from inuse
# This is the same as the following, but more efficient:
# any_ = np.asarray(
# interp[:, s['inuse'].astype(bool)].sum(1)
# )[:, 0].astype(bool)
any_ = np.zeros(interp.indices.size + 1, np.int64)
any_[1:] = s['inuse'][interp.indices]
np.cumsum(any_, out=any_)
any_ = np.diff(any_[interp.indptr]) > 0
assert any_.shape == (interp.shape[0],)
indptr = np.empty_like(interp.indptr)
indptr[0] = 0
indptr[1:] = np.diff(interp.indptr)
indptr[1:][~any_] = 0
np.cumsum(indptr, out=indptr)
mask = np.repeat(any_, np.diff(interp.indptr))
indices = interp.indices[mask]
data = interp.data[mask]
assert data.shape == indices.shape == (indptr[-1],)
this_interp = sparse.csr_matrix(
(data, indices, indptr), shape=interp.shape)
s['interpolator'] = this_interp
logger.info(' %d/%d nonzero values for %s'
% (len(s['interpolator'].data), nvox, s['seg_name']))
logger.info('[done]')
def _grid_interp(from_shape, to_shape, trans, order=1, inuse=None):
"""Compute a grid-to-grid linear or nearest interpolation given."""
from scipy import sparse
from_shape = np.array(from_shape, int)
to_shape = np.array(to_shape, int)
trans = np.array(trans, np.float64) # to -> from
assert trans.shape == (4, 4) and np.array_equal(trans[3], [0, 0, 0, 1])
assert from_shape.shape == to_shape.shape == (3,)
shape = (np.prod(to_shape), np.prod(from_shape))
if inuse is None:
inuse = np.ones(shape[1], bool)
assert inuse.dtype == bool
assert inuse.shape == (shape[1],)
data, indices, indptr = _grid_interp_jit(
from_shape, to_shape, trans, order, inuse)
data = np.concatenate(data)
indices = np.concatenate(indices)
indptr = np.cumsum(indptr)
interp = sparse.csr_matrix((data, indices, indptr), shape=shape)
return interp
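# A small self-contained sketch of ``_grid_interp``: map a 2x2x2 source
# grid onto a 4x4x4 destination grid whose voxels are half the size
# (``trans`` maps destination voxel coordinates to source voxel
# coordinates, so it scales by 0.5 here; the shapes are arbitrary):
#
#     trans = np.diag([0.5, 0.5, 0.5, 1.0])
#     interp = _grid_interp((2, 2, 2), (4, 4, 4), trans, order=1)
#     # interp is a sparse CSR matrix of shape (64, 8); each destination
#     # voxel value is a trilinear combination of up to 8 source voxels.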
# This is all set up to do jit, but it's actually slower!
def _grid_interp_jit(from_shape, to_shape, trans, order, inuse):
# Loop over slices to save (lots of) memory
# Note that it is the slowest incrementing index
# This is equivalent to using mgrid and reshaping, but faster
assert order in (0, 1)
data = list()
indices = list()
nvox = np.prod(to_shape)
indptr = np.zeros(nvox + 1, np.int32)
mri_width, mri_height, mri_depth = to_shape
r0__ = np.empty((4, mri_height, mri_width), np.float64)
r0__[0, :, :] = np.arange(mri_width)
r0__[1, :, :] = np.arange(mri_height).reshape(1, mri_height, 1)
r0__[3, :, :] = 1
r0_ = np.reshape(r0__, (4, mri_width * mri_height))
width, height, _ = from_shape
trans = np.ascontiguousarray(trans)
maxs = (from_shape - 1).reshape(1, 3)
for p in range(mri_depth):
r0_[2] = p
# Transform our vertices from their MRI space into our source space's
# frame (this is labeled as FIFFV_MNE_COORD_MRI_VOXEL, but it's
# really a subset of the entire volume!)
r0 = (trans @ r0_)[:3].T
if order == 0:
rx = np.round(r0).astype(np.int32)
keep = np.where(np.logical_and(np.all(rx >= 0, axis=1),
np.all(rx <= maxs, axis=1)))[0]
indptr[keep + p * mri_height * mri_width + 1] = 1
indices.append(_vol_vertex(width, height, *rx[keep].T))
data.append(np.ones(len(keep)))
continue
rn = np.floor(r0).astype(np.int32)
good = np.where(np.logical_and(np.all(rn >= -1, axis=1),
np.all(rn <= maxs, axis=1)))[0]
if len(good) == 0:
continue
rns = rn[good]
r0s = r0[good]
jj_g, kk_g, pp_g = (rns >= 0).T
jjp1_g, kkp1_g, ppp1_g = (rns < maxs).T # same as rns + 1 <= maxs
# now we take each MRI voxel *in this space*, and figure out how
# to make its value the weighted sum of voxels in the volume source
# space. This is a trilinear interpolation based on the
# fact that we know we're interpolating from one volumetric grid
# into another.
jj = rns[:, 0]
kk = rns[:, 1]
pp = rns[:, 2]
vss = np.empty((len(jj), 8), np.int32)
jjp1 = jj + 1
kkp1 = kk + 1
ppp1 = pp + 1
mask = np.empty((len(jj), 8), bool)
vss[:, 0] = _vol_vertex(width, height, jj, kk, pp)
mask[:, 0] = jj_g & kk_g & pp_g
vss[:, 1] = _vol_vertex(width, height, jjp1, kk, pp)
mask[:, 1] = jjp1_g & kk_g & pp_g
vss[:, 2] = _vol_vertex(width, height, jjp1, kkp1, pp)
mask[:, 2] = jjp1_g & kkp1_g & pp_g
vss[:, 3] = _vol_vertex(width, height, jj, kkp1, pp)
mask[:, 3] = jj_g & kkp1_g & pp_g
vss[:, 4] = _vol_vertex(width, height, jj, kk, ppp1)
mask[:, 4] = jj_g & kk_g & ppp1_g
vss[:, 5] = _vol_vertex(width, height, jjp1, kk, ppp1)
mask[:, 5] = jjp1_g & kk_g & ppp1_g
vss[:, 6] = _vol_vertex(width, height, jjp1, kkp1, ppp1)
mask[:, 6] = jjp1_g & kkp1_g & ppp1_g
vss[:, 7] = _vol_vertex(width, height, jj, kkp1, ppp1)
mask[:, 7] = jj_g & kkp1_g & ppp1_g
# figure out weights for each vertex
xf = r0s[:, 0] - rns[:, 0].astype(np.float64)
yf = r0s[:, 1] - rns[:, 1].astype(np.float64)
zf = r0s[:, 2] - rns[:, 2].astype(np.float64)
omxf = 1.0 - xf
omyf = 1.0 - yf
omzf = 1.0 - zf
this_w = np.empty((len(good), 8), np.float64)
this_w[:, 0] = omxf * omyf * omzf
this_w[:, 1] = xf * omyf * omzf
this_w[:, 2] = xf * yf * omzf
this_w[:, 3] = omxf * yf * omzf
this_w[:, 4] = omxf * omyf * zf
this_w[:, 5] = xf * omyf * zf
this_w[:, 6] = xf * yf * zf
this_w[:, 7] = omxf * yf * zf
# eliminate zeros
mask[this_w <= 0] = False
# eliminate rows where none of inuse are actually present
row_mask = mask.copy()
row_mask[mask] = inuse[vss[mask]]
mask[~(row_mask.any(axis=-1))] = False
# construct the parts we need
indices.append(vss[mask])
indptr[good + p * mri_height * mri_width + 1] = mask.sum(1)
data.append(this_w[mask])
return data, indices, indptr
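# The trilinear weighting used above can be illustrated in isolation. For a
# point at fractional offsets (xf, yf, zf) within the unit cube spanned by
# its 8 neighboring grid points, the corner weights are products of the
# offsets and their complements (a standalone sketch, values chosen
# arbitrarily):
#
#     xf, yf, zf = 0.25, 0.5, 0.75
#     omxf, omyf, omzf = 1 - xf, 1 - yf, 1 - zf
#     w = [omxf * omyf * omzf, xf * omyf * omzf, xf * yf * omzf,
#          omxf * yf * omzf, omxf * omyf * zf, xf * omyf * zf,
#          xf * yf * zf, omxf * yf * zf]
#     # sum(w) == 1.0 for any point inside the cube.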
def _pts_in_hull(pts, hull, tolerance=1e-12):
return np.all([np.dot(eq[:-1], pts.T) + eq[-1] <= tolerance
for eq in hull.equations], axis=0)
@verbose
def _filter_source_spaces(surf, limit, mri_head_t, src, n_jobs=1,
verbose=None):
"""Remove all source space points closer than a given limit (in mm)."""
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD and mri_head_t is None:
raise RuntimeError('Source spaces are in head coordinates and no '
'coordinate transform was provided!')
# How close are the source points to the surface?
out_str = 'Source spaces are in '
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
inv_trans = invert_transform(mri_head_t)
out_str += 'head coordinates.'
elif src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
out_str += 'MRI coordinates.'
else:
out_str += 'unknown (%d) coordinates.' % src[0]['coord_frame']
logger.info(out_str)
out_str = 'Checking that the sources are inside the surface'
if limit > 0.0:
out_str += ' and at least %6.1f mm away' % (limit)
logger.info(out_str + ' (will take a few...)')
# fit a sphere to a surf quickly
check_inside = _CheckInside(surf)
# Check that the source is inside surface (often the inner skull)
for s in src:
vertno = np.where(s['inuse'])[0] # can't trust s['vertno'] this deep
# Convert all points here first to save time
r1s = s['rr'][vertno]
if s['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
r1s = apply_trans(inv_trans['trans'], r1s)
inside = check_inside(r1s, n_jobs)
omit_outside = (~inside).sum()
# vectorized nearest using BallTree (or cdist)
omit_limit = 0
if limit > 0.0:
# only check "inside" points
idx = np.where(inside)[0]
check_r1s = r1s[idx]
if check_inside.inner_r is not None:
# ... and those that are at least inner_sphere + limit away
mask = (np.linalg.norm(check_r1s - check_inside.cm, axis=-1) >=
check_inside.inner_r - limit / 1000.)
idx = idx[mask]
check_r1s = check_r1s[mask]
dists = _compute_nearest(
surf['rr'], check_r1s, return_dists=True, method='cKDTree')[1]
close = (dists < limit / 1000.0)
omit_limit = np.sum(close)
inside[idx[close]] = False
s['inuse'][vertno[~inside]] = False
del vertno
s['nuse'] -= (omit_outside + omit_limit)
s['vertno'] = np.where(s['inuse'])[0]
if omit_outside > 0:
extras = [omit_outside]
extras += ['s', 'they are'] if omit_outside > 1 else ['', 'it is']
logger.info(' %d source space point%s omitted because %s '
'outside the inner skull surface.' % tuple(extras))
if omit_limit > 0:
extras = [omit_limit]
            extras += ['s'] if omit_limit > 1 else ['']
extras += [limit]
logger.info(' %d source space point%s omitted because of the '
'%6.1f-mm distance limit.' % tuple(extras))
# Adjust the patch inds as well if necessary
if omit_limit + omit_outside > 0:
_adjust_patch_info(s)
@verbose
def _adjust_patch_info(s, verbose=None):
"""Adjust patch information in place after vertex omission."""
if s.get('patch_inds') is not None:
if s['nearest'] is None:
# This shouldn't happen, but if it does, we can probably come
# up with a more clever solution
raise RuntimeError('Cannot adjust patch information properly, '
'please contact the mne-python developers')
_add_patch_info(s)
@verbose
def _ensure_src(src, kind=None, extra='', verbose=None):
"""Ensure we have a source space."""
_check_option(
'kind', kind, (None, 'surface', 'volume', 'mixed', 'discrete'))
msg = 'src must be a string or instance of SourceSpaces%s' % (extra,)
if _check_path_like(src):
src = str(src)
if not op.isfile(src):
raise IOError('Source space file "%s" not found' % src)
logger.info('Reading %s...' % src)
src = read_source_spaces(src, verbose=False)
if not isinstance(src, SourceSpaces):
raise ValueError('%s, got %s (type %s)' % (msg, src, type(src)))
if kind is not None:
if src.kind != kind and src.kind == 'mixed':
if kind == 'surface':
src = src[:2]
elif kind == 'volume':
src = src[2:]
if src.kind != kind:
raise ValueError('Source space must contain %s type, got '
'%s' % (kind, src.kind))
return src
def _ensure_src_subject(src, subject):
src_subject = src._subject
if subject is None:
subject = src_subject
if subject is None:
raise ValueError('source space is too old, subject must be '
'provided')
elif src_subject is not None and subject != src_subject:
raise ValueError('Mismatch between provided subject "%s" and subject '
'name "%s" in the source space'
% (subject, src_subject))
return subject
_DIST_WARN_LIMIT = 10242 # warn for anything larger than ICO-5
@verbose
def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None):
"""Compute inter-source distances along the cortical surface.
This function will also try to add patch info for the source space.
    Patch info will only be added if the ``dist_limit`` is sufficiently
    high that all points on the surface are within ``dist_limit`` of a
    point in the source space.
Parameters
----------
src : instance of SourceSpaces
The source spaces to compute distances for.
dist_limit : float
The upper limit of distances to include (in meters).
Note: if limit < np.inf, scipy > 0.13 (bleeding edge as of
10/2013) must be installed. If 0, then only patch (nearest vertex)
information is added.
%(n_jobs)s
Ignored if ``dist_limit==0.``.
%(verbose)s
Returns
-------
src : instance of SourceSpaces
The original source spaces, with distance information added.
The distances are stored in src[n]['dist'].
Note: this function operates in-place.
Notes
-----
This function can be memory- and CPU-intensive. On a high-end machine
(2012) running 6 jobs in parallel, an ico-5 (10242 per hemi) source space
takes about 10 minutes to compute all distances (``dist_limit = np.inf``).
With ``dist_limit = 0.007``, computing distances takes about 1 minute.
We recommend computing distances once per source space and then saving
the source space to disk, as the computed distances will automatically be
stored along with the source space data for future use.
"""
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import dijkstra
n_jobs = check_n_jobs(n_jobs)
src = _ensure_src(src)
dist_limit = float(dist_limit)
if dist_limit < 0:
raise ValueError('dist_limit must be non-negative, got %s'
% (dist_limit,))
patch_only = (dist_limit == 0)
if patch_only and not check_version('scipy', '1.3'):
raise RuntimeError('scipy >= 1.3 is required to calculate patch '
'information only, consider upgrading SciPy or '
'using dist_limit=np.inf when running '
'add_source_space_distances')
if src.kind != 'surface':
raise RuntimeError('Currently all source spaces must be of surface '
'type')
parallel, p_fun, _ = parallel_func(_do_src_distances, n_jobs)
min_dists = list()
min_idxs = list()
msg = 'patch information' if patch_only else 'source space distances'
logger.info('Calculating %s (limit=%s mm)...' % (msg, 1000 * dist_limit))
max_n = max(s['nuse'] for s in src)
if not patch_only and max_n > _DIST_WARN_LIMIT:
warn('Computing distances for %d source space points (in one '
'hemisphere) will be very slow, consider using add_dist=False'
% (max_n,))
for s in src:
adjacency = mesh_dist(s['tris'], s['rr'])
if patch_only:
min_dist, _, min_idx = dijkstra(
adjacency, indices=s['vertno'],
min_only=True, return_predecessors=True)
min_dists.append(min_dist.astype(np.float32))
min_idxs.append(min_idx)
for key in ('dist', 'dist_limit'):
s[key] = None
else:
d = parallel(p_fun(adjacency, s['vertno'], r, dist_limit)
for r in np.array_split(np.arange(len(s['vertno'])),
n_jobs))
# deal with indexing so we can add patch info
min_idx = np.array([dd[1] for dd in d])
min_dist = np.array([dd[2] for dd in d])
midx = np.argmin(min_dist, axis=0)
range_idx = np.arange(len(s['rr']))
min_dist = min_dist[midx, range_idx]
min_idx = min_idx[midx, range_idx]
min_dists.append(min_dist)
min_idxs.append(min_idx)
# convert to sparse representation
d = np.concatenate([dd[0] for dd in d]).ravel() # already float32
idx = d > 0
d = d[idx]
i, j = np.meshgrid(s['vertno'], s['vertno'])
i = i.ravel()[idx]
j = j.ravel()[idx]
s['dist'] = csr_matrix(
(d, (i, j)), shape=(s['np'], s['np']), dtype=np.float32)
s['dist_limit'] = np.array([dist_limit], np.float32)
# Let's see if our distance was sufficient to allow for patch info
if not any(np.any(np.isinf(md)) for md in min_dists):
# Patch info can be added!
for s, min_dist, min_idx in zip(src, min_dists, min_idxs):
s['nearest'] = min_idx
s['nearest_dist'] = min_dist
_add_patch_info(s)
else:
logger.info('Not adding patch information, dist_limit too small')
return src
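# A usage sketch for the function above (``src`` is assumed to be a surface
# source space created or loaded elsewhere; the 7 mm limit is just an
# example):
#
#     src = add_source_space_distances(src, dist_limit=0.007, n_jobs=2)
#     # src[0]['dist'] now holds a sparse matrix of geodesic distances and,
#     # if the limit was large enough, patch information is added as well.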
def _do_src_distances(con, vertno, run_inds, limit):
"""Compute source space distances in chunks."""
from scipy.sparse.csgraph import dijkstra
func = partial(dijkstra, limit=limit)
chunk_size = 20 # save memory by chunking (only a little slower)
lims = np.r_[np.arange(0, len(run_inds), chunk_size), len(run_inds)]
n_chunks = len(lims) - 1
# eventually we want this in float32, so save memory by only storing 32-bit
d = np.empty((len(run_inds), len(vertno)), np.float32)
min_dist = np.empty((n_chunks, con.shape[0]))
min_idx = np.empty((n_chunks, con.shape[0]), np.int32)
range_idx = np.arange(con.shape[0])
for li, (l1, l2) in enumerate(zip(lims[:-1], lims[1:])):
idx = vertno[run_inds[l1:l2]]
out = func(con, indices=idx)
midx = np.argmin(out, axis=0)
min_idx[li] = idx[midx]
min_dist[li] = out[midx, range_idx]
d[l1:l2] = out[:, vertno]
midx = np.argmin(min_dist, axis=0)
min_dist = min_dist[midx, range_idx]
min_idx = min_idx[midx, range_idx]
d[d == np.inf] = 0 # scipy will give us np.inf for uncalc. distances
return d, min_idx, min_dist
def get_volume_labels_from_aseg(mgz_fname, return_colors=False,
atlas_ids=None):
"""Return a list of names and colors of segmented volumes.
Parameters
----------
mgz_fname : str
        Filename to read. Typically aseg.mgz or some variant in the FreeSurfer
pipeline.
return_colors : bool
        If True, also return the label colors.
atlas_ids : dict | None
A lookup table providing a mapping from region names (str) to ID values
        (int). Can be None to use the standard FreeSurfer LUT.
.. versionadded:: 0.21.0
Returns
-------
label_names : list of str
The names of segmented volumes included in this mgz file.
label_colors : list of str
The RGB colors of the labels included in this mgz file.
See Also
--------
read_freesurfer_lut
Notes
-----
.. versionchanged:: 0.21.0
The label names are now sorted in the same order as their corresponding
values in the MRI file.
.. versionadded:: 0.9.0
"""
import nibabel as nib
atlas = nib.load(mgz_fname)
data = np.asarray(atlas.dataobj) # don't need float here
want = np.unique(data)
if atlas_ids is None:
atlas_ids, colors = read_freesurfer_lut()
elif return_colors:
raise ValueError('return_colors must be False if atlas_ids are '
'provided')
# restrict to the ones in the MRI, sorted by label name
keep = np.in1d(list(atlas_ids.values()), want)
keys = sorted((key for ki, key in enumerate(atlas_ids.keys()) if keep[ki]),
key=lambda x: atlas_ids[x])
if return_colors:
colors = [colors[k] for k in keys]
out = keys, colors
else:
out = keys
return out
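# Usage sketch for the function above (the path is an assumption for
# illustration):
#
#     aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
#     names, colors = get_volume_labels_from_aseg(aseg_fname,
#                                                 return_colors=True)
#     # ``names`` lists the segmented structures present in the file and
#     # ``colors`` their colors from the FreeSurfer LUT.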
# XXX this should probably be deprecated because it returns surface Labels,
# and probably isn't the way to go moving forward
# XXX this also assumes that the first two source spaces are surf without
# checking, which might not be the case (could be all volumes)
@fill_doc
def get_volume_labels_from_src(src, subject, subjects_dir):
"""Return a list of Label of segmented volumes included in the src space.
Parameters
----------
src : instance of SourceSpaces
The source space containing the volume regions.
%(subject)s
subjects_dir : str
        The FreeSurfer subjects directory.
Returns
-------
labels_aseg : list of Label
List of Label of segmented volumes included in src space.
"""
from . import Label
from . import get_volume_labels_from_aseg
# Read the aseg file
aseg_fname = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')
if not op.isfile(aseg_fname):
raise IOError('aseg file "%s" not found' % aseg_fname)
all_labels_aseg = get_volume_labels_from_aseg(
aseg_fname, return_colors=True)
# Create a list of Label
if len(src) < 2:
raise ValueError('No vol src space in src')
if any(np.any(s['type'] != 'vol') for s in src[2:]):
raise ValueError('source spaces have to be of vol type')
labels_aseg = list()
for nr in range(2, len(src)):
vertices = src[nr]['vertno']
pos = src[nr]['rr'][src[nr]['vertno'], :]
roi_str = src[nr]['seg_name']
try:
ind = all_labels_aseg[0].index(roi_str)
color = np.array(all_labels_aseg[1][ind]) / 255
        except ValueError:
            color = None  # label not found in the aseg lookup table
if 'left' in roi_str.lower():
hemi = 'lh'
roi_str = roi_str.replace('Left-', '') + '-lh'
elif 'right' in roi_str.lower():
hemi = 'rh'
roi_str = roi_str.replace('Right-', '') + '-rh'
else:
hemi = 'both'
label = Label(vertices=vertices, pos=pos, hemi=hemi,
name=roi_str, color=color,
subject=subject)
labels_aseg.append(label)
return labels_aseg
def _get_hemi(s):
"""Get a hemisphere from a given source space."""
if s['type'] != 'surf':
raise RuntimeError('Only surface source spaces supported')
if s['id'] == FIFF.FIFFV_MNE_SURF_LEFT_HEMI:
return 'lh', 0, s['id']
elif s['id'] == FIFF.FIFFV_MNE_SURF_RIGHT_HEMI:
return 'rh', 1, s['id']
else:
raise ValueError('unknown surface ID %s' % s['id'])
def _get_vertex_map_nn(fro_src, subject_from, subject_to, hemi, subjects_dir,
to_neighbor_tri=None):
"""Get a nearest-neigbor vertex match for a given hemi src.
The to_neighbor_tri can optionally be passed in to avoid recomputation
if it's already available.
"""
# adapted from mne_make_source_space.c, knowing accurate=False (i.e.
# nearest-neighbor mode should be used)
logger.info('Mapping %s %s -> %s (nearest neighbor)...'
% (hemi, subject_from, subject_to))
regs = [op.join(subjects_dir, s, 'surf', '%s.sphere.reg' % hemi)
for s in (subject_from, subject_to)]
reg_fro, reg_to = [read_surface(r, return_dict=True)[-1] for r in regs]
if to_neighbor_tri is not None:
reg_to['neighbor_tri'] = to_neighbor_tri
if 'neighbor_tri' not in reg_to:
reg_to['neighbor_tri'] = _triangle_neighbors(reg_to['tris'],
reg_to['np'])
    morph_inuse = np.zeros(len(reg_to['rr']), bool)
best = np.zeros(fro_src['np'], int)
ones = _compute_nearest(reg_to['rr'], reg_fro['rr'][fro_src['vertno']])
for v, one in zip(fro_src['vertno'], ones):
# if it were actually a proper morph map, we would do this, but since
# we know it's nearest neighbor list, we don't need to:
# this_mm = mm[v]
# one = this_mm.indices[this_mm.data.argmax()]
if morph_inuse[one]:
# Try the nearest neighbors
neigh = _get_surf_neighbors(reg_to, one) # on demand calc
was = one
one = neigh[np.where(~morph_inuse[neigh])[0]]
if len(one) == 0:
raise RuntimeError('vertex %d would be used multiple times.'
                                   % was)
one = one[0]
logger.info('Source space vertex moved from %d to %d because of '
'double occupation.' % (was, one))
best[v] = one
morph_inuse[one] = True
return best
@verbose
def morph_source_spaces(src_from, subject_to, surf='white', subject_from=None,
subjects_dir=None, verbose=None):
"""Morph an existing source space to a different subject.
.. warning:: This can be used in place of morphing source estimates for
multiple subjects, but there may be consequences in terms
of dipole topology.
Parameters
----------
src_from : instance of SourceSpaces
Surface source spaces to morph.
subject_to : str
The destination subject.
surf : str
The brain surface to use for the new source space.
subject_from : str | None
The "from" subject. For most source spaces this shouldn't need
to be provided, since it is stored in the source space itself.
subjects_dir : str | None
Path to SUBJECTS_DIR if it is not set in the environment.
%(verbose)s
Returns
-------
src : instance of SourceSpaces
The morphed source spaces.
Notes
-----
.. versionadded:: 0.10.0
"""
# adapted from mne_make_source_space.c
src_from = _ensure_src(src_from)
subject_from = _ensure_src_subject(src_from, subject_from)
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
src_out = list()
for fro in src_from:
hemi, idx, id_ = _get_hemi(fro)
to = op.join(subjects_dir, subject_to, 'surf', '%s.%s' % (hemi, surf,))
logger.info('Reading destination surface %s' % (to,))
to = read_surface(to, return_dict=True, verbose=False)[-1]
complete_surface_info(to, copy=False)
# Now we morph the vertices to the destination
# The C code does something like this, but with a nearest-neighbor
# mapping instead of the weighted one::
#
# >>> mm = read_morph_map(subject_from, subject_to, subjects_dir)
#
# Here we use a direct NN calculation, since picking the max from the
# existing morph map (which naively one might expect to be equivalent)
# differs for ~3% of vertices.
best = _get_vertex_map_nn(fro, subject_from, subject_to, hemi,
subjects_dir, to['neighbor_tri'])
for key in ('neighbor_tri', 'tri_area', 'tri_cent', 'tri_nn',
'use_tris'):
del to[key]
to['vertno'] = np.sort(best[fro['vertno']])
to['inuse'] = np.zeros(len(to['rr']), int)
to['inuse'][to['vertno']] = True
to['use_tris'] = best[fro['use_tris']]
to.update(nuse=len(to['vertno']), nuse_tri=len(to['use_tris']),
nearest=None, nearest_dist=None, patch_inds=None, pinfo=None,
dist=None, id=id_, dist_limit=None, type='surf',
coord_frame=FIFF.FIFFV_COORD_MRI, subject_his_id=subject_to,
rr=to['rr'] / 1000.)
src_out.append(to)
logger.info('[done]\n')
info = dict(working_dir=os.getcwd(), command_line=_get_call_line())
return SourceSpaces(src_out, info=info)
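# Usage sketch for the function above (assumes ``src_surf`` is an existing
# surface source space for some subject):
#
#     src_fsavg = morph_source_spaces(src_surf, subject_to='fsaverage',
#                                     subjects_dir=subjects_dir)
#     # Each hemisphere's vertices are remapped to the nearest vertices on
#     # the destination subject's registration sphere.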
@verbose
def _get_morph_src_reordering(vertices, src_from, subject_from, subject_to,
subjects_dir=None, verbose=None):
"""Get the reordering indices for a morphed source space.
Parameters
----------
vertices : list
The vertices for the left and right hemispheres.
src_from : instance of SourceSpaces
The original source space.
subject_from : str
The source subject.
subject_to : str
The destination subject.
%(subjects_dir)s
%(verbose)s
Returns
-------
data_idx : ndarray, shape (n_vertices,)
The array used to reshape the data.
from_vertices : list
The right and left hemisphere vertex numbers for the "from" subject.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
from_vertices = list()
data_idxs = list()
offset = 0
for ii, hemi in enumerate(('lh', 'rh')):
# Get the mapping from the original source space to the destination
# subject's surface vertex numbers
best = _get_vertex_map_nn(src_from[ii], subject_from, subject_to,
hemi, subjects_dir)
full_mapping = best[src_from[ii]['vertno']]
# Tragically, we might not have all of our vertno left (e.g. because
# some are omitted during fwd calc), so we must do some indexing magic:
# From all vertices, a subset could be chosen by fwd calc:
used_vertices = np.in1d(full_mapping, vertices[ii])
from_vertices.append(src_from[ii]['vertno'][used_vertices])
remaining_mapping = full_mapping[used_vertices]
if not np.array_equal(np.sort(remaining_mapping), vertices[ii]) or \
not np.in1d(vertices[ii], full_mapping).all():
raise RuntimeError('Could not map vertices, perhaps the wrong '
'subject "%s" was provided?' % subject_from)
# And our data have been implicitly remapped by the forced ascending
# vertno order in source spaces
implicit_mapping = np.argsort(remaining_mapping) # happens to data
data_idx = np.argsort(implicit_mapping) # to reverse the mapping
data_idx += offset # hemisphere offset
data_idxs.append(data_idx)
offset += len(implicit_mapping)
data_idx = np.concatenate(data_idxs)
# this one is really just a sanity check for us, should never be violated
# by users
assert np.array_equal(np.sort(data_idx),
np.arange(sum(len(v) for v in vertices)))
return data_idx, from_vertices
def _compare_source_spaces(src0, src1, mode='exact', nearest=True,
dist_tol=1.5e-3):
"""Compare two source spaces.
Note: this function is also used by forward/tests/test_make_forward.py
"""
from numpy.testing import (assert_allclose, assert_array_equal,
assert_equal, assert_, assert_array_less)
from scipy.spatial.distance import cdist
if mode != 'exact' and 'approx' not in mode: # 'nointerp' can be appended
raise RuntimeError('unknown mode %s' % mode)
for si, (s0, s1) in enumerate(zip(src0, src1)):
# first check the keys
a, b = set(s0.keys()), set(s1.keys())
assert_equal(a, b, str(a ^ b))
for name in ['nuse', 'ntri', 'np', 'type', 'id']:
a, b = s0[name], s1[name]
if name == 'id': # workaround for old NumPy bug
a, b = int(a), int(b)
assert_equal(a, b, name)
for name in ['subject_his_id']:
if name in s0 or name in s1:
assert_equal(s0[name], s1[name], name)
for name in ['interpolator']:
if name in s0 or name in s1:
assert name in s0, f'{name} in s1 but not s0'
            assert name in s1, f'{name} in s0 but not s1'
n = np.prod(s0['interpolator'].shape)
diffs = (s0['interpolator'] - s1['interpolator']).data
if len(diffs) > 0 and 'nointerp' not in mode:
# 0.1%
assert_array_less(
np.sqrt(np.sum(diffs * diffs) / n), 0.001,
err_msg=f'{name} > 0.1%')
for name in ['nn', 'rr', 'nuse_tri', 'coord_frame', 'tris']:
if s0[name] is None:
assert_(s1[name] is None, name)
else:
if mode == 'exact':
assert_array_equal(s0[name], s1[name], name)
else: # 'approx' in mode
atol = 1e-3 if name == 'nn' else 1e-4
assert_allclose(s0[name], s1[name], rtol=1e-3, atol=atol,
err_msg=name)
for name in ['seg_name']:
if name in s0 or name in s1:
assert_equal(s0[name], s1[name], name)
# these fields will exist if patch info was added
if nearest:
for name in ['nearest', 'nearest_dist', 'patch_inds']:
if s0[name] is None:
assert_(s1[name] is None, name)
else:
atol = 0 if mode == 'exact' else 1e-6
assert_allclose(s0[name], s1[name],
atol=atol, err_msg=name)
for name in ['pinfo']:
if s0[name] is None:
assert_(s1[name] is None, name)
else:
assert_(len(s0[name]) == len(s1[name]), name)
for p1, p2 in zip(s0[name], s1[name]):
assert_(all(p1 == p2), name)
if mode == 'exact':
for name in ['inuse', 'vertno', 'use_tris']:
assert_array_equal(s0[name], s1[name], err_msg=name)
for name in ['dist_limit']:
assert_(s0[name] == s1[name], name)
for name in ['dist']:
if s0[name] is not None:
assert_equal(s1[name].shape, s0[name].shape)
assert_(len((s0['dist'] - s1['dist']).data) == 0)
else: # 'approx' in mode:
# deal with vertno, inuse, and use_tris carefully
for ii, s in enumerate((s0, s1)):
assert_array_equal(s['vertno'], np.where(s['inuse'])[0],
'src%s[%s]["vertno"] != '
'np.where(src%s[%s]["inuse"])[0]'
% (ii, si, ii, si))
assert_equal(len(s0['vertno']), len(s1['vertno']))
agreement = np.mean(s0['inuse'] == s1['inuse'])
assert_(agreement >= 0.99, "%s < 0.99" % agreement)
if agreement < 1.0:
# make sure mismatched vertno are within 1.5mm
v0 = np.setdiff1d(s0['vertno'], s1['vertno'])
v1 = np.setdiff1d(s1['vertno'], s0['vertno'])
dists = cdist(s0['rr'][v0], s1['rr'][v1])
assert_allclose(np.min(dists, axis=1), np.zeros(len(v0)),
atol=dist_tol, err_msg='mismatched vertno')
if s0['use_tris'] is not None: # for "spacing"
assert_array_equal(s0['use_tris'].shape, s1['use_tris'].shape)
else:
assert_(s1['use_tris'] is None)
assert_(np.mean(s0['use_tris'] == s1['use_tris']) > 0.99)
# The above "if s0[name] is not None" can be removed once the sample
# dataset is updated to have a source space with distance info
for name in ['working_dir', 'command_line']:
if mode == 'exact':
assert_equal(src0.info[name], src1.info[name])
else: # 'approx' in mode:
if name in src0.info:
assert_(name in src1.info, '"%s" missing' % name)
else:
assert_(name not in src1.info, '"%s" should not exist' % name)
def _set_source_space_vertices(src, vertices):
"""Reset the list of source space vertices."""
assert len(src) == len(vertices)
for s, v in zip(src, vertices):
s['inuse'].fill(0)
s['nuse'] = len(v)
s['vertno'] = np.array(v)
s['inuse'][s['vertno']] = 1
s['use_tris'] = np.array([[]], int)
s['nuse_tri'] = np.array([0])
# This will fix 'patch_info' and 'pinfo'
_adjust_patch_info(s, verbose=False)
return src
def _get_src_nn(s, use_cps=True, vertices=None):
vertices = s['vertno'] if vertices is None else vertices
if use_cps and s.get('patch_inds') is not None:
nn = np.empty((len(vertices), 3))
for vp, p in enumerate(np.searchsorted(s['vertno'], vertices)):
assert(s['vertno'][p] == vertices[vp])
            # Use cortical patch statistics: average the normals over the patch
nn[vp] = np.sum(
s['nn'][s['pinfo'][s['patch_inds'][p]], :], axis=0)
nn /= np.linalg.norm(nn, axis=-1, keepdims=True)
else:
nn = s['nn'][vertices, :]
return nn
@verbose
def compute_distance_to_sensors(src, info, picks=None, trans=None,
verbose=None):
"""Compute distances between vertices and sensors.
Parameters
----------
src : instance of SourceSpaces
The object with vertex positions for which to compute distances to
sensors.
info : instance of Info
Measurement information with sensor positions to which distances shall
be computed.
%(picks_good_data)s
%(trans_not_none)s
%(verbose)s
Returns
-------
depth : array of shape (n_vertices, n_channels)
The Euclidean distances of source space vertices with respect to
sensors.
"""
from scipy.spatial.distance import cdist
assert isinstance(src, SourceSpaces)
_validate_type(info, (Info,), 'info')
# Load the head<->MRI transform if necessary
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
src_trans, _ = _get_trans(trans, allow_none=False)
else:
src_trans = Transform('head', 'head') # Identity transform
# get vertex position in same coordinates as for sensors below
src_pos = np.vstack([
        apply_trans(src_trans, s['rr'][s['inuse'].astype(bool)])
for s in src
])
# Select channels to be used for distance calculations
picks = _picks_to_idx(info, picks, 'data', exclude=())
# get sensor positions
sensor_pos = []
dev_to_head = None
for ch in picks:
# MEG channels are in device coordinates, translate them to head
if channel_type(info, ch) in ['mag', 'grad']:
if dev_to_head is None:
dev_to_head = _ensure_trans(info['dev_head_t'],
'meg', 'head')
sensor_pos.append(apply_trans(dev_to_head,
info['chs'][ch]['loc'][:3]))
else:
sensor_pos.append(info['chs'][ch]['loc'][:3])
sensor_pos = np.array(sensor_pos)
depths = cdist(src_pos, sensor_pos)
return depths
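# Usage sketch for the function above (``src``, ``info``, and ``trans`` are
# assumed to come from an existing forward-modeling setup):
#
#     depths = compute_distance_to_sensors(src, info, trans=trans)
#     # depths has shape (n_used_vertices, n_picked_channels) and holds the
#     # Euclidean distance from each source vertex to each sensor.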
@verbose
def get_mni_fiducials(subject, subjects_dir=None, verbose=None):
"""Estimate fiducials for a subject.
Parameters
----------
%(subject)s
%(subjects_dir)s
%(verbose)s
Returns
-------
fids_mri : list
List of estimated fiducials (each point in a dict), in the order
LPA, nasion, RPA.
Notes
-----
    This takes the ``fsaverage-fiducials.fif`` file included with MNE
    (which contains the LPA, nasion, and RPA for the ``fsaverage`` subject)
    and transforms them to the given FreeSurfer subject's MRI space.
    The MRI of ``fsaverage`` is already in MNI Talairach space, so the
    inverse of the given subject's MNI Talairach affine transformation
    (``$SUBJECTS_DIR/$SUBJECT/mri/transforms/talairach.xfm``) is applied
    to estimate the subject's fiducial locations.
For more details about the coordinate systems and transformations involved,
see https://surfer.nmr.mgh.harvard.edu/fswiki/CoordinateSystems and
:ref:`plot_source_alignment`.
"""
# Eventually we might want to allow using the MNI Talairach with-skull
    # transformation rather than the standard brain-based MNI Talairach
# transformation, and/or project the points onto the head surface
# (if available).
fname_fids_fs = os.path.join(os.path.dirname(__file__), 'data',
'fsaverage', 'fsaverage-fiducials.fif')
# Read fsaverage fiducials file and subject Talairach.
fids, coord_frame = read_fiducials(fname_fids_fs)
assert coord_frame == FIFF.FIFFV_COORD_MRI
if subject == 'fsaverage':
return fids # special short-circuit for fsaverage
mni_mri_t = invert_transform(read_talxfm(subject, subjects_dir))
for f in fids:
f['r'] = apply_trans(mni_mri_t, f['r'])
return fids
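# Usage sketch for the function above (the subject name is illustrative):
#
#     fids = get_mni_fiducials('sample', subjects_dir=subjects_dir)
#     # fids is a list of three dicts (LPA, nasion, RPA) with positions in
#     # the subject's FreeSurfer MRI (surface RAS) coordinate frame.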
|
kambysese/mne-python
|
mne/source_space.py
|
Python
|
bsd-3-clause
| 127,142
|
[
"Mayavi"
] |
991f435452f13b17d0e0ceb882991c10d6988fe25ed5f00b4e8b4dfe3842fbfd
|
''' Test_RSS_Command_VOBOXAvailabilityCommand
'''
import unittest
import mock
import DIRAC.ResourceStatusSystem.Command.VOBOXAvailabilityCommand as moduleTested
__RCSID__ = '$Id: $'
################################################################################
class VOBOXAvailabilityCommand_TestCase( unittest.TestCase ):
def setUp( self ):
'''
Setup
'''
# Mock external libraries / modules not interesting for the unit test
mock_RPC = mock.Mock()
mock_RPC.ping.return_value = { 'OK' : True, 'Value' : {} }
mock_RPCClient = mock.Mock()
mock_RPCClient.return_value = mock_RPC
self.mock_RPCClient = mock_RPCClient
# Add mocks to moduleTested
moduleTested.RPCClient = self.mock_RPCClient
self.moduleTested = moduleTested
self.testClass = self.moduleTested.VOBOXAvailabilityCommand
def tearDown( self ):
'''
TearDown
'''
del self.testClass
del self.moduleTested
del self.mock_RPCClient
################################################################################
# Tests
class VOBOXAvailabilityCommand_Success( VOBOXAvailabilityCommand_TestCase ):
def test_instantiate( self ):
''' tests that we can instantiate one object of the tested class
'''
command = self.testClass()
self.assertEqual( 'VOBOXAvailabilityCommand', command.__class__.__name__ )
def test_init( self ):
''' tests that the init method does what it should do
'''
command = self.testClass()
self.assertEqual( {'onlyCache': False}, command.args )
self.assertEqual( {}, command.apis )
def test_doCommand( self ):
''' tests the doCommand method
'''
command = self.testClass()
res = command.doCommand()
self.assertEqual( False, res[ 'OK' ] )
command = self.testClass( args = { 'serviceURL' : '' } )
res = command.doCommand()
self.assertEqual( False, res[ 'OK' ] )
command = self.testClass( args = { 'serviceURL' : 'protocol://site:port/path1/path2' } )
res = command.doCommand()
self.assertTrue(res['OK'])
self.assertEqual( 0, res[ 'Value' ][ 'serviceUpTime' ] )
self.assertEqual( 0, res[ 'Value' ][ 'machineUpTime' ] )
self.assertEqual( 'site', res[ 'Value' ][ 'site' ] )
self.assertEqual( 'path1', res[ 'Value' ][ 'system' ] )
self.assertEqual( 'path2', res[ 'Value' ][ 'service' ] )
mock_RPC = mock.Mock()
mock_RPC.ping.return_value = { 'OK' : True,
'Value' : {'service uptime' : 1,
'host uptime' : 2} }
self.moduleTested.RPCClient.return_value = mock_RPC
command = self.testClass( args = { 'serviceURL' : 'protocol://site:port/path1/path2' } )
res = command.doCommand()
self.assertTrue(res['OK'])
self.assertEqual( 1, res[ 'Value' ][ 'serviceUpTime' ] )
self.assertEqual( 2, res[ 'Value' ][ 'machineUpTime' ] )
self.assertEqual( 'site', res[ 'Value' ][ 'site' ] )
self.assertEqual( 'path1', res[ 'Value' ][ 'system' ] )
self.assertEqual( 'path2', res[ 'Value' ][ 'service' ] )
# Restore the module
self.moduleTested.RPCClient.return_value = self.mock_RPCClient
reload( self.moduleTested )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
andresailer/DIRAC
|
ResourceStatusSystem/Command/test/Test_RSS_Command_VOBOXAvailabilityCommand.py
|
Python
|
gpl-3.0
| 3,435
|
[
"DIRAC"
] |
9a5c18cd237d7f44d2b8e414a71682a11fa1aa2ac9a3131d80d64391863ccbe6
|
"""Introduction
^^^^^^^^^^^^
The likelihood is :math:`p(y|f,X)` which is how well we will predict
target values given inputs :math:`X` and our latent function :math:`f`
(:math:`y` without noise). The marginal likelihood :math:`p(y|X)` is the
same as the likelihood except that we marginalize out the model
:math:`f`. The importance of likelihoods in Gaussian Processes is in
determining the 'best' values of kernel and noise hyperparameters to
relate known,
observed and unobserved data. The purpose of optimizing a model
(e.g. :py:class:`GPy.models.GPRegression`) is to determine the 'best'
hyperparameters i.e. those that minimize negative log marginal
likelihood.
.. inheritance-diagram:: GPy.likelihoods.likelihood GPy.likelihoods.mixed_noise.MixedNoise
:top-classes: GPy.core.parameterization.parameterized.Parameterized
Most likelihood classes inherit directly from
:py:class:`GPy.likelihoods.likelihood`, although an intermediary class
:py:class:`GPy.likelihoods.mixed_noise.MixedNoise` is used by
:py:class:`GPy.likelihoods.multioutput_likelihood`.
"""
from .bernoulli import Bernoulli
from .exponential import Exponential
from .gaussian import Gaussian, HeteroscedasticGaussian
from .gamma import Gamma
from .poisson import Poisson
from .student_t import StudentT
from .likelihood import Likelihood
from .mixed_noise import MixedNoise
from .binomial import Binomial
from .weibull import Weibull
from .loglogistic import LogLogistic
from .multioutput_likelihood import MultioutputLikelihood
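# A minimal illustration of the role of a likelihood described above
# (a sketch only; GPRegression uses a Gaussian likelihood by default and
# optimizing the model fits the kernel and noise hyperparameters):
#
#     import numpy as np
#     import GPy
#     X = np.linspace(0, 1, 20)[:, None]
#     Y = np.sin(6 * X) + 0.05 * np.random.randn(20, 1)
#     m = GPy.models.GPRegression(X, Y)
#     m.optimize()  # maximizes the log marginal likelihood p(y|X)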
|
SheffieldML/GPy
|
GPy/likelihoods/__init__.py
|
Python
|
bsd-3-clause
| 1,494
|
[
"Gaussian"
] |
2a59407a79f4cb18e43a46117a95359732d6587bc96d21a815346fed0aeb6f3b
|
""" This is a test of the chain
PilotsLoggingClient -> PilotsLoggingHandler -> PilotsLoggingDB
It supposes that the DB is present, and that the service is running
"""
import unittest
from DIRAC.WorkloadManagementSystem.Client.PilotsLoggingClient import PilotsLoggingClient
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
class TestPilotsLogging( unittest.TestCase ):
def setUp( self ):
self.pilotsLoggingClient = PilotsLoggingClient()
def tearDown( self ):
pass
class PilotsLogging( TestPilotsLogging ):
def test_PilotsLoggingAddGetDelete( self ):
resp = self.pilotsLoggingClient.addPilotsLogging('11111111-1111-1111-1111-111111111111', 'timestamp1', 'test', 'phase', 'status', 'messageContent')
self.assertTrue(resp['OK'], 'Failed to add PilotsLogging')
resp = self.pilotsLoggingClient.addPilotsLogging('11111111-1111-1111-1111-111111111111', 'timestamp2', 'test2', 'phase2', 'status2', 'messageContent2')
self.assertTrue(resp['OK'], 'Failed to add PilotsLogging')
resp = self.pilotsLoggingClient.getPilotsLogging('11111111-1111-1111-1111-111111111111')
self.assertTrue(resp['OK'], 'Failed to get PilotsLogging')
test_sample = {
'pilotUUID': '11111111-1111-1111-1111-111111111111',
'timestamp': 'timestamp1',
'source': 'test',
'phase': 'phase',
'status': 'status',
'messageContent': 'messageContent',
}
test_sample2 = {
'pilotUUID': '11111111-1111-1111-1111-111111111111',
'timestamp': 'timestamp2',
'source': 'test2',
'phase': 'phase2',
'status': 'status2',
'messageContent': 'messageContent2',
}
self.assertEqual(resp['Value'], [ test_sample, test_sample2 ], 'Wrong data comes out of Service')
resp = self.pilotsLoggingClient.deletePilotsLogging('11111111-1111-1111-1111-111111111111')
self.assertTrue(resp['OK'], 'Failed to delete PilotsLogging')
resp = self.pilotsLoggingClient.getPilotsLogging('11111111-1111-1111-1111-111111111111')
self.assertTrue(resp['OK'], 'Failed to get PilotsLogging')
self.assertEqual(resp['Value'], [], 'PilotsLogging was not really deleted')
def test_PilotsLoggingEmptyGetDelete( self ):
resp = self.pilotsLoggingClient.getPilotsLogging( '11111111-1111-1111-1111-111111111111' )
self.assertTrue( resp['OK'], 'Failed to get PilotsLogging' )
resp = self.pilotsLoggingClient.deletePilotsLogging( '11111111-1111-1111-1111-111111111111' )
self.assertTrue( resp['OK'], 'Failed to delete PilotsLogging' )
def test_PilotsLoggingDeleteList( self ):
test_sample1 = {
'pilotUUID': '11111111-1111-1111-1111-111111111111',
'timestamp': 'timestamp1',
'source': 'test',
'phase': 'phase1',
'status': 'status1',
'messageContent': 'messageContent1',
}
test_sample2 = {
'pilotUUID': '22222222-2222-2222-2222-222222222222',
'timestamp': 'timestamp2',
'source': 'test',
'phase': 'phase2',
'status': 'status2',
'messageContent': 'messageContent2',
}
resp = self.pilotsLoggingClient.addPilotsLogging('11111111-1111-1111-1111-111111111111', 'timestamp1', 'test', 'phase1', 'status1', 'messageContent1')
self.assertTrue(resp['OK'], 'Failed to add PilotsLogging')
resp = self.pilotsLoggingClient.addPilotsLogging('22222222-2222-2222-2222-222222222222', 'timestamp2', 'test', 'phase2', 'status2', 'messageContent2')
self.assertTrue(resp['OK'], 'Failed to add PilotsLogging')
resp = self.pilotsLoggingClient.getPilotsLogging('11111111-1111-1111-1111-111111111111')
self.assertTrue(resp['OK'], 'Failed to get PilotsLogging')
self.assertEqual(resp['Value'], [ test_sample1 ], 'Wrong data comes out of Service')
resp = self.pilotsLoggingClient.getPilotsLogging('22222222-2222-2222-2222-222222222222')
self.assertTrue(resp['OK'], 'Failed to get PilotsLogging')
self.assertEqual(resp['Value'], [ test_sample2 ], 'Wrong data comes out of Service')
resp = self.pilotsLoggingClient.deletePilotsLogging( ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222'] )
self.assertTrue(resp['OK'], 'Failed to delete PilotsLogging')
resp = self.pilotsLoggingClient.getPilotsLogging('11111111-1111-1111-1111-111111111111')
self.assertTrue(resp['OK'], 'Failed to get PilotsLogging')
self.assertEqual(resp['Value'], [], 'PilotsLogging was not really deleted')
resp = self.pilotsLoggingClient.getPilotsLogging('22222222-2222-2222-2222-222222222222')
self.assertTrue(resp['OK'], 'Failed to get PilotsLogging')
self.assertEqual(resp['Value'], [], 'PilotsLogging was not really deleted')
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase( TestPilotsLogging )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( PilotsLogging ) )
testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
|
arrabito/DIRAC
|
tests/Integration/WorkloadManagementSystem/Test_PilotsLoggingClient.py
|
Python
|
gpl-3.0
| 5,238
|
[
"DIRAC"
] |
f5917dd3485b1cdd550e054b32fcf282b8d9892ae9ba3945826387e216b2e0e9
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import codecs
import contextlib
import cookielib
import copy
import getpass
import hashlib
import httplib
import inspect
import json
import locale
import logging
import ntpath
import os
import posixpath
import random
import re
import socket
import string
import subprocess
import sys
import tempfile
import time
import urllib
import urllib2
import urlparse
import unicodedata
from ConfigParser import DEFAULTSECT
from ConfigParser import RawConfigParser
from StringIO import StringIO
from difflib import SequenceMatcher
from math import sqrt
from optparse import OptionValueError
from xml.dom import minidom
from xml.sax import parse
from xml.sax import SAXParseException
from extra.beep.beep import beep
from extra.cloak.cloak import decloak
from extra.safe2bin.safe2bin import safecharencode
from lib.core.bigarray import BigArray
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import paths
from lib.core.convert import base64pickle
from lib.core.convert import base64unpickle
from lib.core.convert import hexdecode
from lib.core.convert import htmlunescape
from lib.core.convert import stdoutencode
from lib.core.convert import unicodeencode
from lib.core.convert import utf8encode
from lib.core.decorators import cachedmethod
from lib.core.defaults import defaults
from lib.core.dicts import DBMS_DICT
from lib.core.dicts import DEFAULT_DOC_ROOTS
from lib.core.dicts import DEPRECATED_OPTIONS
from lib.core.dicts import SQL_STATEMENTS
from lib.core.enums import ADJUST_TIME_DELAY
from lib.core.enums import CONTENT_STATUS
from lib.core.enums import CHARSET_TYPE
from lib.core.enums import DBMS
from lib.core.enums import EXPECTED
from lib.core.enums import HEURISTIC_TEST
from lib.core.enums import HTTP_HEADER
from lib.core.enums import HTTPMETHOD
from lib.core.enums import MKSTEMP_PREFIX
from lib.core.enums import OS
from lib.core.enums import PLACE
from lib.core.enums import PAYLOAD
from lib.core.enums import REFLECTIVE_COUNTER
from lib.core.enums import SORT_ORDER
from lib.core.exception import SqlmapDataException
from lib.core.exception import SqlmapGenericException
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapInstallationException
from lib.core.exception import SqlmapMissingDependence
from lib.core.exception import SqlmapSilentQuitException
from lib.core.exception import SqlmapSyntaxException
from lib.core.exception import SqlmapSystemException
from lib.core.exception import SqlmapUserQuitException
from lib.core.exception import SqlmapValueException
from lib.core.log import LOGGER_HANDLER
from lib.core.optiondict import optDict
from lib.core.settings import BANNER
from lib.core.settings import BOLD_PATTERNS
from lib.core.settings import BOUNDED_INJECTION_MARKER
from lib.core.settings import BRUTE_DOC_ROOT_PREFIXES
from lib.core.settings import BRUTE_DOC_ROOT_SUFFIXES
from lib.core.settings import BRUTE_DOC_ROOT_TARGET_MARK
from lib.core.settings import CUSTOM_INJECTION_MARK_CHAR
from lib.core.settings import DBMS_DIRECTORY_DICT
from lib.core.settings import DEFAULT_COOKIE_DELIMITER
from lib.core.settings import DEFAULT_GET_POST_DELIMITER
from lib.core.settings import DEFAULT_MSSQL_SCHEMA
from lib.core.settings import DUMMY_USER_INJECTION
from lib.core.settings import DYNAMICITY_MARK_LENGTH
from lib.core.settings import ERROR_PARSING_REGEXES
from lib.core.settings import FILE_PATH_REGEXES
from lib.core.settings import FORCE_COOKIE_EXPIRATION_TIME
from lib.core.settings import FORM_SEARCH_REGEX
from lib.core.settings import GENERIC_DOC_ROOT_DIRECTORY_NAMES
from lib.core.settings import GIT_PAGE
from lib.core.settings import GITHUB_REPORT_OAUTH_TOKEN
from lib.core.settings import GOOGLE_ANALYTICS_COOKIE_PREFIX
from lib.core.settings import HASHDB_MILESTONE_VALUE
from lib.core.settings import HOST_ALIASES
from lib.core.settings import INFERENCE_UNKNOWN_CHAR
from lib.core.settings import INVALID_UNICODE_CHAR_FORMAT
from lib.core.settings import IP_ADDRESS_REGEX
from lib.core.settings import ISSUES_PAGE
from lib.core.settings import IS_WIN
from lib.core.settings import LARGE_OUTPUT_THRESHOLD
from lib.core.settings import LOCALHOST
from lib.core.settings import MIN_ENCODED_LEN_CHECK
from lib.core.settings import MIN_TIME_RESPONSES
from lib.core.settings import MIN_VALID_DELAYED_RESPONSE
from lib.core.settings import NETSCAPE_FORMAT_HEADER_COOKIES
from lib.core.settings import NULL
from lib.core.settings import PARAMETER_AMP_MARKER
from lib.core.settings import PARAMETER_SEMICOLON_MARKER
from lib.core.settings import PARTIAL_HEX_VALUE_MARKER
from lib.core.settings import PARTIAL_VALUE_MARKER
from lib.core.settings import PAYLOAD_DELIMITER
from lib.core.settings import PLATFORM
from lib.core.settings import PRINTABLE_CHAR_REGEX
from lib.core.settings import PUSH_VALUE_EXCEPTION_RETRY_COUNT
from lib.core.settings import PYVERSION
from lib.core.settings import REFERER_ALIASES
from lib.core.settings import REFLECTED_BORDER_REGEX
from lib.core.settings import REFLECTED_MAX_REGEX_PARTS
from lib.core.settings import REFLECTED_REPLACEMENT_REGEX
from lib.core.settings import REFLECTED_VALUE_MARKER
from lib.core.settings import REFLECTIVE_MISS_THRESHOLD
from lib.core.settings import SENSITIVE_DATA_REGEX
from lib.core.settings import SENSITIVE_OPTIONS
from lib.core.settings import SUPPORTED_DBMS
from lib.core.settings import TEXT_TAG_REGEX
from lib.core.settings import TIME_STDEV_COEFF
from lib.core.settings import UNICODE_ENCODING
from lib.core.settings import UNKNOWN_DBMS_VERSION
from lib.core.settings import URI_QUESTION_MARKER
from lib.core.settings import URLENCODE_CHAR_LIMIT
from lib.core.settings import URLENCODE_FAILSAFE_CHARS
from lib.core.settings import USER_AGENT_ALIASES
from lib.core.settings import VERSION_STRING
from lib.core.threads import getCurrentThreadData
from lib.utils.sqlalchemy import _sqlalchemy
from thirdparty.clientform.clientform import ParseResponse
from thirdparty.clientform.clientform import ParseError
from thirdparty.colorama.initialise import init as coloramainit
from thirdparty.magic import magic
from thirdparty.odict.odict import OrderedDict
from thirdparty.termcolor.termcolor import colored
class UnicodeRawConfigParser(RawConfigParser):
"""
RawConfigParser with unicode writing support
"""
def write(self, fp):
"""
Write an .ini-format representation of the configuration state.
"""
if self._defaults:
fp.write("[%s]\n" % DEFAULTSECT)
for (key, value) in self._defaults.items():
fp.write("%s = %s\n" % (key, getUnicode(value, UNICODE_ENCODING).replace('\n', '\n\t')))
fp.write("\n")
for section in self._sections:
fp.write("[%s]\n" % section)
for (key, value) in self._sections[section].items():
if key != "__name__":
if value is None:
fp.write("%s\n" % (key))
else:
fp.write("%s = %s\n" % (key, getUnicode(value, UNICODE_ENCODING).replace('\n', '\n\t')))
fp.write("\n")
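# Illustrative output of UnicodeRawConfigParser.write() (assumed section and
# option names, shown only as an example of the produced .ini layout):
#
#   [Target]
#   url = http://www.example.com/index.php?id=1
#
#   [Request]
#   agent = None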
class Format(object):
@staticmethod
def humanize(values, chain=" or "):
return chain.join(values)
# Get methods
@staticmethod
def getDbms(versions=None):
"""
Format the back-end DBMS fingerprint value and return it
formatted as a human readable string.
@return: detected back-end DBMS based upon fingerprint techniques.
@rtype: C{str}
"""
if versions is None and Backend.getVersionList():
versions = Backend.getVersionList()
return Backend.getDbms() if versions is None else "%s %s" % (Backend.getDbms(), " and ".join(filter(None, versions)))
@staticmethod
def getErrorParsedDBMSes():
"""
Parses the knowledge base htmlFp list and returns its values
formatted as a human readable string.
@return: list of possible back-end DBMS based upon error messages
parsing.
@rtype: C{str}
"""
htmlParsed = None
if len(kb.htmlFp) == 0 or kb.heuristicTest != HEURISTIC_TEST.POSITIVE:
pass
elif len(kb.htmlFp) == 1:
htmlParsed = kb.htmlFp[0]
elif len(kb.htmlFp) > 1:
htmlParsed = " or ".join(kb.htmlFp)
return htmlParsed
@staticmethod
def getOs(target, info):
"""
Formats the back-end operating system fingerprint value
and returns it as a human readable string.
Example of info (kb.headersFp) dictionary:
{
'distrib': set(['Ubuntu']),
'type': set(['Linux']),
'technology': set(['PHP 5.2.6', 'Apache 2.2.9']),
'release': set(['8.10'])
}
Example of info (kb.bannerFp) dictionary:
{
'sp': set(['Service Pack 4']),
'dbmsVersion': '8.00.194',
'dbmsServicePack': '0',
'distrib': set(['2000']),
'dbmsRelease': '2000',
'type': set(['Windows'])
}
@return: detected back-end operating system based upon fingerprint
techniques.
@rtype: C{str}
"""
infoStr = ""
infoApi = {}
if info and "type" in info:
if hasattr(conf, "api"):
infoApi["%s operating system" % target] = info
else:
infoStr += "%s operating system: %s" % (target, Format.humanize(info["type"]))
if "distrib" in info:
infoStr += " %s" % Format.humanize(info["distrib"])
if "release" in info:
infoStr += " %s" % Format.humanize(info["release"])
if "sp" in info:
infoStr += " %s" % Format.humanize(info["sp"])
if "codename" in info:
infoStr += " (%s)" % Format.humanize(info["codename"])
if "technology" in info:
if hasattr(conf, "api"):
infoApi["web application technology"] = Format.humanize(info["technology"], ", ")
else:
infoStr += "\nweb application technology: %s" % Format.humanize(info["technology"], ", ")
if hasattr(conf, "api"):
return infoApi
else:
return infoStr.lstrip()
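# Illustrative results of the Format helpers above (values are assumptions):
#   Format.getDbms(["5.0", "5.5"])              -> "MySQL 5.0 and 5.5" (when MySQL is fingerprinted)
#   Format.getOs("back-end DBMS", kb.bannerFp)  -> "back-end DBMS operating system: Windows 2000"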
class Backend:
# Set methods
@staticmethod
def setDbms(dbms):
dbms = aliasToDbmsEnum(dbms)
if dbms is None:
return None
# Little precaution, in theory this condition should always be false
elif kb.dbms is not None and kb.dbms != dbms:
warnMsg = "there appears to be a high probability that "
warnMsg += "this could be a false positive case"
logger.warn(warnMsg)
msg = "sqlmap previously fingerprinted back-end DBMS as "
msg += "%s. However now it has been fingerprinted " % kb.dbms
msg += "as %s. " % dbms
msg += "Please, specify which DBMS should be "
msg += "correct [%s (default)/%s] " % (kb.dbms, dbms)
while True:
_ = readInput(msg, default=kb.dbms)
if aliasToDbmsEnum(_) == kb.dbms:
kb.dbmsVersion = []
kb.resolutionDbms = kb.dbms
break
elif aliasToDbmsEnum(_) == dbms:
kb.dbms = aliasToDbmsEnum(_)
break
else:
warnMsg = "invalid value"
logger.warn(warnMsg)
elif kb.dbms is None:
kb.dbms = aliasToDbmsEnum(dbms)
return kb.dbms
@staticmethod
def setVersion(version):
if isinstance(version, basestring):
kb.dbmsVersion = [version]
return kb.dbmsVersion
@staticmethod
def setVersionList(versionsList):
if isinstance(versionsList, list):
kb.dbmsVersion = versionsList
elif isinstance(versionsList, basestring):
Backend.setVersion(versionsList)
else:
logger.error("invalid format of versionsList")
@staticmethod
def forceDbms(dbms, sticky=False):
if not kb.stickyDBMS:
kb.forcedDbms = aliasToDbmsEnum(dbms)
kb.stickyDBMS = sticky
@staticmethod
def flushForcedDbms(force=False):
if not kb.stickyDBMS or force:
kb.forcedDbms = None
kb.stickyDBMS = False
@staticmethod
def setOs(os):
if os is None:
return None
# Little precaution, in theory this condition should always be false
elif kb.os is not None and isinstance(os, basestring) and kb.os.lower() != os.lower():
msg = "sqlmap previously fingerprinted back-end DBMS "
msg += "operating system %s. However now it has " % kb.os
msg += "been fingerprinted to be %s. " % os
msg += "Please, specify which OS is "
msg += "correct [%s (default)/%s] " % (kb.os, os)
while True:
_ = readInput(msg, default=kb.os)
if _ == kb.os:
break
elif _ == os:
kb.os = _.capitalize()
break
else:
warnMsg = "invalid value"
logger.warn(warnMsg)
elif kb.os is None and isinstance(os, basestring):
kb.os = os.capitalize()
return kb.os
@staticmethod
def setOsVersion(version):
if version is None:
return None
elif kb.osVersion is None and isinstance(version, basestring):
kb.osVersion = version
@staticmethod
def setOsServicePack(sp):
if sp is None:
return None
elif kb.osSP is None and isinstance(sp, int):
kb.osSP = sp
@staticmethod
def setArch():
msg = "what is the back-end database management system architecture?"
msg += "\n[1] 32-bit (default)"
msg += "\n[2] 64-bit"
while True:
_ = readInput(msg, default='1')
if isinstance(_, basestring) and _.isdigit() and int(_) in (1, 2):
kb.arch = 32 if int(_) == 1 else 64
break
else:
warnMsg = "invalid value. Valid values are 1 and 2"
logger.warn(warnMsg)
return kb.arch
# Get methods
@staticmethod
def getForcedDbms():
return aliasToDbmsEnum(kb.get("forcedDbms"))
@staticmethod
def getDbms():
return aliasToDbmsEnum(kb.get("dbms"))
@staticmethod
def getErrorParsedDBMSes():
"""
Returns an array with the DBMS names parsed so far.
This function is called to:
1. Ask user whether or not to skip specific DBMS tests in detection phase,
lib/controller/checks.py - detection phase.
2. Sort the fingerprint of the DBMS, lib/controller/handler.py -
fingerprint phase.
"""
return kb.htmlFp if kb.get("heuristicTest") == HEURISTIC_TEST.POSITIVE else []
@staticmethod
def getIdentifiedDbms():
"""
This function is called to:
1. Sort the tests, getSortedInjectionTests() - detection phase.
2. Etc.
"""
dbms = None
if not kb:
pass
elif not kb.get("testMode") and conf.get("dbmsHandler") and getattr(conf.dbmsHandler, "_dbms", None):
dbms = conf.dbmsHandler._dbms
elif Backend.getForcedDbms() is not None:
dbms = Backend.getForcedDbms()
elif Backend.getDbms() is not None:
dbms = Backend.getDbms()
elif kb.get("injection") and kb.injection.dbms:
dbms = unArrayizeValue(kb.injection.dbms)
elif Backend.getErrorParsedDBMSes():
dbms = unArrayizeValue(Backend.getErrorParsedDBMSes())
elif conf.get("dbms"):
dbms = conf.get("dbms")
return aliasToDbmsEnum(dbms)
@staticmethod
def getVersion():
versions = filter(None, flattenValue(kb.dbmsVersion))
if not isNoneValue(versions):
return versions[0]
else:
return None
@staticmethod
def getVersionList():
versions = filter(None, flattenValue(kb.dbmsVersion))
if not isNoneValue(versions):
return versions
else:
return None
@staticmethod
def getOs():
return kb.os
@staticmethod
def getOsVersion():
return kb.osVersion
@staticmethod
def getOsServicePack():
return kb.osSP
@staticmethod
def getArch():
if kb.arch is None:
Backend.setArch()
return kb.arch
# Comparison methods
@staticmethod
def isDbms(dbms):
if not kb.get("testMode") and all((Backend.getDbms(), Backend.getIdentifiedDbms())) and Backend.getDbms() != Backend.getIdentifiedDbms():
singleTimeWarnMessage("identified ('%s') and fingerprinted ('%s') DBMSes differ. If you experience problems in enumeration phase please rerun with '--flush-session'" % (Backend.getIdentifiedDbms(), Backend.getDbms()))
return Backend.getIdentifiedDbms() == aliasToDbmsEnum(dbms)
@staticmethod
def isDbmsWithin(aliases):
return Backend.getDbms() is not None and Backend.getDbms().lower() in aliases
@staticmethod
def isVersion(version):
return Backend.getVersion() is not None and Backend.getVersion() == version
@staticmethod
def isVersionWithin(versionList):
if Backend.getVersionList() is None:
return False
for _ in Backend.getVersionList():
if _ != UNKNOWN_DBMS_VERSION and _ in versionList:
return True
return False
@staticmethod
def isVersionGreaterOrEqualThan(version):
return Backend.getVersion() is not None and str(Backend.getVersion()) >= str(version)
@staticmethod
def isOs(os):
return Backend.getOs() is not None and Backend.getOs().lower() == os.lower()
def paramToDict(place, parameters=None):
"""
Split the parameters into names and values, check if these parameters
are within the testable parameters and return them in a dictionary.
"""
testableParameters = OrderedDict()
if place in conf.parameters and not parameters:
parameters = conf.parameters[place]
parameters = re.sub(r"&(\w{1,4});", r"%s\g<1>%s" % (PARAMETER_AMP_MARKER, PARAMETER_SEMICOLON_MARKER), parameters)
if place == PLACE.COOKIE:
splitParams = parameters.split(conf.cookieDel or DEFAULT_COOKIE_DELIMITER)
else:
splitParams = parameters.split(conf.paramDel or DEFAULT_GET_POST_DELIMITER)
for element in splitParams:
element = re.sub(r"%s(.+?)%s" % (PARAMETER_AMP_MARKER, PARAMETER_SEMICOLON_MARKER), r"&\g<1>;", element)
parts = element.split("=")
if len(parts) >= 2:
parameter = urldecode(parts[0].replace(" ", ""))
if not parameter:
continue
if conf.paramDel and conf.paramDel == '\n':
parts[-1] = parts[-1].rstrip()
condition = not conf.testParameter
condition |= conf.testParameter is not None and parameter in conf.testParameter
condition |= place == PLACE.COOKIE and len(intersect((PLACE.COOKIE,), conf.testParameter, True)) > 0
if condition:
testableParameters[parameter] = "=".join(parts[1:])
if not conf.multipleTargets and not (conf.csrfToken and parameter == conf.csrfToken):
_ = urldecode(testableParameters[parameter], convall=True)
if (_.endswith("'") and _.count("'") == 1
or re.search(r'\A9{3,}', _) or re.search(r'\A-\d+\Z', _) or re.search(DUMMY_USER_INJECTION, _))\
and not parameter.upper().startswith(GOOGLE_ANALYTICS_COOKIE_PREFIX):
warnMsg = "it appears that you have provided tainted parameter values "
warnMsg += "('%s') with most likely leftover " % element
warnMsg += "chars/statements from manual SQL injection test(s). "
warnMsg += "Please, always use only valid parameter values "
warnMsg += "so sqlmap could be able to run properly"
logger.warn(warnMsg)
message = "are you really sure that you want to continue (sqlmap could have problems)? [y/N] "
test = readInput(message, default="N")
if test[0] not in ("y", "Y"):
raise SqlmapSilentQuitException
elif not _:
warnMsg = "provided value for parameter '%s' is empty. " % parameter
warnMsg += "Please, always use only valid parameter values "
warnMsg += "so sqlmap could be able to run properly"
logger.warn(warnMsg)
if place in (PLACE.POST, PLACE.GET):
for regex in (r"\A((?:<[^>]+>)+\w+)((?:<[^>]+>)+)\Z", r"\A([^\w]+.*\w+)([^\w]+)\Z"):
match = re.search(regex, testableParameters[parameter])
if match:
try:
candidates = OrderedDict()
def walk(head, current=None):
current = current or head
if isListLike(current):
for _ in current:
walk(head, _)
elif isinstance(current, dict):
for key in current.keys():
value = current[key]
if isinstance(value, (list, tuple, set, dict)):
walk(head, value)
elif isinstance(value, (bool, int, float, basestring)):
original = current[key]
if isinstance(value, bool):
current[key] = "%s%s" % (str(value).lower(), BOUNDED_INJECTION_MARKER)
else:
current[key] = "%s%s" % (value, BOUNDED_INJECTION_MARKER)
candidates["%s (%s)" % (parameter, key)] = re.sub("(%s\s*=\s*)%s" % (re.escape(parameter), re.escape(testableParameters[parameter])), r"\g<1>%s" % json.dumps(deserialized), parameters)
current[key] = original
deserialized = json.loads(testableParameters[parameter])
walk(deserialized)
if candidates:
message = "it appears that provided value for %s parameter '%s' " % (place, parameter)
message += "is JSON deserializable. Do you want to inject inside? [y/N] "
test = readInput(message, default="N")
if test[0] in ("y", "Y"):
del testableParameters[parameter]
testableParameters.update(candidates)
break
except (KeyboardInterrupt, SqlmapUserQuitException):
raise
except Exception:
pass
_ = re.sub(regex, "\g<1>%s\g<%d>" % (CUSTOM_INJECTION_MARK_CHAR, len(match.groups())), testableParameters[parameter])
message = "it appears that provided value for %s parameter '%s' " % (place, parameter)
message += "has boundaries. Do you want to inject inside? ('%s') [y/N] " % _
test = readInput(message, default="N")
if test[0] in ("y", "Y"):
testableParameters[parameter] = re.sub(regex, "\g<1>%s\g<2>" % BOUNDED_INJECTION_MARKER, testableParameters[parameter])
break
if conf.testParameter:
if not testableParameters:
paramStr = ", ".join(test for test in conf.testParameter)
if len(conf.testParameter) > 1:
warnMsg = "provided parameters '%s' " % paramStr
warnMsg += "are not inside the %s" % place
logger.warn(warnMsg)
else:
parameter = conf.testParameter[0]
if not intersect(USER_AGENT_ALIASES + REFERER_ALIASES + HOST_ALIASES, parameter, True):
debugMsg = "provided parameter '%s' " % paramStr
debugMsg += "is not inside the %s" % place
logger.debug(debugMsg)
elif len(conf.testParameter) != len(testableParameters.keys()):
for parameter in conf.testParameter:
if parameter not in testableParameters:
debugMsg = "provided parameter '%s' " % parameter
debugMsg += "is not inside the %s" % place
logger.debug(debugMsg)
if testableParameters:
for parameter, value in testableParameters.items():
if value and not value.isdigit():
for encoding in ("hex", "base64"):
try:
decoded = value.decode(encoding)
if len(decoded) > MIN_ENCODED_LEN_CHECK and all(_ in string.printable for _ in decoded):
warnMsg = "provided parameter '%s' " % parameter
warnMsg += "appears to be '%s' encoded" % encoding
logger.warn(warnMsg)
break
except:
pass
return testableParameters
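# Illustrative behaviour of paramToDict() (assumed inputs, default configuration):
#   paramToDict(PLACE.GET, "id=1&name=foo")  ->  OrderedDict([("id", "1"), ("name", "foo")])
# JSON-deserializable or bounded values may additionally be expanded into
# injectable candidates via the interactive prompts handled above.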
def getManualDirectories():
directories = None
defaultDocRoot = DEFAULT_DOC_ROOTS.get(Backend.getOs(), DEFAULT_DOC_ROOTS[OS.LINUX])
if kb.absFilePaths:
for absFilePath in kb.absFilePaths:
if directories:
break
if directoryPath(absFilePath) == '/':
continue
absFilePath = normalizePath(absFilePath)
windowsDriveLetter = None
if isWindowsDriveLetterPath(absFilePath):
windowsDriveLetter, absFilePath = absFilePath[:2], absFilePath[2:]
absFilePath = ntToPosixSlashes(posixToNtSlashes(absFilePath))
for _ in list(GENERIC_DOC_ROOT_DIRECTORY_NAMES) + [conf.hostname]:
_ = "/%s/" % _
if _ in absFilePath:
directories = "%s%s" % (absFilePath.split(_)[0], _)
break
if not directories and conf.path.strip('/') and conf.path in absFilePath:
directories = absFilePath.split(conf.path)[0]
if directories and windowsDriveLetter:
directories = "%s/%s" % (windowsDriveLetter, ntToPosixSlashes(directories))
directories = normalizePath(directories)
if conf.webRoot:
directories = [conf.webRoot]
infoMsg = "using '%s' as web server document root" % conf.webRoot
logger.info(infoMsg)
elif directories:
infoMsg = "retrieved the web server document root: '%s'" % directories
logger.info(infoMsg)
else:
warnMsg = "unable to automatically retrieve the web server "
warnMsg += "document root"
logger.warn(warnMsg)
directories = []
message = "what do you want to use for writable directory?\n"
message += "[1] common location(s) ('%s') (default)\n" % ", ".join(root for root in defaultDocRoot)
message += "[2] custom location(s)\n"
message += "[3] custom directory list file\n"
message += "[4] brute force search"
choice = readInput(message, default="1").strip()
if choice == "2":
message = "please provide a comma separate list of absolute directory paths: "
directories = readInput(message, default="").split(',')
elif choice == "3":
message = "what's the list file location?\n"
listPath = readInput(message, default="")
checkFile(listPath)
directories = getFileItems(listPath)
elif choice == "4":
targets = set([conf.hostname])
_ = conf.hostname.split('.')
if _[0] == "www":
targets.add('.'.join(_[1:]))
targets.add('.'.join(_[1:-1]))
else:
targets.add('.'.join(_[:-1]))
targets = filter(None, targets)
for prefix in BRUTE_DOC_ROOT_PREFIXES.get(Backend.getOs(), DEFAULT_DOC_ROOTS[OS.LINUX]):
if BRUTE_DOC_ROOT_TARGET_MARK in prefix and re.match(IP_ADDRESS_REGEX, conf.hostname):
continue
for suffix in BRUTE_DOC_ROOT_SUFFIXES:
for target in targets:
if not prefix.endswith("/%s" % suffix):
item = "%s/%s" % (prefix, suffix)
else:
item = prefix
item = item.replace(BRUTE_DOC_ROOT_TARGET_MARK, target).replace("//", '/').rstrip('/')
if item not in directories:
directories.append(item)
if BRUTE_DOC_ROOT_TARGET_MARK not in prefix:
break
infoMsg = "using generated directory list: %s" % ','.join(directories)
logger.info(infoMsg)
msg = "use any additional custom directories [Enter for None]: "
answer = readInput(msg)
if answer:
directories.extend(answer.split(','))
else:
directories = defaultDocRoot
return directories
def getAutoDirectories():
retVal = set()
if kb.absFilePaths:
infoMsg = "retrieved web server absolute paths: "
infoMsg += "'%s'" % ", ".join(ntToPosixSlashes(path) for path in kb.absFilePaths)
logger.info(infoMsg)
for absFilePath in kb.absFilePaths:
if absFilePath:
directory = directoryPath(absFilePath)
directory = ntToPosixSlashes(directory)
retVal.add(directory)
else:
warnMsg = "unable to automatically parse any web server path"
logger.warn(warnMsg)
return list(retVal)
def filePathToSafeString(filePath):
"""
Returns a string representation of a given filepath that is safe to use as a single filename
>>> filePathToSafeString('C:/Windows/system32')
'C__Windows_system32'
"""
retVal = filePath.replace("/", "_").replace("\\", "_")
retVal = retVal.replace(" ", "_").replace(":", "_")
return retVal
def singleTimeDebugMessage(message):
singleTimeLogMessage(message, logging.DEBUG)
def singleTimeWarnMessage(message):
singleTimeLogMessage(message, logging.WARN)
def singleTimeLogMessage(message, level=logging.INFO, flag=None):
if flag is None:
flag = hash(message)
if not conf.smokeTest and flag not in kb.singleLogFlags:
kb.singleLogFlags.add(flag)
logger.log(level, message)
def boldifyMessage(message):
retVal = message
if any(_ in message for _ in BOLD_PATTERNS):
retVal = setColor(message, True)
return retVal
def setColor(message, bold=False):
retVal = message
level = extractRegexResult(r"\[(?P<result>[A-Z ]+)\]", message) or kb.get("stickyLevel")
if message and getattr(LOGGER_HANDLER, "is_tty", False): # colorizing handler
if bold:
retVal = colored(message, color=None, on_color=None, attrs=("bold",))
elif level:
level = getattr(logging, level, None) if isinstance(level, basestring) else level
_ = LOGGER_HANDLER.level_map.get(level)
if _:
background, foreground, bold = _
retVal = colored(message, color=foreground, on_color="on_%s" % background if background else None, attrs=("bold",) if bold else None)
kb.stickyLevel = level if message and message[-1] != "\n" else None
return retVal
def dataToStdout(data, forceOutput=False, bold=False, content_type=None, status=CONTENT_STATUS.IN_PROGRESS):
"""
Writes text to the stdout (console) stream
"""
message = ""
if not kb.get("threadException"):
if forceOutput or not getCurrentThreadData().disableStdOut:
if kb.get("multiThreadMode"):
logging._acquireLock()
if isinstance(data, unicode):
message = stdoutencode(data)
else:
message = data
try:
if hasattr(conf, "api"):
sys.stdout.write(message, status, content_type)
else:
sys.stdout.write(setColor(message, bold))
sys.stdout.flush()
except IOError:
pass
if kb.get("multiThreadMode"):
logging._releaseLock()
kb.prependFlag = isinstance(data, basestring) and (len(data) == 1 and data not in ('\n', '\r') or len(data) > 2 and data[0] == '\r' and data[-1] != '\n')
def dataToTrafficFile(data):
if not conf.trafficFile:
return
try:
conf.trafficFP.write(data)
conf.trafficFP.flush()
except IOError, ex:
errMsg = "something went wrong while trying "
errMsg += "to write to the traffic file '%s' ('%s')" % (conf.trafficFile, getSafeExString(ex))
raise SqlmapSystemException(errMsg)
def dataToDumpFile(dumpFile, data):
try:
dumpFile.write(data)
dumpFile.flush()
except IOError, ex:
if "No space left" in getUnicode(ex):
errMsg = "no space left on output device"
logger.error(errMsg)
elif "Permission denied" in getUnicode(ex):
errMsg = "permission denied when flushing dump data"
logger.error(errMsg)
else:
raise
def dataToOutFile(filename, data):
retVal = None
if data:
while True:
retVal = os.path.join(conf.filePath, filePathToSafeString(filename))
try:
with open(retVal, "w+b") as f: # has to stay as non-codecs because data is raw ASCII encoded data
f.write(unicodeencode(data))
except UnicodeEncodeError, ex:
_ = normalizeUnicode(filename)
if filename != _:
filename = _
else:
errMsg = "couldn't write to the "
errMsg += "output file ('%s')" % getSafeExString(ex)
raise SqlmapGenericException(errMsg)
except IOError, ex:
errMsg = "something went wrong while trying to write "
errMsg += "to the output file ('%s')" % getSafeExString(ex)
raise SqlmapGenericException(errMsg)
else:
break
return retVal
def readInput(message, default=None, checkBatch=True):
"""
Reads input from terminal
"""
retVal = None
kb.stickyLevel = None
message = getUnicode(message)
if "\n" in message:
message += "%s> " % ("\n" if message.count("\n") > 1 else "")
elif message[-1] == ']':
message += " "
if kb.get("prependFlag"):
message = "\n%s" % message
kb.prependFlag = False
if conf.get("answers"):
for item in conf.answers.split(','):
question = item.split('=')[0].strip()
answer = item.split('=')[1] if len(item.split('=')) > 1 else None
if answer and question.lower() in message.lower():
retVal = getUnicode(answer, UNICODE_ENCODING)
elif answer is None and retVal:
retVal = "%s,%s" % (retVal, getUnicode(item, UNICODE_ENCODING))
if retVal:
dataToStdout("\r%s%s\n" % (message, retVal), forceOutput=True, bold=True)
debugMsg = "used the given answer"
logger.debug(debugMsg)
if retVal is None:
if checkBatch and conf.get("batch"):
if isListLike(default):
options = ",".join(getUnicode(opt, UNICODE_ENCODING) for opt in default)
elif default:
options = getUnicode(default, UNICODE_ENCODING)
else:
options = unicode()
dataToStdout("\r%s%s\n" % (message, options), forceOutput=True, bold=True)
debugMsg = "used the default behaviour, running in batch mode"
logger.debug(debugMsg)
retVal = default
else:
logging._acquireLock()
if conf.get("beep"):
beep()
dataToStdout("\r%s" % message, forceOutput=True, bold=True)
kb.prependFlag = False
try:
retVal = raw_input() or default
retVal = getUnicode(retVal, encoding=sys.stdin.encoding) if retVal else retVal
except:
try:
time.sleep(0.05) # Reference: http://www.gossamer-threads.com/lists/python/python/781893
except:
pass
finally:
kb.prependFlag = True
raise SqlmapUserQuitException
finally:
logging._releaseLock()
return retVal
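# Illustrative call (assumed prompt text): readInput("do you want to continue? [y/N] ", default="N")
# returns the typed answer, a matching --answers value, or the default ("N") when running with --batch.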
def randomRange(start=0, stop=1000, seed=None):
"""
Returns random integer value in given range
>>> random.seed(0)
>>> randomRange(1, 500)
423
"""
if seed is not None:
_ = getCurrentThreadData().random
_.seed(seed)
randint = _.randint
else:
randint = random.randint
return int(randint(start, stop))
def randomInt(length=4, seed=None):
"""
Returns random integer value with provided number of digits
>>> random.seed(0)
>>> randomInt(6)
874254
"""
if seed is not None:
_ = getCurrentThreadData().random
_.seed(seed)
choice = _.choice
else:
choice = random.choice
return int("".join(choice(string.digits if _ != 0 else string.digits.replace('0', '')) for _ in xrange(0, length)))
def randomStr(length=4, lowercase=False, alphabet=None, seed=None):
"""
Returns random string value with provided number of characters
>>> random.seed(0)
>>> randomStr(6)
'RNvnAv'
"""
if seed is not None:
_ = getCurrentThreadData().random
_.seed(seed)
choice = _.choice
else:
choice = random.choice
if alphabet:
retVal = "".join(choice(alphabet) for _ in xrange(0, length))
elif lowercase:
retVal = "".join(choice(string.ascii_lowercase) for _ in xrange(0, length))
else:
retVal = "".join(choice(string.ascii_letters) for _ in xrange(0, length))
return retVal
def sanitizeStr(value):
"""
Sanitizes string value with respect to newline and carriage return characters
>>> sanitizeStr('foo\\n\\rbar')
u'foo bar'
"""
return getUnicode(value).replace("\n", " ").replace("\r", "")
def getHeader(headers, key):
retVal = None
for _ in (headers or {}):
if _.upper() == key.upper():
retVal = headers[_]
break
return retVal
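# Illustrative (assumed header values): getHeader({"Content-Type": "text/html"}, "content-type")
# returns "text/html", because the lookup above compares header names case-insensitively.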
def checkFile(filename, raiseOnError=True):
"""
Checks for file existence and readability
"""
valid = True
try:
if filename is None or not os.path.isfile(filename):
valid = False
except UnicodeError:
valid = False
if valid:
try:
with open(filename, "rb"):
pass
except:
valid = False
if not valid and raiseOnError:
raise SqlmapSystemException("unable to read file '%s'" % filename)
return valid
def banner():
"""
This function prints the sqlmap banner together with its version
"""
if not any(_ in sys.argv for _ in ("--version", "--pickled-options")):
_ = BANNER
if not getattr(LOGGER_HANDLER, "is_tty", False) or "--disable-coloring" in sys.argv:
_ = re.sub("\033.+?m", "", _)
elif IS_WIN:
coloramainit()
dataToStdout(_, forceOutput=True)
def parsePasswordHash(password):
"""
In case of Microsoft SQL Server, the password hash value is expanded into its components
"""
blank = " " * 8
if not password or password == " ":
password = NULL
if Backend.isDbms(DBMS.MSSQL) and password != NULL and isHexEncodedString(password):
hexPassword = password
password = "%s\n" % hexPassword
password += "%sheader: %s\n" % (blank, hexPassword[:6])
password += "%ssalt: %s\n" % (blank, hexPassword[6:14])
password += "%smixedcase: %s\n" % (blank, hexPassword[14:54])
if not Backend.isVersionWithin(("2005", "2008")):
password += "%suppercase: %s" % (blank, hexPassword[54:])
return password
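# Illustrative layout of a Microsoft SQL Server password hash as split above
# (assumed value, documenting the slicing offsets only):
#   password[:6]    -> header (e.g. "0x0100")
#   password[6:14]  -> salt (8 hex chars)
#   password[14:54] -> mixed-case hash (40 hex chars)
#   password[54:]   -> uppercase hash (40 hex chars, not shown for 2005/2008)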
def cleanQuery(query):
"""
Switch all SQL statement (alike) keywords to upper case
"""
retVal = query
for sqlStatements in SQL_STATEMENTS.values():
for sqlStatement in sqlStatements:
sqlStatementEsc = sqlStatement.replace("(", "\\(")
queryMatch = re.search("(%s)" % sqlStatementEsc, query, re.I)
if queryMatch and "sys_exec" not in query:
retVal = retVal.replace(queryMatch.group(1), sqlStatement.upper())
return retVal
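# Illustrative effect (assumed query): cleanQuery("select id from users") would
# typically yield "SELECT id FROM users", with the exact result depending on the
# keywords present in SQL_STATEMENTS.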
def setPaths(rootPath):
"""
Sets absolute paths for project directories and files
"""
paths.SQLMAP_ROOT_PATH = rootPath
# sqlmap paths
paths.SQLMAP_EXTRAS_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, "extra")
paths.SQLMAP_PROCS_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, "procs")
paths.SQLMAP_SHELL_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, "shell")
paths.SQLMAP_TAMPER_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, "tamper")
paths.SQLMAP_WAF_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, "waf")
paths.SQLMAP_TXT_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, "txt")
paths.SQLMAP_UDF_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, "udf")
paths.SQLMAP_XML_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, "xml")
paths.SQLMAP_XML_BANNER_PATH = os.path.join(paths.SQLMAP_XML_PATH, "banner")
paths.SQLMAP_XML_PAYLOADS_PATH = os.path.join(paths.SQLMAP_XML_PATH, "payloads")
_ = os.path.join(os.path.expandvars(os.path.expanduser("~")), ".sqlmap")
paths.SQLMAP_HOME_PATH = _
paths.SQLMAP_OUTPUT_PATH = getUnicode(paths.get("SQLMAP_OUTPUT_PATH", os.path.join(_, "output")), encoding=sys.getfilesystemencoding() or UNICODE_ENCODING)
paths.SQLMAP_DUMP_PATH = os.path.join(paths.SQLMAP_OUTPUT_PATH, "%s", "dump")
paths.SQLMAP_FILES_PATH = os.path.join(paths.SQLMAP_OUTPUT_PATH, "%s", "files")
# sqlmap files
paths.OS_SHELL_HISTORY = os.path.join(_, "os.hst")
paths.SQL_SHELL_HISTORY = os.path.join(_, "sql.hst")
paths.SQLMAP_SHELL_HISTORY = os.path.join(_, "sqlmap.hst")
paths.GITHUB_HISTORY = os.path.join(_, "github.hst")
paths.CHECKSUM_MD5 = os.path.join(paths.SQLMAP_TXT_PATH, "checksum.md5")
paths.COMMON_COLUMNS = os.path.join(paths.SQLMAP_TXT_PATH, "common-columns.txt")
paths.COMMON_TABLES = os.path.join(paths.SQLMAP_TXT_PATH, "common-tables.txt")
paths.COMMON_OUTPUTS = os.path.join(paths.SQLMAP_TXT_PATH, 'common-outputs.txt')
paths.SQL_KEYWORDS = os.path.join(paths.SQLMAP_TXT_PATH, "keywords.txt")
paths.SMALL_DICT = os.path.join(paths.SQLMAP_TXT_PATH, "smalldict.txt")
paths.USER_AGENTS = os.path.join(paths.SQLMAP_TXT_PATH, "user-agents.txt")
paths.WORDLIST = os.path.join(paths.SQLMAP_TXT_PATH, "wordlist.zip")
paths.ERRORS_XML = os.path.join(paths.SQLMAP_XML_PATH, "errors.xml")
paths.BOUNDARIES_XML = os.path.join(paths.SQLMAP_XML_PATH, "boundaries.xml")
paths.LIVE_TESTS_XML = os.path.join(paths.SQLMAP_XML_PATH, "livetests.xml")
paths.QUERIES_XML = os.path.join(paths.SQLMAP_XML_PATH, "queries.xml")
paths.GENERIC_XML = os.path.join(paths.SQLMAP_XML_BANNER_PATH, "generic.xml")
paths.MSSQL_XML = os.path.join(paths.SQLMAP_XML_BANNER_PATH, "mssql.xml")
paths.MYSQL_XML = os.path.join(paths.SQLMAP_XML_BANNER_PATH, "mysql.xml")
paths.ORACLE_XML = os.path.join(paths.SQLMAP_XML_BANNER_PATH, "oracle.xml")
paths.PGSQL_XML = os.path.join(paths.SQLMAP_XML_BANNER_PATH, "postgresql.xml")
for path in paths.values():
if any(path.endswith(_) for _ in (".txt", ".xml", ".zip")):
checkFile(path)
def weAreFrozen():
"""
Returns whether we are frozen via py2exe.
This will affect how we find out where we are located.
Reference: http://www.py2exe.org/index.cgi/WhereAmI
"""
return hasattr(sys, "frozen")
def parseTargetDirect():
"""
Parse target dbms and set some attributes into the configuration singleton.
"""
if not conf.direct:
return
details = None
remote = False
for dbms in SUPPORTED_DBMS:
details = re.search("^(?P<dbms>%s)://(?P<credentials>(?P<user>.+?)\:(?P<pass>.*)\@)?(?P<remote>(?P<hostname>[\w.-]+?)\:(?P<port>[\d]+)\/)?(?P<db>[\w\d\ \:\.\_\-\/\\\\]+?)$" % dbms, conf.direct, re.I)
if details:
conf.dbms = details.group("dbms")
if details.group('credentials'):
conf.dbmsUser = details.group("user")
conf.dbmsPass = details.group("pass")
else:
if conf.dbmsCred:
conf.dbmsUser, conf.dbmsPass = conf.dbmsCred.split(':')
else:
conf.dbmsUser = unicode()
conf.dbmsPass = unicode()
if not conf.dbmsPass:
conf.dbmsPass = None
if details.group("remote"):
remote = True
conf.hostname = details.group("hostname").strip()
conf.port = int(details.group("port"))
else:
conf.hostname = "localhost"
conf.port = 0
conf.dbmsDb = details.group("db")
conf.parameters[None] = "direct connection"
break
if not details:
errMsg = "invalid target details, valid syntax is for instance "
errMsg += "'mysql://USER:PASSWORD@DBMS_IP:DBMS_PORT/DATABASE_NAME' "
errMsg += "or 'access://DATABASE_FILEPATH'"
raise SqlmapSyntaxException(errMsg)
for dbmsName, data in DBMS_DICT.items():
if dbmsName == conf.dbms or conf.dbms.lower() in data[0]:
try:
if dbmsName in (DBMS.ACCESS, DBMS.SQLITE, DBMS.FIREBIRD):
if remote:
warnMsg = "direct connection over the network for "
warnMsg += "%s DBMS is not supported" % dbmsName
logger.warn(warnMsg)
conf.hostname = "localhost"
conf.port = 0
elif not remote:
errMsg = "missing remote connection details (e.g. "
errMsg += "'mysql://USER:PASSWORD@DBMS_IP:DBMS_PORT/DATABASE_NAME' "
errMsg += "or 'access://DATABASE_FILEPATH')"
raise SqlmapSyntaxException(errMsg)
if dbmsName in (DBMS.MSSQL, DBMS.SYBASE):
import _mssql
import pymssql
if not hasattr(pymssql, "__version__") or pymssql.__version__ < "1.0.2":
errMsg = "'%s' third-party library must be " % data[1]
errMsg += "version >= 1.0.2 to work properly. "
errMsg += "Download from '%s'" % data[2]
raise SqlmapMissingDependence(errMsg)
elif dbmsName == DBMS.MYSQL:
import pymysql
elif dbmsName == DBMS.PGSQL:
import psycopg2
elif dbmsName == DBMS.ORACLE:
import cx_Oracle
elif dbmsName == DBMS.SQLITE:
import sqlite3
elif dbmsName == DBMS.ACCESS:
import pyodbc
elif dbmsName == DBMS.FIREBIRD:
import kinterbasdb
except ImportError:
if _sqlalchemy and data[3] in _sqlalchemy.dialects.__all__:
pass
else:
errMsg = "sqlmap requires '%s' third-party library " % data[1]
errMsg += "in order to directly connect to the DBMS "
errMsg += "'%s'. You can download it from '%s'" % (dbmsName, data[2])
errMsg += ". Alternative is to use a package 'python-sqlalchemy' "
errMsg += "with support for dialect '%s' installed" % data[3]
raise SqlmapMissingDependence(errMsg)
def parseTargetUrl():
"""
Parse target URL and set some attributes into the configuration singleton.
"""
if not conf.url:
return
originalUrl = conf.url
if re.search("\[.+\]", conf.url) and not socket.has_ipv6:
errMsg = "IPv6 addressing is not supported "
errMsg += "on this platform"
raise SqlmapGenericException(errMsg)
if not re.search("^http[s]*://", conf.url, re.I) and \
not re.search("^ws[s]*://", conf.url, re.I):
if ":443/" in conf.url:
conf.url = "https://" + conf.url
else:
conf.url = "http://" + conf.url
if CUSTOM_INJECTION_MARK_CHAR in conf.url:
conf.url = conf.url.replace('?', URI_QUESTION_MARKER)
try:
urlSplit = urlparse.urlsplit(conf.url)
except ValueError, ex:
errMsg = "invalid URL '%s' has been given ('%s'). " % (conf.url, getSafeExString(ex))
errMsg += "Please be sure that you don't have any leftover characters (e.g. '[' or ']') "
errMsg += "in the hostname part"
raise SqlmapGenericException(errMsg)
hostnamePort = urlSplit.netloc.split(":") if not re.search("\[.+\]", urlSplit.netloc) else filter(None, (re.search("\[.+\]", urlSplit.netloc).group(0), re.search("\](:(?P<port>\d+))?", urlSplit.netloc).group("port")))
conf.scheme = urlSplit.scheme.strip().lower() if not conf.forceSSL else "https"
conf.path = urlSplit.path.strip()
conf.hostname = hostnamePort[0].strip()
conf.ipv6 = conf.hostname != conf.hostname.strip("[]")
conf.hostname = conf.hostname.strip("[]").replace(CUSTOM_INJECTION_MARK_CHAR, "")
try:
_ = conf.hostname.encode("idna")
except LookupError:
_ = conf.hostname.encode(UNICODE_ENCODING)
except UnicodeError:
_ = None
if any((_ is None, re.search(r'\s', conf.hostname), '..' in conf.hostname, conf.hostname.startswith('.'), '\n' in originalUrl)):
errMsg = "invalid target URL ('%s')" % originalUrl
raise SqlmapSyntaxException(errMsg)
if len(hostnamePort) == 2:
try:
conf.port = int(hostnamePort[1])
except:
errMsg = "invalid target URL"
raise SqlmapSyntaxException(errMsg)
elif conf.scheme == "https":
conf.port = 443
else:
conf.port = 80
if conf.port < 0 or conf.port > 65535:
errMsg = "invalid target URL's port (%d)" % conf.port
raise SqlmapSyntaxException(errMsg)
conf.url = getUnicode("%s://%s:%d%s" % (conf.scheme, ("[%s]" % conf.hostname) if conf.ipv6 else conf.hostname, conf.port, conf.path))
conf.url = conf.url.replace(URI_QUESTION_MARKER, '?')
if urlSplit.query:
if '=' not in urlSplit.query:
conf.url = "%s?%s" % (conf.url, getUnicode(urlSplit.query))
else:
conf.parameters[PLACE.GET] = urldecode(urlSplit.query) if urlSplit.query and urlencode(DEFAULT_GET_POST_DELIMITER, None) not in urlSplit.query else urlSplit.query
if not conf.referer and (intersect(REFERER_ALIASES, conf.testParameter, True) or conf.level >= 3):
debugMsg = "setting the HTTP Referer header to the target URL"
logger.debug(debugMsg)
conf.httpHeaders = filter(lambda (key, value): key != HTTP_HEADER.REFERER, conf.httpHeaders)
conf.httpHeaders.append((HTTP_HEADER.REFERER, conf.url.replace(CUSTOM_INJECTION_MARK_CHAR, "")))
if not conf.host and (intersect(HOST_ALIASES, conf.testParameter, True) or conf.level >= 5):
debugMsg = "setting the HTTP Host header to the target URL"
logger.debug(debugMsg)
conf.httpHeaders = filter(lambda (key, value): key != HTTP_HEADER.HOST, conf.httpHeaders)
conf.httpHeaders.append((HTTP_HEADER.HOST, getHostHeader(conf.url)))
if conf.url != originalUrl:
kb.originalUrls[conf.url] = originalUrl
def expandAsteriskForColumns(expression):
"""
If the user provided an asterisk rather than the column(s)
name, sqlmap will retrieve the columns itself and reprocess
the SQL query string (expression)
"""
asterisk = re.search("^SELECT(\s+TOP\s+[\d]+)?\s+\*\s+FROM\s+`?([^`\s()]+)", expression, re.I)
if asterisk:
infoMsg = "you did not provide the fields in your query. "
infoMsg += "sqlmap will retrieve the column names itself"
logger.info(infoMsg)
_ = asterisk.group(2).replace("..", ".").replace(".dbo.", ".")
db, conf.tbl = _.split(".", 1) if '.' in _ else (None, _)
if db is None:
if expression != conf.query:
conf.db = db
else:
expression = re.sub(r"([^\w])%s" % re.escape(conf.tbl), "\g<1>%s.%s" % (conf.db, conf.tbl), expression)
else:
conf.db = db
conf.db = safeSQLIdentificatorNaming(conf.db)
conf.tbl = safeSQLIdentificatorNaming(conf.tbl, True)
columnsDict = conf.dbmsHandler.getColumns(onlyColNames=True)
if columnsDict and conf.db in columnsDict and conf.tbl in columnsDict[conf.db]:
columns = columnsDict[conf.db][conf.tbl].keys()
columns.sort()
columnsStr = ", ".join(column for column in columns)
expression = expression.replace("*", columnsStr, 1)
infoMsg = "the query with expanded column name(s) is: "
infoMsg += "%s" % expression
logger.info(infoMsg)
return expression
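# Illustrative expansion (assumed column names): "SELECT * FROM users" may be
# rewritten as "SELECT id, name, surname FROM users" once the column names have
# been retrieved from the back-end DBMS.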
def getLimitRange(count, plusOne=False):
"""
Returns range of values used in limit/offset constructs
>>> [_ for _ in getLimitRange(10)]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
retVal = None
count = int(count)
limitStart, limitStop = 1, count
if kb.dumpTable:
if isinstance(conf.limitStop, int) and conf.limitStop > 0 and conf.limitStop < limitStop:
limitStop = conf.limitStop
if isinstance(conf.limitStart, int) and conf.limitStart > 0 and conf.limitStart <= limitStop:
limitStart = conf.limitStart
retVal = xrange(limitStart, limitStop + 1) if plusOne else xrange(limitStart - 1, limitStop)
return retVal
def parseUnionPage(page):
"""
Returns resulting items from UNION query inside provided page content
"""
if page is None:
return None
if re.search("(?si)\A%s.*%s\Z" % (kb.chars.start, kb.chars.stop), page):
if len(page) > LARGE_OUTPUT_THRESHOLD:
warnMsg = "large output detected. This might take a while"
logger.warn(warnMsg)
data = BigArray()
keys = set()
for match in re.finditer("%s(.*?)%s" % (kb.chars.start, kb.chars.stop), page, re.DOTALL | re.IGNORECASE):
entry = match.group(1)
if kb.chars.start in entry:
entry = entry.split(kb.chars.start)[-1]
if kb.unionDuplicates:
key = entry.lower()
if key not in keys:
keys.add(key)
else:
continue
entry = entry.split(kb.chars.delimiter)
if conf.hexConvert:
entry = applyFunctionRecursively(entry, decodeHexValue)
if kb.safeCharEncode:
entry = applyFunctionRecursively(entry, safecharencode)
data.append(entry[0] if len(entry) == 1 else entry)
else:
data = page
if len(data) == 1 and isinstance(data[0], basestring):
data = data[0]
return data
def parseFilePaths(page):
"""
Detects (possible) absolute system paths inside the provided page content
"""
if page:
for regex in FILE_PATH_REGEXES:
for match in re.finditer(regex, page):
absFilePath = match.group("result").strip()
page = page.replace(absFilePath, "")
if isWindowsDriveLetterPath(absFilePath):
absFilePath = posixToNtSlashes(absFilePath)
if absFilePath not in kb.absFilePaths:
kb.absFilePaths.add(absFilePath)
def getLocalIP():
"""
Get local IP address (exposed to the remote/target)
"""
retVal = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((conf.hostname, conf.port))
retVal, _ = s.getsockname()
s.close()
except:
debugMsg = "there was an error in opening socket "
debugMsg += "connection toward '%s'" % conf.hostname
logger.debug(debugMsg)
return retVal
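# Design note (added comment): connecting a throw-away socket to the target and
# reading getsockname() is a common trick to discover which local interface
# address is used to reach the remote host, without requiring elevated privileges.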
def getRemoteIP():
"""
Get remote/target IP address
"""
retVal = None
try:
retVal = socket.gethostbyname(conf.hostname)
except socket.gaierror:
errMsg = "address resolution problem "
errMsg += "occurred for hostname '%s'" % conf.hostname
singleTimeLogMessage(errMsg, logging.ERROR)
return retVal
def getFileType(filePath):
try:
_ = magic.from_file(filePath)
except:
return "unknown"
return "text" if "ASCII" in _ or "text" in _ else "binary"
def getCharset(charsetType=None):
"""
Returns list with integers representing characters of a given
charset type appropriate for inference techniques
>>> getCharset(CHARSET_TYPE.BINARY)
[0, 1, 47, 48, 49]
"""
asciiTbl = []
if charsetType is None:
asciiTbl.extend(xrange(0, 128))
# 0 or 1
elif charsetType == CHARSET_TYPE.BINARY:
asciiTbl.extend([0, 1])
asciiTbl.extend(xrange(47, 50))
# Digits
elif charsetType == CHARSET_TYPE.DIGITS:
asciiTbl.extend([0, 1])
asciiTbl.extend(xrange(47, 58))
# Hexadecimal
elif charsetType == CHARSET_TYPE.HEXADECIMAL:
asciiTbl.extend([0, 1])
asciiTbl.extend(xrange(47, 58))
asciiTbl.extend(xrange(64, 71))
asciiTbl.extend([87, 88]) # X
asciiTbl.extend(xrange(96, 103))
asciiTbl.extend([119, 120]) # x
# Characters
elif charsetType == CHARSET_TYPE.ALPHA:
asciiTbl.extend([0, 1])
asciiTbl.extend(xrange(64, 91))
asciiTbl.extend(xrange(96, 123))
# Characters and digits
elif charsetType == CHARSET_TYPE.ALPHANUM:
asciiTbl.extend([0, 1])
asciiTbl.extend(xrange(47, 58))
asciiTbl.extend(xrange(64, 91))
asciiTbl.extend(xrange(96, 123))
return asciiTbl
def directoryPath(filepath):
"""
Returns directory path for a given filepath
>>> directoryPath('/var/log/apache.log')
'/var/log'
"""
retVal = filepath
if filepath:
retVal = ntpath.dirname(filepath) if isWindowsDriveLetterPath(filepath) else posixpath.dirname(filepath)
return retVal
def normalizePath(filepath):
"""
Returns normalized string representation of a given filepath
>>> normalizePath('//var///log/apache.log')
'//var/log/apache.log'
"""
retVal = filepath
if retVal:
retVal = retVal.strip("\r\n")
retVal = ntpath.normpath(retVal) if isWindowsDriveLetterPath(retVal) else posixpath.normpath(retVal)
return retVal
def safeExpandUser(filepath):
"""
Patch for a Python Issue18171 (http://bugs.python.org/issue18171)
"""
retVal = filepath
try:
retVal = os.path.expanduser(filepath)
except UnicodeError:
_ = locale.getdefaultlocale()
encoding = _[1] if _ and len(_) > 1 else UNICODE_ENCODING
retVal = getUnicode(os.path.expanduser(filepath.encode(encoding)), encoding=encoding)
return retVal
def safeStringFormat(format_, params):
"""
Avoids problems caused by inappropriate format strings
>>> safeStringFormat('SELECT foo FROM %s LIMIT %d', ('bar', '1'))
u'SELECT foo FROM bar LIMIT 1'
"""
if format_.count(PAYLOAD_DELIMITER) == 2:
_ = format_.split(PAYLOAD_DELIMITER)
_[1] = re.sub(r"(\A|[^A-Za-z0-9])(%d)([^A-Za-z0-9]|\Z)", r"\g<1>%s\g<3>", _[1])
retVal = PAYLOAD_DELIMITER.join(_)
else:
retVal = re.sub(r"(\A|[^A-Za-z0-9])(%d)([^A-Za-z0-9]|\Z)", r"\g<1>%s\g<3>", format_)
if isinstance(params, basestring):
retVal = retVal.replace("%s", params, 1)
elif not isListLike(params):
retVal = retVal.replace("%s", str(params), 1)
else:
start, end = 0, len(retVal)
match = re.search(r"%s(.+)%s" % (PAYLOAD_DELIMITER, PAYLOAD_DELIMITER), retVal)
if match and PAYLOAD_DELIMITER not in match.group(1):
start, end = match.start(), match.end()
if retVal.count("%s", start, end) == len(params):
for param in params:
index = retVal.find("%s", start)
retVal = retVal[:index] + getUnicode(param) + retVal[index + 2:]
else:
if any('%s' in _ for _ in conf.parameters.values()):
parts = format_.split(' ')
for i in xrange(len(parts)):
if PAYLOAD_DELIMITER in parts[i]:
parts[i] = parts[i].replace(PAYLOAD_DELIMITER, "")
parts[i] = "%s%s" % (parts[i], PAYLOAD_DELIMITER)
break
format_ = ' '.join(parts)
count = 0
while True:
match = re.search(r"(\A|[^A-Za-z0-9])(%s)([^A-Za-z0-9]|\Z)", retVal)
if match:
if count >= len(params):
warnMsg = "wrong number of parameters during string formatting. "
warnMsg += "Please report by e-mail content \"%r | %r | %r\" to 'dev@sqlmap.org'" % (format_, params, retVal)
raise SqlmapValueException(warnMsg)
else:
retVal = re.sub(r"(\A|[^A-Za-z0-9])(%s)([^A-Za-z0-9]|\Z)", r"\g<1>%s\g<3>" % params[count], retVal, 1)
count += 1
else:
break
return retVal
def getFilteredPageContent(page, onlyText=True, split=" "):
"""
Returns filtered page content, stripped of script and style blocks,
comments and (optionally) all HTML tags
>>> getFilteredPageContent(u'<html><title>foobar</title><body>test</body></html>')
u'foobar test'
"""
retVal = page
# only if the page's charset has been successfully identified
if isinstance(page, unicode):
retVal = re.sub(r"(?si)<script.+?</script>|<!--.+?-->|<style.+?</style>%s" % (r"|<[^>]+>|\t|\n|\r" if onlyText else ""), split, page)
while retVal.find(2 * split) != -1:
retVal = retVal.replace(2 * split, split)
retVal = htmlunescape(retVal.strip().strip(split))
return retVal
def getPageWordSet(page):
"""
Returns word set used in page content
>>> sorted(getPageWordSet(u'<html><title>foobar</title><body>test</body></html>'))
[u'foobar', u'test']
"""
retVal = set()
# only if the page's charset has been successfully identified
if isinstance(page, unicode):
_ = getFilteredPageContent(page)
retVal = set(re.findall(r"\w+", _))
return retVal
def showStaticWords(firstPage, secondPage):
"""
Prints words appearing in two different response pages
"""
infoMsg = "finding static words in longest matching part of dynamic page content"
logger.info(infoMsg)
firstPage = getFilteredPageContent(firstPage)
secondPage = getFilteredPageContent(secondPage)
infoMsg = "static words: "
if firstPage and secondPage:
match = SequenceMatcher(None, firstPage, secondPage).find_longest_match(0, len(firstPage), 0, len(secondPage))
commonText = firstPage[match[0]:match[0] + match[2]]
commonWords = getPageWordSet(commonText)
else:
commonWords = None
if commonWords:
commonWords = list(commonWords)
commonWords.sort(lambda a, b: cmp(a.lower(), b.lower()))
for word in commonWords:
if len(word) > 2:
infoMsg += "'%s', " % word
infoMsg = infoMsg.rstrip(", ")
else:
infoMsg += "None"
logger.info(infoMsg)
def isWindowsDriveLetterPath(filepath):
"""
Returns True if given filepath starts with a Windows drive letter
>>> isWindowsDriveLetterPath('C:\\boot.ini')
True
>>> isWindowsDriveLetterPath('/var/log/apache.log')
False
"""
return re.search("\A[\w]\:", filepath) is not None
def posixToNtSlashes(filepath):
"""
Replaces all occurrences of Posix slashes (/) in provided
filepath with NT ones (\)
>>> posixToNtSlashes('C:/Windows')
'C:\\\\Windows'
"""
return filepath.replace('/', '\\') if filepath else filepath
def ntToPosixSlashes(filepath):
"""
Replaces all occurrences of NT slashes (\) in provided
filepath with Posix ones (/)
>>> ntToPosixSlashes('C:\\Windows')
'C:/Windows'
"""
return filepath.replace('\\', '/') if filepath else filepath
def isHexEncodedString(subject):
"""
Checks if the provided string is hex encoded
>>> isHexEncodedString('DEADBEEF')
True
>>> isHexEncodedString('test')
False
"""
return re.match(r"\A[0-9a-fA-Fx]+\Z", subject) is not None
@cachedmethod
def getConsoleWidth(default=80):
"""
Returns console width
"""
width = None
if os.getenv("COLUMNS", "").isdigit():
width = int(os.getenv("COLUMNS"))
else:
try:
try:
FNULL = open(os.devnull, 'w')
except IOError:
FNULL = None
process = subprocess.Popen("stty size", shell=True, stdout=subprocess.PIPE, stderr=FNULL or subprocess.PIPE)
stdout, _ = process.communicate()
items = stdout.split()
if len(items) == 2 and items[1].isdigit():
width = int(items[1])
except (OSError, MemoryError):
pass
if width is None:
try:
import curses
stdscr = curses.initscr()
_, width = stdscr.getmaxyx()
curses.endwin()
except:
pass
return width or default
def clearConsoleLine(forceOutput=False):
"""
Clears current console line
"""
if getattr(LOGGER_HANDLER, "is_tty", False):
dataToStdout("\r%s\r" % (" " * (getConsoleWidth() - 1)), forceOutput)
kb.prependFlag = False
kb.stickyLevel = None
def parseXmlFile(xmlFile, handler):
"""
Parses XML file by a given handler
"""
try:
with contextlib.closing(StringIO(readCachedFileContent(xmlFile))) as stream:
parse(stream, handler)
except (SAXParseException, UnicodeError), ex:
errMsg = "something appears to be wrong with "
errMsg += "the file '%s' ('%s'). Please make " % (xmlFile, getSafeExString(ex))
errMsg += "sure that you haven't made any changes to it"
raise SqlmapInstallationException, errMsg
def getSQLSnippet(dbms, sfile, **variables):
"""
Returns content of SQL snippet located inside 'procs/' directory
"""
if sfile.endswith('.sql') and os.path.exists(sfile):
filename = sfile
elif not sfile.endswith('.sql') and os.path.exists("%s.sql" % sfile):
filename = "%s.sql" % sfile
else:
filename = os.path.join(paths.SQLMAP_PROCS_PATH, DBMS_DIRECTORY_DICT[dbms], sfile if sfile.endswith('.sql') else "%s.sql" % sfile)
checkFile(filename)
retVal = readCachedFileContent(filename)
retVal = re.sub(r"#.+", "", retVal)
retVal = re.sub(r"(?s);\s+", "; ", retVal).strip("\r\n")
for _ in variables.keys():
retVal = re.sub(r"%%%s%%" % _, variables[_], retVal)
for _ in re.findall(r"%RANDSTR\d+%", retVal, re.I):
retVal = retVal.replace(_, randomStr())
for _ in re.findall(r"%RANDINT\d+%", retVal, re.I):
        retVal = retVal.replace(_, str(randomInt()))
variables = re.findall(r"(?<!\bLIKE ')%(\w+)%", retVal, re.I)
if variables:
errMsg = "unresolved variable%s '%s' in SQL file '%s'" % ("s" if len(variables) > 1 else "", ", ".join(variables), sfile)
logger.error(errMsg)
msg = "do you want to provide the substitution values? [y/N] "
choice = readInput(msg, default="N")
if choice and choice[0].lower() == "y":
for var in variables:
msg = "insert value for variable '%s': " % var
val = readInput(msg, default="")
retVal = retVal.replace(r"%%%s%%" % var, val)
return retVal
def readCachedFileContent(filename, mode='rb'):
"""
    Cached reading of file content (avoids reading the same file multiple times)
"""
if filename not in kb.cache.content:
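        # double-checked locking: re-check the cache after acquiring the lock
        # so concurrent threads don't read the same file twice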
with kb.locks.cache:
if filename not in kb.cache.content:
checkFile(filename)
try:
with openFile(filename, mode) as f:
kb.cache.content[filename] = f.read()
except (IOError, OSError, MemoryError), ex:
errMsg = "something went wrong while trying "
errMsg += "to read the content of file '%s' ('%s')" % (filename, getSafeExString(ex))
raise SqlmapSystemException(errMsg)
return kb.cache.content[filename]
def readXmlFile(xmlFile):
"""
Reads XML file content and returns its DOM representation
"""
checkFile(xmlFile)
retVal = minidom.parse(xmlFile).documentElement
return retVal
def stdev(values):
"""
Computes standard deviation of a list of numbers.
Reference: http://www.goldb.org/corestats.html
>>> stdev([0.9, 0.9, 0.9, 1.0, 0.8, 0.9])
0.06324555320336757
"""
if not values or len(values) < 2:
return None
key = (values[0], values[-1], len(values))
if kb.get("cache") and key in kb.cache.stdev:
retVal = kb.cache.stdev[key]
else:
avg = average(values)
_ = reduce(lambda x, y: x + pow((y or 0) - avg, 2), values, 0.0)
retVal = sqrt(_ / (len(values) - 1))
if kb.get("cache"):
kb.cache.stdev[key] = retVal
return retVal
def average(values):
"""
Computes the arithmetic mean of a list of numbers.
>>> average([0.9, 0.9, 0.9, 1.0, 0.8, 0.9])
0.9
"""
return (sum(values) / len(values)) if values else None
def calculateDeltaSeconds(start):
"""
    Returns elapsed time in seconds since start
"""
return time.time() - start
def initCommonOutputs():
"""
Initializes dictionary containing common output values used by "good samaritan" feature
"""
kb.commonOutputs = {}
key = None
with openFile(paths.COMMON_OUTPUTS, 'r') as f:
for line in f.readlines(): # xreadlines doesn't return unicode strings when codec.open() is used
if line.find('#') != -1:
line = line[:line.find('#')]
line = line.strip()
if len(line) > 1:
if line.startswith('[') and line.endswith(']'):
key = line[1:-1]
elif key:
if key not in kb.commonOutputs:
kb.commonOutputs[key] = set()
if line not in kb.commonOutputs[key]:
kb.commonOutputs[key].add(line)
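# Note: txt/common-outputs.txt is expected to consist of '[Section]' headers
# (e.g. '[Databases]') followed by one candidate value per line, with '#'
# starting a comment (a sketch of the format as parsed above, not a formal spec)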
def getFileItems(filename, commentPrefix='#', unicode_=True, lowercase=False, unique=False):
"""
Returns newline delimited items contained inside file
"""
retVal = list() if not unique else OrderedDict()
checkFile(filename)
try:
with openFile(filename, 'r', errors="ignore") if unicode_ else open(filename, 'r') as f:
for line in (f.readlines() if unicode_ else f.xreadlines()): # xreadlines doesn't return unicode strings when codec.open() is used
if commentPrefix:
if line.find(commentPrefix) != -1:
line = line[:line.find(commentPrefix)]
line = line.strip()
if not unicode_:
try:
line = str.encode(line)
except UnicodeDecodeError:
continue
if line:
if lowercase:
line = line.lower()
if unique and line in retVal:
continue
if unique:
retVal[line] = True
else:
retVal.append(line)
except (IOError, OSError, MemoryError), ex:
errMsg = "something went wrong while trying "
errMsg += "to read the content of file '%s' ('%s')" % (filename, getSafeExString(ex))
raise SqlmapSystemException(errMsg)
return retVal if not unique else retVal.keys()
def goGoodSamaritan(prevValue, originalCharset):
"""
Function for retrieving parameters needed for common prediction (good
samaritan) feature.
prevValue: retrieved query output so far (e.g. 'i').
    Returns commonValue if there is a single complete match (inside the
    kb.partRun section of txt/common-outputs.txt) for the parameter
    prevValue. If there are multiple matches instead of a single one,
    commonCharset is returned containing the more probable characters
    (retrieved from the matched values in txt/common-outputs.txt) together
    with the rest of the charset as otherCharset.
"""
if kb.commonOutputs is None:
initCommonOutputs()
predictionSet = set()
commonValue = None
commonPattern = None
countCommonValue = 0
# If the header (e.g. Databases) we are looking for has common
# outputs defined
if kb.partRun in kb.commonOutputs:
commonPartOutputs = kb.commonOutputs[kb.partRun]
commonPattern = commonFinderOnly(prevValue, commonPartOutputs)
# If the longest common prefix is the same as previous value then
# do not consider it
if commonPattern and commonPattern == prevValue:
commonPattern = None
# For each common output
for item in commonPartOutputs:
# Check if the common output (item) starts with prevValue
# where prevValue is the enumerated character(s) so far
if item.startswith(prevValue):
commonValue = item
countCommonValue += 1
if len(item) > len(prevValue):
char = item[len(prevValue)]
predictionSet.add(char)
# Reset single value if there is more than one possible common
# output
if countCommonValue > 1:
commonValue = None
commonCharset = []
otherCharset = []
# Split the original charset into common chars (commonCharset)
# and other chars (otherCharset)
for ordChar in originalCharset:
if chr(ordChar) not in predictionSet:
otherCharset.append(ordChar)
else:
commonCharset.append(ordChar)
commonCharset.sort()
return commonValue, commonPattern, commonCharset, originalCharset
else:
return None, None, None, originalCharset
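# Illustrative (hypothetical) example: with prevValue 'in' and common outputs
# {'information_schema', 'interests'} there are two candidate matches, so no
# single commonValue is returned, while commonCharset would favor the characters
# 'f' and 't' (the ones following 'in' in the matched values)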
def getPartRun(alias=True):
"""
Goes through call stack and finds constructs matching conf.dbmsHandler.*.
Returns it or its alias used in txt/common-outputs.txt
"""
retVal = None
commonPartsDict = optDict["Enumeration"]
try:
stack = [item[4][0] if isinstance(item[4], list) else '' for item in inspect.stack()]
# Goes backwards through the stack to find the conf.dbmsHandler method
# calling this function
for i in xrange(0, len(stack) - 1):
for regex in (r"self\.(get[^(]+)\(\)", r"conf\.dbmsHandler\.([^(]+)\(\)"):
match = re.search(regex, stack[i])
if match:
# This is the calling conf.dbmsHandler or self method
# (e.g. 'getDbms')
retVal = match.groups()[0]
break
if retVal is not None:
break
# Reference: http://coding.derkeiler.com/Archive/Python/comp.lang.python/2004-06/2267.html
except TypeError:
pass
# Return the INI tag to consider for common outputs (e.g. 'Databases')
if alias:
return commonPartsDict[retVal][1] if isinstance(commonPartsDict.get(retVal), tuple) else retVal
else:
return retVal
def getUnicode(value, encoding=None, noneToNull=False):
"""
    Returns the unicode representation of the supplied value:
>>> getUnicode(u'test')
u'test'
>>> getUnicode('test')
u'test'
>>> getUnicode(1)
u'1'
"""
if noneToNull and value is None:
return NULL
if isinstance(value, unicode):
return value
elif isinstance(value, basestring):
while True:
try:
return unicode(value, encoding or (kb.get("pageEncoding") if kb.get("originalPage") else None) or UNICODE_ENCODING)
except UnicodeDecodeError, ex:
try:
return unicode(value, UNICODE_ENCODING)
except:
value = value[:ex.start] + "".join(INVALID_UNICODE_CHAR_FORMAT % ord(_) for _ in value[ex.start:ex.end]) + value[ex.end:]
elif isListLike(value):
value = list(getUnicode(_, encoding, noneToNull) for _ in value)
return value
else:
try:
return unicode(value)
except UnicodeDecodeError:
return unicode(str(value), errors="ignore") # encoding ignored for non-basestring instances
def longestCommonPrefix(*sequences):
"""
    Returns longest common prefix occurring in given sequences
Reference: http://boredzo.org/blog/archives/2007-01-06/longest-common-prefix-in-python-2
>>> longestCommonPrefix('foobar', 'fobar')
'fo'
"""
if len(sequences) == 1:
return sequences[0]
sequences = [pair[1] for pair in sorted((len(fi), fi) for fi in sequences)]
if not sequences:
return None
for i, comparison_ch in enumerate(sequences[0]):
for fi in sequences[1:]:
ch = fi[i]
if ch != comparison_ch:
return fi[:i]
return sequences[0]
def commonFinderOnly(initial, sequence):
return longestCommonPrefix(*filter(lambda x: x.startswith(initial), sequence))
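# Illustrative example: commonFinderOnly('in', ('information_schema', 'interests', 'users'))
# filters to the items starting with 'in' and returns their longest common prefix ('in')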
def pushValue(value):
"""
Push value to the stack (thread dependent)
"""
_ = None
success = False
for i in xrange(PUSH_VALUE_EXCEPTION_RETRY_COUNT):
try:
getCurrentThreadData().valueStack.append(copy.deepcopy(value))
success = True
break
except Exception, ex:
_ = ex
if not success:
getCurrentThreadData().valueStack.append(None)
if _:
raise _
def popValue():
"""
Pop value from the stack (thread dependent)
>>> pushValue('foobar')
>>> popValue()
'foobar'
"""
return getCurrentThreadData().valueStack.pop()
def wasLastResponseDBMSError():
"""
Returns True if the last web request resulted in a (recognized) DBMS error page
"""
threadData = getCurrentThreadData()
return threadData.lastErrorPage and threadData.lastErrorPage[0] == threadData.lastRequestUID
def wasLastResponseHTTPError():
"""
Returns True if the last web request resulted in an erroneous HTTP code (like 500)
"""
threadData = getCurrentThreadData()
return threadData.lastHTTPError and threadData.lastHTTPError[0] == threadData.lastRequestUID
def wasLastResponseDelayed():
"""
Returns True if the last web request resulted in a time-delay
"""
# 99.9999999997440% of all non time-based SQL injection affected
# response times should be inside +-7*stdev([normal response times])
# Math reference: http://www.answers.com/topic/standard-deviation
deviation = stdev(kb.responseTimes.get(kb.responseTimeMode, []))
threadData = getCurrentThreadData()
if deviation and not conf.direct and not conf.disableStats:
if len(kb.responseTimes[kb.responseTimeMode]) < MIN_TIME_RESPONSES:
warnMsg = "time-based standard deviation method used on a model "
warnMsg += "with less than %d response times" % MIN_TIME_RESPONSES
logger.warn(warnMsg)
lowerStdLimit = average(kb.responseTimes[kb.responseTimeMode]) + TIME_STDEV_COEFF * deviation
retVal = (threadData.lastQueryDuration >= max(MIN_VALID_DELAYED_RESPONSE, lowerStdLimit))
if not kb.testMode and retVal:
if kb.adjustTimeDelay is None:
msg = "do you want sqlmap to try to optimize value(s) "
msg += "for DBMS delay responses (option '--time-sec')? [Y/n] "
choice = readInput(msg, default='Y')
kb.adjustTimeDelay = ADJUST_TIME_DELAY.DISABLE if choice.upper() == 'N' else ADJUST_TIME_DELAY.YES
if kb.adjustTimeDelay is ADJUST_TIME_DELAY.YES:
adjustTimeDelay(threadData.lastQueryDuration, lowerStdLimit)
return retVal
else:
delta = threadData.lastQueryDuration - conf.timeSec
if Backend.getIdentifiedDbms() in (DBMS.MYSQL,): # MySQL's SLEEP(X) lasts 0.05 seconds shorter on average
delta += 0.05
return delta >= 0
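# Rough numeric sketch (assumed values): with an average response time of 0.2s,
# a standard deviation of 0.05s and a TIME_STDEV_COEFF of 7 (per the comment above),
# a response counts as delayed once it takes at least max(MIN_VALID_DELAYED_RESPONSE, 0.55)s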
def adjustTimeDelay(lastQueryDuration, lowerStdLimit):
"""
Provides tip for adjusting time delay in time-based data retrieval
"""
candidate = 1 + int(round(lowerStdLimit))
if candidate:
kb.delayCandidates = [candidate] + kb.delayCandidates[:-1]
if all((x == candidate for x in kb.delayCandidates)) and candidate < conf.timeSec:
conf.timeSec = candidate
infoMsg = "adjusting time delay to "
infoMsg += "%d second%s due to good response times" % (conf.timeSec, 's' if conf.timeSec > 1 else '')
logger.info(infoMsg)
def getLastRequestHTTPError():
"""
Returns last HTTP error code
"""
threadData = getCurrentThreadData()
return threadData.lastHTTPError[1] if threadData.lastHTTPError else None
def extractErrorMessage(page):
"""
    Returns reported error message from page if it finds one
>>> extractErrorMessage(u'<html><title>Test</title>\\n<b>Warning</b>: oci_parse() [function.oci-parse]: ORA-01756: quoted string not properly terminated<br><p>Only a test page</p></html>')
u'oci_parse() [function.oci-parse]: ORA-01756: quoted string not properly terminated'
"""
retVal = None
if isinstance(page, basestring):
for regex in ERROR_PARSING_REGEXES:
match = re.search(regex, page, re.DOTALL | re.IGNORECASE)
if match:
retVal = htmlunescape(match.group("result")).replace("<br>", "\n").strip()
break
return retVal
def findLocalPort(ports):
"""
Find the first opened localhost port from a given list of ports (e.g. for Tor port checks)
"""
retVal = None
for port in ports:
try:
try:
s = socket._orig_socket(socket.AF_INET, socket.SOCK_STREAM)
except AttributeError:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((LOCALHOST, port))
retVal = port
break
except socket.error:
pass
finally:
try:
s.close()
except socket.error:
pass
return retVal
def findMultipartPostBoundary(post):
"""
Finds value for a boundary parameter in given multipart POST body
"""
retVal = None
done = set()
candidates = []
for match in re.finditer(r"(?m)^--(.+?)(--)?$", post or ""):
_ = match.group(1).strip().strip('-')
if _ in done:
continue
else:
candidates.append((post.count(_), _))
done.add(_)
if candidates:
candidates.sort(key=lambda _: _[0], reverse=True)
retVal = candidates[0][1]
return retVal
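# Illustrative example: for a (hypothetical) multipart body containing the lines
# '--xyz' and '--xyz--', the most frequent candidate 'xyz' would be returned as the boundary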
def urldecode(value, encoding=None, unsafe="%%&=;+%s" % CUSTOM_INJECTION_MARK_CHAR, convall=False, plusspace=True):
"""
URL decodes given value
>>> urldecode('AND%201%3E%282%2B3%29%23', convall=True)
u'AND 1>(2+3)#'
"""
result = value
if value:
try:
# for cases like T%C3%BCrk%C3%A7e
value = str(value)
except ValueError:
pass
finally:
if convall:
result = urllib.unquote_plus(value) if plusspace else urllib.unquote(value)
else:
def _(match):
charset = reduce(lambda x, y: x.replace(y, ""), unsafe, string.printable)
char = chr(ord(match.group(1).decode("hex")))
return char if char in charset else match.group(0)
result = value
if plusspace:
result = result.replace("+", " ") # plus sign has a special meaning in URL encoded data (hence the usage of urllib.unquote_plus in convall case)
result = re.sub("%([0-9a-fA-F]{2})", _, result)
if isinstance(result, str):
result = unicode(result, encoding or UNICODE_ENCODING, "replace")
return result
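# Illustrative example: urldecode('a%20b%3D1') decodes '%20' to a space but keeps
# '%3D' as-is, because '=' is listed among the default 'unsafe' characters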
def urlencode(value, safe="%&=-_", convall=False, limit=False, spaceplus=False):
"""
URL encodes given value
>>> urlencode('AND 1>(2+3)#')
'AND%201%3E%282%2B3%29%23'
"""
if conf.get("direct"):
return value
count = 0
result = None if value is None else ""
if value:
if Backend.isDbms(DBMS.MSSQL) and not kb.tamperFunctions and any(ord(_) > 255 for _ in value):
warnMsg = "if you experience problems with "
warnMsg += "non-ASCII identifier names "
warnMsg += "you are advised to rerun with '--tamper=charunicodeencode'"
singleTimeWarnMessage(warnMsg)
if convall or safe is None:
safe = ""
# corner case when character % really needs to be
# encoded (when not representing URL encoded char)
# except in cases when tampering scripts are used
if all(map(lambda x: '%' in x, [safe, value])) and not kb.tamperFunctions:
value = re.sub("%(?![0-9a-fA-F]{2})", "%25", value)
while True:
result = urllib.quote(utf8encode(value), safe)
if limit and len(result) > URLENCODE_CHAR_LIMIT:
if count >= len(URLENCODE_FAILSAFE_CHARS):
break
while count < len(URLENCODE_FAILSAFE_CHARS):
safe += URLENCODE_FAILSAFE_CHARS[count]
count += 1
if safe[-1] in value:
break
else:
break
if spaceplus:
result = result.replace(urllib.quote(' '), '+')
return result
def runningAsAdmin():
"""
Returns True if the current process is run under admin privileges
"""
isAdmin = None
if PLATFORM in ("posix", "mac"):
_ = os.geteuid()
isAdmin = isinstance(_, (int, float, long)) and _ == 0
elif IS_WIN:
import ctypes
_ = ctypes.windll.shell32.IsUserAnAdmin()
isAdmin = isinstance(_, (int, float, long)) and _ == 1
else:
errMsg = "sqlmap is not able to check if you are running it "
errMsg += "as an administrator account on this platform. "
errMsg += "sqlmap will assume that you are an administrator "
errMsg += "which is mandatory for the requested takeover attack "
errMsg += "to work properly"
logger.error(errMsg)
isAdmin = True
return isAdmin
def logHTTPTraffic(requestLogMsg, responseLogMsg):
"""
Logs HTTP traffic to the output file
"""
if not conf.trafficFile:
return
with kb.locks.log:
dataToTrafficFile("%s%s" % (requestLogMsg, os.linesep))
dataToTrafficFile("%s%s" % (responseLogMsg, os.linesep))
dataToTrafficFile("%s%s%s%s" % (os.linesep, 76 * '#', os.linesep, os.linesep))
def getPageTemplate(payload, place): # Cross-linked function
raise NotImplementedError
@cachedmethod
def getPublicTypeMembers(type_, onlyValues=False):
"""
Useful for getting members from types (e.g. in enums)
>>> [_ for _ in getPublicTypeMembers(OS, True)]
['Linux', 'Windows']
"""
retVal = []
for name, value in inspect.getmembers(type_):
if not name.startswith('__'):
if not onlyValues:
retVal.append((name, value))
else:
retVal.append(value)
return retVal
def enumValueToNameLookup(type_, value_):
"""
    Returns name of an enum member with a given value
>>> enumValueToNameLookup(SORT_ORDER, 100)
'LAST'
"""
retVal = None
for name, value in getPublicTypeMembers(type_):
if value == value_:
retVal = name
break
return retVal
def extractRegexResult(regex, content, flags=0):
"""
Returns 'result' group value from a possible match with regex on a given
content
>>> extractRegexResult(r'a(?P<result>[^g]+)g', 'abcdefg')
'bcdef'
"""
retVal = None
if regex and content and "?P<result>" in regex:
match = re.search(regex, content, flags)
if match:
retVal = match.group("result")
return retVal
def extractTextTagContent(page):
"""
Returns list containing content from "textual" tags
>>> extractTextTagContent(u'<html><head><title>Title</title></head><body><pre>foobar</pre><a href="#link">Link</a></body></html>')
[u'Title', u'foobar']
"""
page = page or ""
if REFLECTED_VALUE_MARKER in page:
try:
page = re.sub(r"(?i)[^\s>]*%s[^\s<]*" % REFLECTED_VALUE_MARKER, "", page)
except MemoryError:
page = page.replace(REFLECTED_VALUE_MARKER, "")
return filter(None, (_.group('result').strip() for _ in re.finditer(TEXT_TAG_REGEX, page)))
def trimAlphaNum(value):
"""
    Trims alphanumeric characters from the start and end of a given value
>>> trimAlphaNum(u'AND 1>(2+3)-- foobar')
u' 1>(2+3)-- '
"""
while value and value[-1].isalnum():
value = value[:-1]
while value and value[0].isalnum():
value = value[1:]
return value
def isNumPosStrValue(value):
"""
Returns True if value is a string (or integer) with a positive integer representation
>>> isNumPosStrValue(1)
True
>>> isNumPosStrValue('1')
True
>>> isNumPosStrValue(0)
False
>>> isNumPosStrValue('-2')
False
"""
return (value and isinstance(value, basestring) and value.isdigit() and int(value) > 0) or (isinstance(value, int) and value > 0)
@cachedmethod
def aliasToDbmsEnum(dbms):
"""
Returns major DBMS name from a given alias
>>> aliasToDbmsEnum('mssql')
'Microsoft SQL Server'
"""
retVal = None
if dbms:
for key, item in DBMS_DICT.items():
if dbms.lower() in item[0] or dbms.lower() == key.lower():
retVal = key
break
return retVal
def findDynamicContent(firstPage, secondPage):
"""
This function checks if the provided pages have dynamic content. If they
are dynamic, proper markings will be made
"""
if not firstPage or not secondPage:
return
infoMsg = "searching for dynamic content"
logger.info(infoMsg)
blocks = SequenceMatcher(None, firstPage, secondPage).get_matching_blocks()
kb.dynamicMarkings = []
# Removing too small matching blocks
for block in blocks[:]:
(_, _, length) = block
if length <= DYNAMICITY_MARK_LENGTH:
blocks.remove(block)
# Making of dynamic markings based on prefix/suffix principle
if len(blocks) > 0:
blocks.insert(0, None)
blocks.append(None)
for i in xrange(len(blocks) - 1):
prefix = firstPage[blocks[i][0]:blocks[i][0] + blocks[i][2]] if blocks[i] else None
suffix = firstPage[blocks[i + 1][0]:blocks[i + 1][0] + blocks[i + 1][2]] if blocks[i + 1] else None
if prefix is None and blocks[i + 1][0] == 0:
continue
if suffix is None and (blocks[i][0] + blocks[i][2] >= len(firstPage)):
continue
prefix = trimAlphaNum(prefix)
suffix = trimAlphaNum(suffix)
kb.dynamicMarkings.append((prefix[-DYNAMICITY_MARK_LENGTH / 2:] if prefix else None, suffix[:DYNAMICITY_MARK_LENGTH / 2] if suffix else None))
if len(kb.dynamicMarkings) > 0:
infoMsg = "dynamic content marked for removal (%d region%s)" % (len(kb.dynamicMarkings), 's' if len(kb.dynamicMarkings) > 1 else '')
logger.info(infoMsg)
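# Illustrative sketch: if two requests for the same page differ only in an embedded
# timestamp, the text right before and right after that timestamp (trimmed of
# alphanumerics and capped at DYNAMICITY_MARK_LENGTH / 2 characters) is stored in
# kb.dynamicMarkings so that removeDynamicContent() can later strip the volatile part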
def removeDynamicContent(page):
"""
    Removes dynamic content from the supplied page, basing removal on
    precalculated dynamic markings
"""
if page:
for item in kb.dynamicMarkings:
prefix, suffix = item
if prefix is None and suffix is None:
continue
elif prefix is None:
page = re.sub(r'(?s)^.+%s' % re.escape(suffix), suffix.replace('\\', r'\\'), page)
elif suffix is None:
page = re.sub(r'(?s)%s.+$' % re.escape(prefix), prefix.replace('\\', r'\\'), page)
else:
page = re.sub(r'(?s)%s.+%s' % (re.escape(prefix), re.escape(suffix)), '%s%s' % (prefix.replace('\\', r'\\'), suffix.replace('\\', r'\\')), page)
return page
def filterStringValue(value, charRegex, replacement=""):
"""
Returns string value consisting only of chars satisfying supplied
regular expression (note: it has to be in form [...])
>>> filterStringValue(u'wzydeadbeef0123#', r'[0-9a-f]')
u'deadbeef0123'
"""
retVal = value
if value:
retVal = re.sub(charRegex.replace("[", "[^") if "[^" not in charRegex else charRegex.replace("[^", "["), replacement, value)
return retVal
def filterControlChars(value):
"""
    Returns string value with control chars substituted with ' '
>>> filterControlChars(u'AND 1>(2+3)\\n--')
u'AND 1>(2+3) --'
"""
return filterStringValue(value, PRINTABLE_CHAR_REGEX, ' ')
def isDBMSVersionAtLeast(version):
"""
Checks if the recognized DBMS version is at least the version
specified
"""
retVal = None
if Backend.getVersion() and Backend.getVersion() != UNKNOWN_DBMS_VERSION:
value = Backend.getVersion().replace(" ", "").rstrip('.')
while True:
index = value.find('.', value.find('.') + 1)
if index > -1:
value = value[0:index] + value[index + 1:]
else:
break
value = filterStringValue(value, '[0-9.><=]')
if isinstance(value, basestring):
if value.startswith(">="):
value = float(value.replace(">=", ""))
elif value.startswith(">"):
value = float(value.replace(">", "")) + 0.01
elif value.startswith("<="):
value = float(value.replace("<=", ""))
elif value.startswith(">"):
value = float(value.replace("<", "")) - 0.01
retVal = getUnicode(value) >= getUnicode(version)
return retVal
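# Illustrative example (assumed fingerprint): a Backend version of '5.0.12' is first
# normalized to '5.012' (only the first dot is kept) before being compared against
# the provided version string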
def parseSqliteTableSchema(value):
"""
Parses table column names and types from specified SQLite table schema
"""
if value:
table = {}
columns = {}
for match in re.finditer(r"(\w+)[\"'`]?\s+(INT|INTEGER|TINYINT|SMALLINT|MEDIUMINT|BIGINT|UNSIGNED BIG INT|INT2|INT8|INTEGER|CHARACTER|VARCHAR|VARYING CHARACTER|NCHAR|NATIVE CHARACTER|NVARCHAR|TEXT|CLOB|LONGTEXT|BLOB|NONE|REAL|DOUBLE|DOUBLE PRECISION|FLOAT|REAL|NUMERIC|DECIMAL|BOOLEAN|DATE|DATETIME|NUMERIC)\b", value, re.I):
columns[match.group(1)] = match.group(2)
table[conf.tbl] = columns
kb.data.cachedColumns[conf.db] = table
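# Illustrative example: for a schema value like 'CREATE TABLE users(id INTEGER, name TEXT)'
# the parsed columns would be {'id': 'INTEGER', 'name': 'TEXT'} (stored under conf.tbl)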
def getTechniqueData(technique=None):
"""
Returns injection data for technique specified
"""
return kb.injection.data.get(technique)
def isTechniqueAvailable(technique):
"""
Returns True if there is injection data which sqlmap could use for
technique specified
"""
if conf.tech and isinstance(conf.tech, list) and technique not in conf.tech:
return False
else:
return getTechniqueData(technique) is not None
def isStackingAvailable():
"""
    Returns True if techniques using stacked queries are available
"""
retVal = False
if PAYLOAD.TECHNIQUE.STACKED in kb.injection.data:
retVal = True
else:
for technique in getPublicTypeMembers(PAYLOAD.TECHNIQUE, True):
_ = getTechniqueData(technique)
if _ and "stacked" in _["title"].lower():
retVal = True
break
return retVal
def isInferenceAvailable():
"""
    Returns True if techniques using inference are available
"""
return any(isTechniqueAvailable(_) for _ in (PAYLOAD.TECHNIQUE.BOOLEAN, PAYLOAD.TECHNIQUE.STACKED, PAYLOAD.TECHNIQUE.TIME))
def setOptimize():
"""
Sets options turned on by switch '-o'
"""
#conf.predictOutput = True
conf.keepAlive = True
conf.threads = 3 if conf.threads < 3 else conf.threads
conf.nullConnection = not any((conf.data, conf.textOnly, conf.titles, conf.string, conf.notString, conf.regexp, conf.tor))
if not conf.nullConnection:
debugMsg = "turning off switch '--null-connection' used indirectly by switch '-o'"
logger.debug(debugMsg)
def initTechnique(technique=None):
"""
Prepares data for technique specified
"""
try:
data = getTechniqueData(technique)
resetCounter(technique)
if data:
kb.pageTemplate, kb.errorIsNone = getPageTemplate(data.templatePayload, kb.injection.place)
kb.matchRatio = data.matchRatio
kb.negativeLogic = (technique == PAYLOAD.TECHNIQUE.BOOLEAN) and (data.where == PAYLOAD.WHERE.NEGATIVE)
# Restoring stored conf options
for key, value in kb.injection.conf.items():
if value and (not hasattr(conf, key) or (hasattr(conf, key) and not getattr(conf, key))):
setattr(conf, key, value)
debugMsg = "resuming configuration option '%s' (%s)" % (key, value)
logger.debug(debugMsg)
if value and key == "optimize":
setOptimize()
else:
warnMsg = "there is no injection data available for technique "
warnMsg += "'%s'" % enumValueToNameLookup(PAYLOAD.TECHNIQUE, technique)
logger.warn(warnMsg)
except SqlmapDataException:
errMsg = "missing data in old session file(s). "
errMsg += "Please use '--flush-session' to deal "
errMsg += "with this error"
raise SqlmapNoneDataException(errMsg)
def arrayizeValue(value):
"""
Makes a list out of value if it is not already a list or tuple itself
>>> arrayizeValue(u'1')
[u'1']
"""
if not isListLike(value):
value = [value]
return value
def unArrayizeValue(value):
"""
    Extracts a single value from the given iterable if it is list-like
>>> unArrayizeValue([u'1'])
u'1'
"""
if isListLike(value):
if not value:
value = None
elif len(value) == 1 and not isListLike(value[0]):
value = value[0]
else:
_ = filter(lambda _: _ is not None, (_ for _ in flattenValue(value)))
value = _[0] if len(_) > 0 else None
return value
def flattenValue(value):
"""
    Returns an iterator providing a flat representation of a given value
>>> [_ for _ in flattenValue([[u'1'], [[u'2'], u'3']])]
[u'1', u'2', u'3']
"""
for i in iter(value):
if isListLike(i):
for j in flattenValue(i):
yield j
else:
yield i
def isListLike(value):
"""
Returns True if the given value is a list-like instance
>>> isListLike([1, 2, 3])
True
>>> isListLike(u'2')
False
"""
return isinstance(value, (list, tuple, set, BigArray))
def getSortedInjectionTests():
"""
    Returns a test list prioritized by the DBMS potentially detected from
    error messages
"""
retVal = copy.deepcopy(conf.tests)
def priorityFunction(test):
retVal = SORT_ORDER.FIRST
if test.stype == PAYLOAD.TECHNIQUE.UNION:
retVal = SORT_ORDER.LAST
elif 'details' in test and 'dbms' in test.details:
if intersect(test.details.dbms, Backend.getIdentifiedDbms()):
retVal = SORT_ORDER.SECOND
else:
retVal = SORT_ORDER.THIRD
return retVal
if Backend.getIdentifiedDbms():
retVal = sorted(retVal, key=priorityFunction)
return retVal
def filterListValue(value, regex):
"""
Returns list with items that have parts satisfying given regular
expression
>>> filterListValue(['users', 'admins', 'logs'], r'(users|admins)')
['users', 'admins']
"""
if isinstance(value, list) and regex:
retVal = filter(lambda _: re.search(regex, _, re.I), value)
else:
retVal = value
return retVal
def showHttpErrorCodes():
"""
    Shows all HTTP error codes raised so far
"""
if kb.httpErrorCodes:
warnMsg = "HTTP error codes detected during run:\n"
warnMsg += ", ".join("%d (%s) - %d times" % (code, httplib.responses[code] \
if code in httplib.responses else '?', count) \
for code, count in kb.httpErrorCodes.items())
logger.warn(warnMsg)
if any((str(_).startswith('4') or str(_).startswith('5')) and _ != httplib.INTERNAL_SERVER_ERROR and _ != kb.originalCode for _ in kb.httpErrorCodes.keys()):
msg = "too many 4xx and/or 5xx HTTP error codes "
msg += "could mean that some kind of protection is involved (e.g. WAF)"
logger.debug(msg)
def openFile(filename, mode='r', encoding=UNICODE_ENCODING, errors="replace", buffering=1): # "buffering=1" means line buffered (Reference: http://stackoverflow.com/a/3168436)
"""
Returns file handle of a given filename
"""
try:
return codecs.open(filename, mode, encoding, errors, buffering)
except IOError:
errMsg = "there has been a file opening error for filename '%s'. " % filename
errMsg += "Please check %s permissions on a file " % ("write" if \
mode and ('w' in mode or 'a' in mode or '+' in mode) else "read")
errMsg += "and that it's not locked by another process."
raise SqlmapSystemException(errMsg)
def decodeIntToUnicode(value):
"""
    Decodes inferred integer value to a unicode character
>>> decodeIntToUnicode(35)
u'#'
>>> decodeIntToUnicode(64)
u'@'
"""
retVal = value
if isinstance(value, int):
try:
if value > 255:
_ = "%x" % value
if len(_) % 2 == 1:
_ = "0%s" % _
raw = hexdecode(_)
if Backend.isDbms(DBMS.MYSQL):
# https://github.com/sqlmapproject/sqlmap/issues/1531
retVal = getUnicode(raw, conf.charset or UNICODE_ENCODING)
elif Backend.isDbms(DBMS.MSSQL):
retVal = getUnicode(raw, "UTF-16-BE")
elif Backend.getIdentifiedDbms() in (DBMS.PGSQL, DBMS.ORACLE):
retVal = unichr(value)
else:
retVal = getUnicode(raw, conf.charset)
else:
retVal = getUnicode(chr(value))
except:
retVal = INFERENCE_UNKNOWN_CHAR
return retVal
def checkIntegrity():
"""
    Checks integrity of code files during unhandled exceptions
"""
logger.debug("running code integrity check")
retVal = True
for checksum, _ in (re.split(r'\s+', _) for _ in getFileItems(paths.CHECKSUM_MD5)):
path = os.path.normpath(os.path.join(paths.SQLMAP_ROOT_PATH, _))
if not os.path.isfile(path):
logger.error("missing file detected '%s'" % path)
retVal = False
elif hashlib.md5(open(path, 'rb').read()).hexdigest() != checksum:
logger.error("wrong checksum of file '%s' detected" % path)
retVal = False
return retVal
def unhandledExceptionMessage():
"""
Returns detailed message about occurred unhandled exception
"""
errMsg = "unhandled exception occurred in %s. It is recommended to retry your " % VERSION_STRING
errMsg += "run with the latest development version from official GitHub "
errMsg += "repository at '%s'. If the exception persists, please open a new issue " % GIT_PAGE
errMsg += "at '%s' " % ISSUES_PAGE
errMsg += "with the following text and any other information required to "
errMsg += "reproduce the bug. The "
errMsg += "developers will try to reproduce the bug, fix it accordingly "
errMsg += "and get back to you\n"
errMsg += "sqlmap version: %s\n" % VERSION_STRING[VERSION_STRING.find('/') + 1:]
errMsg += "Python version: %s\n" % PYVERSION
errMsg += "Operating system: %s\n" % PLATFORM
errMsg += "Command line: %s\n" % re.sub(r".+?\bsqlmap.py\b", "sqlmap.py", getUnicode(" ".join(sys.argv), encoding=sys.stdin.encoding))
errMsg += "Technique: %s\n" % (enumValueToNameLookup(PAYLOAD.TECHNIQUE, kb.technique) if kb.get("technique") else ("DIRECT" if conf.get("direct") else None))
errMsg += "Back-end DBMS:"
if Backend.getDbms() is not None:
errMsg += " %s (fingerprinted)" % Backend.getDbms()
if Backend.getIdentifiedDbms() is not None and (Backend.getDbms() is None or Backend.getIdentifiedDbms() != Backend.getDbms()):
errMsg += " %s (identified)" % Backend.getIdentifiedDbms()
if not errMsg.endswith(')'):
errMsg += " None"
return errMsg
def createGithubIssue(errMsg, excMsg):
"""
    Automatically creates a Github issue with unhandled exception information
"""
issues = []
try:
issues = getFileItems(paths.GITHUB_HISTORY, unique=True)
except:
pass
finally:
issues = set(issues)
_ = re.sub(r"'[^']+'", "''", excMsg)
_ = re.sub(r"\s+line \d+", "", _)
_ = re.sub(r'File ".+?/(\w+\.py)', "\g<1>", _)
_ = re.sub(r".+\Z", "", _)
key = hashlib.md5(_).hexdigest()[:8]
if key in issues:
return
msg = "\ndo you want to automatically create a new (anonymized) issue "
msg += "with the unhandled exception information at "
msg += "the official Github repository? [y/N] "
try:
test = readInput(msg, default="N")
except:
test = None
if test and test[0] in ("y", "Y"):
ex = None
errMsg = errMsg[errMsg.find("\n"):]
req = urllib2.Request(url="https://api.github.com/search/issues?q=%s" % urllib.quote("repo:sqlmapproject/sqlmap Unhandled exception (#%s)" % key))
try:
content = urllib2.urlopen(req).read()
_ = json.loads(content)
duplicate = _["total_count"] > 0
closed = duplicate and _["items"][0]["state"] == "closed"
if duplicate:
warnMsg = "issue seems to be already reported"
if closed:
warnMsg += " and resolved. Please update to the latest "
warnMsg += "development version from official GitHub repository at '%s'" % GIT_PAGE
logger.warn(warnMsg)
return
except:
pass
data = {"title": "Unhandled exception (#%s)" % key, "body": "```%s\n```\n```\n%s```" % (errMsg, excMsg)}
req = urllib2.Request(url="https://api.github.com/repos/sqlmapproject/sqlmap/issues", data=json.dumps(data), headers={"Authorization": "token %s" % GITHUB_REPORT_OAUTH_TOKEN.decode("base64")})
try:
content = urllib2.urlopen(req).read()
except Exception, ex:
content = None
issueUrl = re.search(r"https://github.com/sqlmapproject/sqlmap/issues/\d+", content or "")
if issueUrl:
infoMsg = "created Github issue can been found at the address '%s'" % issueUrl.group(0)
logger.info(infoMsg)
try:
with open(paths.GITHUB_HISTORY, "a+b") as f:
f.write("%s\n" % key)
except:
pass
else:
warnMsg = "something went wrong while creating a Github issue"
if ex:
warnMsg += " ('%s')" % getSafeExString(ex)
if "Unauthorized" in warnMsg:
warnMsg += ". Please update to the latest revision"
logger.warn(warnMsg)
def maskSensitiveData(msg):
"""
Masks sensitive data in the supplied message
"""
retVal = getUnicode(msg)
for item in filter(None, map(lambda x: conf.get(x), SENSITIVE_OPTIONS)):
        regex = SENSITIVE_DATA_REGEX % re.sub(r"(\W)", r"\\\1", getUnicode(item))
while extractRegexResult(regex, retVal):
value = extractRegexResult(regex, retVal)
retVal = retVal.replace(value, '*' * len(value))
if not conf.get("hostname"):
match = re.search(r"(?i)sqlmap.+(-u|--url)(\s+|=)([^ ]+)", retVal)
if match:
retVal = retVal.replace(match.group(3), '*' * len(match.group(3)))
if getpass.getuser():
retVal = re.sub(r"(?i)\b%s\b" % re.escape(getpass.getuser()), "*" * len(getpass.getuser()), retVal)
return retVal
def listToStrValue(value):
"""
Flattens list to a string value
>>> listToStrValue([1,2,3])
'1, 2, 3'
"""
if isinstance(value, (set, tuple)):
value = list(value)
if isinstance(value, list):
retVal = value.__str__().lstrip('[').rstrip(']')
else:
retVal = value
return retVal
def getExceptionFrameLocals():
"""
Returns dictionary with local variable content from frame
where exception has been raised
"""
retVal = {}
if sys.exc_info():
trace = sys.exc_info()[2]
while trace.tb_next:
trace = trace.tb_next
retVal = trace.tb_frame.f_locals
return retVal
def intersect(valueA, valueB, lowerCase=False):
"""
Returns intersection of the array-ized values
>>> intersect([1, 2, 3], set([1,3]))
[1, 3]
"""
retVal = []
if valueA and valueB:
valueA = arrayizeValue(valueA)
valueB = arrayizeValue(valueB)
if lowerCase:
valueA = [val.lower() if isinstance(val, basestring) else val for val in valueA]
valueB = [val.lower() if isinstance(val, basestring) else val for val in valueB]
retVal = [val for val in valueA if val in valueB]
return retVal
def removeReflectiveValues(content, payload, suppressWarning=False):
"""
Neutralizes reflective values in a given content based on a payload
(e.g. ..search.php?q=1 AND 1=2 --> "...searching for <b>1%20AND%201%3D2</b>..." --> "...searching for <b>__REFLECTED_VALUE__</b>...")
"""
retVal = content
try:
if all([content, payload]) and isinstance(content, unicode) and kb.reflectiveMechanism and not kb.heuristicMode:
def _(value):
while 2 * REFLECTED_REPLACEMENT_REGEX in value:
value = value.replace(2 * REFLECTED_REPLACEMENT_REGEX, REFLECTED_REPLACEMENT_REGEX)
return value
payload = getUnicode(urldecode(payload.replace(PAYLOAD_DELIMITER, ''), convall=True))
regex = _(filterStringValue(payload, r"[A-Za-z0-9]", REFLECTED_REPLACEMENT_REGEX.encode("string-escape")))
if regex != payload:
if all(part.lower() in content.lower() for part in filter(None, regex.split(REFLECTED_REPLACEMENT_REGEX))[1:]): # fast optimization check
parts = regex.split(REFLECTED_REPLACEMENT_REGEX)
retVal = content.replace(payload, REFLECTED_VALUE_MARKER) # dummy approach
if len(parts) > REFLECTED_MAX_REGEX_PARTS: # preventing CPU hogs
regex = _("%s%s%s" % (REFLECTED_REPLACEMENT_REGEX.join(parts[:REFLECTED_MAX_REGEX_PARTS / 2]), REFLECTED_REPLACEMENT_REGEX, REFLECTED_REPLACEMENT_REGEX.join(parts[-REFLECTED_MAX_REGEX_PARTS / 2:])))
parts = filter(None, regex.split(REFLECTED_REPLACEMENT_REGEX))
if regex.startswith(REFLECTED_REPLACEMENT_REGEX):
regex = r"%s%s" % (REFLECTED_BORDER_REGEX, regex[len(REFLECTED_REPLACEMENT_REGEX):])
else:
regex = r"\b%s" % regex
if regex.endswith(REFLECTED_REPLACEMENT_REGEX):
regex = r"%s%s" % (regex[:-len(REFLECTED_REPLACEMENT_REGEX)], REFLECTED_BORDER_REGEX)
else:
regex = r"%s\b" % regex
retVal = re.sub(r"(?i)%s" % regex, REFLECTED_VALUE_MARKER, retVal)
if len(parts) > 2:
regex = REFLECTED_REPLACEMENT_REGEX.join(parts[1:])
retVal = re.sub(r"(?i)\b%s\b" % regex, REFLECTED_VALUE_MARKER, retVal)
if retVal != content:
kb.reflectiveCounters[REFLECTIVE_COUNTER.HIT] += 1
if not suppressWarning:
warnMsg = "reflective value(s) found and filtering out"
singleTimeWarnMessage(warnMsg)
if re.search(r"FRAME[^>]+src=[^>]*%s" % REFLECTED_VALUE_MARKER, retVal, re.I):
warnMsg = "frames detected containing attacked parameter values. Please be sure to "
warnMsg += "test those separately in case that attack on this page fails"
singleTimeWarnMessage(warnMsg)
elif not kb.testMode and not kb.reflectiveCounters[REFLECTIVE_COUNTER.HIT]:
kb.reflectiveCounters[REFLECTIVE_COUNTER.MISS] += 1
if kb.reflectiveCounters[REFLECTIVE_COUNTER.MISS] > REFLECTIVE_MISS_THRESHOLD:
kb.reflectiveMechanism = False
if not suppressWarning:
debugMsg = "turning off reflection removal mechanism (for optimization purposes)"
logger.debug(debugMsg)
except MemoryError:
kb.reflectiveMechanism = False
if not suppressWarning:
debugMsg = "turning off reflection removal mechanism (because of low memory issues)"
logger.debug(debugMsg)
return retVal
def normalizeUnicode(value):
"""
Does an ASCII normalization of unicode strings
Reference: http://www.peterbe.com/plog/unicode-to-ascii
>>> normalizeUnicode(u'\u0161u\u0107uraj')
'sucuraj'
"""
return unicodedata.normalize('NFKD', value).encode('ascii', 'ignore') if isinstance(value, unicode) else value
def safeSQLIdentificatorNaming(name, isTable=False):
"""
    Returns a safe representation of SQL identifier name (internal data format)
Reference: http://stackoverflow.com/questions/954884/what-special-characters-are-allowed-in-t-sql-column-retVal
"""
retVal = name
if isinstance(name, basestring):
retVal = getUnicode(name)
_ = isTable and Backend.getIdentifiedDbms() in (DBMS.MSSQL, DBMS.SYBASE)
if _:
retVal = re.sub(r"(?i)\A%s\." % DEFAULT_MSSQL_SCHEMA, "", retVal)
if retVal.upper() in kb.keywords or (retVal or " ")[0].isdigit() or not re.match(r"\A[A-Za-z0-9_@%s\$]+\Z" % ("." if _ else ""), retVal): # MsSQL is the only DBMS where we automatically prepend schema to table name (dot is normal)
if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.ACCESS):
retVal = "`%s`" % retVal.strip("`")
elif Backend.getIdentifiedDbms() in (DBMS.PGSQL, DBMS.DB2):
retVal = "\"%s\"" % retVal.strip("\"")
elif Backend.getIdentifiedDbms() in (DBMS.ORACLE,):
retVal = "\"%s\"" % retVal.strip("\"").upper()
elif Backend.getIdentifiedDbms() in (DBMS.MSSQL,) and ((retVal or " ")[0].isdigit() or not re.match(r"\A\w+\Z", retVal, re.U)):
retVal = "[%s]" % retVal.strip("[]")
if _ and DEFAULT_MSSQL_SCHEMA not in retVal and '.' not in re.sub(r"\[[^]]+\]", "", retVal):
retVal = "%s.%s" % (DEFAULT_MSSQL_SCHEMA, retVal)
return retVal
def unsafeSQLIdentificatorNaming(name):
"""
    Extracts identifier's name from its safe SQL representation
"""
retVal = name
if isinstance(name, basestring):
if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.ACCESS):
retVal = name.replace("`", "")
elif Backend.getIdentifiedDbms() in (DBMS.PGSQL, DBMS.DB2):
retVal = name.replace("\"", "")
elif Backend.getIdentifiedDbms() in (DBMS.ORACLE,):
retVal = name.replace("\"", "").upper()
elif Backend.getIdentifiedDbms() in (DBMS.MSSQL,):
retVal = name.replace("[", "").replace("]", "")
if Backend.getIdentifiedDbms() in (DBMS.MSSQL, DBMS.SYBASE):
prefix = "%s." % DEFAULT_MSSQL_SCHEMA
if retVal.startswith(prefix):
retVal = retVal[len(prefix):]
return retVal
def isNoneValue(value):
"""
Returns whether the value is unusable (None or '')
>>> isNoneValue(None)
True
>>> isNoneValue('None')
True
>>> isNoneValue('')
True
>>> isNoneValue([])
True
>>> isNoneValue([2])
False
"""
if isinstance(value, basestring):
return value in ("None", "")
elif isListLike(value):
return all(isNoneValue(_) for _ in value)
elif isinstance(value, dict):
return not any(value)
else:
return value is None
def isNullValue(value):
"""
Returns whether the value contains explicit 'NULL' value
>>> isNullValue(u'NULL')
True
>>> isNullValue(u'foobar')
False
"""
return isinstance(value, basestring) and value.upper() == NULL
def expandMnemonics(mnemonics, parser, args):
"""
Expands mnemonic options
"""
class MnemonicNode(object):
def __init__(self):
self.next = {}
self.current = []
head = MnemonicNode()
pointer = None
for group in parser.option_groups:
for option in group.option_list:
for opt in option._long_opts + option._short_opts:
pointer = head
for char in opt:
if char == "-":
continue
elif char not in pointer.next:
pointer.next[char] = MnemonicNode()
pointer = pointer.next[char]
pointer.current.append(option)
for mnemonic in (mnemonics or "").split(','):
found = None
name = mnemonic.split('=')[0].replace("-", "").strip()
value = mnemonic.split('=')[1] if len(mnemonic.split('=')) > 1 else None
pointer = head
for char in name:
if char in pointer.next:
pointer = pointer.next[char]
else:
pointer = None
break
if pointer in (None, head):
errMsg = "mnemonic '%s' can't be resolved to any parameter name" % name
raise SqlmapSyntaxException(errMsg)
elif len(pointer.current) > 1:
options = {}
for option in pointer.current:
for opt in option._long_opts + option._short_opts:
opt = opt.strip('-')
if opt.startswith(name):
options[opt] = option
if not options:
warnMsg = "mnemonic '%s' can't be resolved" % name
logger.warn(warnMsg)
elif name in options:
found = name
debugMsg = "mnemonic '%s' resolved to %s). " % (name, found)
logger.debug(debugMsg)
else:
found = sorted(options.keys(), key=lambda x: len(x))[0]
warnMsg = "detected ambiguity (mnemonic '%s' can be resolved to: %s). " % (name, ", ".join("'%s'" % key for key in options.keys()))
warnMsg += "Resolved to shortest of those ('%s')" % found
logger.warn(warnMsg)
if found:
found = options[found]
else:
found = pointer.current[0]
debugMsg = "mnemonic '%s' resolved to %s). " % (name, found)
logger.debug(debugMsg)
if found:
try:
value = found.convert_value(found, value)
except OptionValueError:
value = None
if value is not None:
setattr(args, found.dest, value)
elif not found.type: # boolean
setattr(args, found.dest, True)
else:
errMsg = "mnemonic '%s' requires value of type '%s'" % (name, found.type)
raise SqlmapSyntaxException(errMsg)
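# Illustrative (hypothetical) example: a mnemonics string like 'flu,bat' would walk
# the prefix tree built above and presumably resolve 'flu' to '--flush-session' and
# 'bat' to '--batch', while ambiguous prefixes trigger a warning and resolve to the
# shortest matching option name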
def safeCSValue(value):
"""
Returns value safe for CSV dumping
Reference: http://tools.ietf.org/html/rfc4180
>>> safeCSValue(u'foo, bar')
u'"foo, bar"'
>>> safeCSValue(u'foobar')
u'foobar'
"""
retVal = value
if retVal and isinstance(retVal, basestring):
if not (retVal[0] == retVal[-1] == '"'):
if any(_ in retVal for _ in (conf.get("csvDel", defaults.csvDel), '"', '\n')):
retVal = '"%s"' % retVal.replace('"', '""')
return retVal
def filterPairValues(values):
"""
Returns only list-like values with length 2
>>> filterPairValues([[1, 2], [3], 1, [4, 5]])
[[1, 2], [4, 5]]
"""
retVal = []
if not isNoneValue(values) and hasattr(values, '__iter__'):
retVal = filter(lambda x: isinstance(x, (tuple, list, set)) and len(x) == 2, values)
return retVal
def randomizeParameterValue(value):
"""
    Randomizes a parameter value based on occurrences of alphanumeric characters
>>> random.seed(0)
>>> randomizeParameterValue('foobar')
'rnvnav'
>>> randomizeParameterValue('17')
'83'
"""
retVal = value
value = re.sub(r"%[0-9a-fA-F]{2}", "", value)
for match in re.finditer('[A-Z]+', value):
while True:
original = match.group()
candidate = randomStr(len(match.group())).upper()
if original != candidate:
break
retVal = retVal.replace(original, candidate)
for match in re.finditer('[a-z]+', value):
while True:
original = match.group()
candidate = randomStr(len(match.group())).lower()
if original != candidate:
break
retVal = retVal.replace(original, candidate)
for match in re.finditer('[0-9]+', value):
while True:
original = match.group()
candidate = str(randomInt(len(match.group())))
if original != candidate:
break
retVal = retVal.replace(original, candidate)
return retVal
@cachedmethod
def asciifyUrl(url, forceQuote=False):
"""
    Attempts to make a unicode URL usable with ``urllib/urllib2``.
    More specifically, it attempts to convert the unicode object ``url``,
    which is meant to represent an IRI, to a unicode object that,
    containing only ASCII characters, is a valid URI. This involves:
* IDNA/Puny-encoding the domain name.
* UTF8-quoting the path and querystring parts.
See also RFC 3987.
Reference: http://blog.elsdoerfer.name/2008/12/12/opening-iris-in-python/
>>> asciifyUrl(u'http://www.\u0161u\u0107uraj.com')
u'http://www.xn--uuraj-gxa24d.com'
"""
parts = urlparse.urlsplit(url)
if not parts.scheme or not parts.netloc:
        # apparently not a URL
return url
if all(char in string.printable for char in url):
return url
# idna-encode domain
try:
hostname = parts.hostname.encode("idna")
except LookupError:
hostname = parts.hostname.encode(UNICODE_ENCODING)
# UTF8-quote the other parts. We check each part individually if
    # it needs to be quoted - that should catch some additional user
# errors, say for example an umlaut in the username even though
# the path *is* already quoted.
def quote(s, safe):
s = s or ''
# Triggers on non-ascii characters - another option would be:
# urllib.quote(s.replace('%', '')) != s.replace('%', '')
# which would trigger on all %-characters, e.g. "&".
if s.encode("ascii", "replace") != s or forceQuote:
return urllib.quote(s.encode(UNICODE_ENCODING), safe=safe)
return s
username = quote(parts.username, '')
password = quote(parts.password, safe='')
path = quote(parts.path, safe='/')
query = quote(parts.query, safe="&=")
# put everything back together
netloc = hostname
if username or password:
netloc = '@' + netloc
if password:
netloc = ':' + password + netloc
netloc = username + netloc
try:
port = parts.port
except:
port = None
if port:
netloc += ':' + str(port)
return urlparse.urlunsplit([parts.scheme, netloc, path, query, parts.fragment])
def isAdminFromPrivileges(privileges):
"""
    Inspects privileges to see whether they belong to an admin user
"""
# In PostgreSQL the usesuper privilege means that the
# user is DBA
retVal = (Backend.isDbms(DBMS.PGSQL) and "super" in privileges)
# In Oracle the DBA privilege means that the
# user is DBA
retVal |= (Backend.isDbms(DBMS.ORACLE) and "DBA" in privileges)
# In MySQL >= 5.0 the SUPER privilege means
# that the user is DBA
retVal |= (Backend.isDbms(DBMS.MYSQL) and kb.data.has_information_schema and "SUPER" in privileges)
# In MySQL < 5.0 the super_priv privilege means
# that the user is DBA
retVal |= (Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema and "super_priv" in privileges)
# In Firebird there is no specific privilege that means
# that the user is DBA
retVal |= (Backend.isDbms(DBMS.FIREBIRD) and all(_ in privileges for _ in ("SELECT", "INSERT", "UPDATE", "DELETE", "REFERENCES", "EXECUTE")))
return retVal
def findPageForms(content, url, raise_=False, addToTargets=False):
"""
Parses given page content for possible forms
"""
class _(StringIO):
def __init__(self, content, url):
StringIO.__init__(self, unicodeencode(content, kb.pageEncoding) if isinstance(content, unicode) else content)
self._url = url
def geturl(self):
return self._url
if not content:
errMsg = "can't parse forms as the page content appears to be blank"
if raise_:
raise SqlmapGenericException(errMsg)
else:
logger.debug(errMsg)
forms = None
retVal = set()
response = _(content, url)
try:
forms = ParseResponse(response, backwards_compat=False)
except (UnicodeError, ValueError):
pass
except ParseError:
if "<html" in (content or ""):
warnMsg = "badly formed HTML at the given URL ('%s'). Going to filter it" % url
logger.warning(warnMsg)
filtered = _("".join(re.findall(FORM_SEARCH_REGEX, content)), url)
try:
forms = ParseResponse(filtered, backwards_compat=False)
except ParseError:
errMsg = "no success"
if raise_:
raise SqlmapGenericException(errMsg)
else:
logger.debug(errMsg)
if forms:
for form in forms:
try:
for control in form.controls:
if hasattr(control, "items") and not any((control.disabled, control.readonly)):
# if control has selectable items select first non-disabled
for item in control.items:
if not item.disabled:
if not item.selected:
item.selected = True
break
if conf.crawlExclude and re.search(conf.crawlExclude, form.action or ""):
dbgMsg = "skipping '%s'" % form.action
logger.debug(dbgMsg)
continue
request = form.click()
except (ValueError, TypeError), ex:
errMsg = "there has been a problem while "
errMsg += "processing page forms ('%s')" % getSafeExString(ex)
if raise_:
raise SqlmapGenericException(errMsg)
else:
logger.debug(errMsg)
else:
url = urldecode(request.get_full_url(), kb.pageEncoding)
method = request.get_method()
data = request.get_data() if request.has_data() else None
data = urldecode(data, kb.pageEncoding, plusspace=False)
if not data and method and method.upper() == HTTPMETHOD.POST:
debugMsg = "invalid POST form with blank data detected"
logger.debug(debugMsg)
continue
# flag to know if we are dealing with the same target host
_ = checkSameHost(response.geturl(), url)
if conf.scope:
if not re.search(conf.scope, url, re.I):
continue
elif not _:
continue
else:
target = (url, method, data, conf.cookie, None)
retVal.add(target)
else:
errMsg = "there were no forms found at the given target URL"
if raise_:
raise SqlmapGenericException(errMsg)
else:
logger.debug(errMsg)
if addToTargets and retVal:
for target in retVal:
kb.targets.add(target)
return retVal
def checkSameHost(*urls):
"""
    Returns True if all provided URLs share the same host
>>> checkSameHost('http://www.target.com/page1.php?id=1', 'http://www.target.com/images/page2.php')
True
>>> checkSameHost('http://www.target.com/page1.php?id=1', 'http://www.target2.com/images/page2.php')
False
"""
if not urls:
return None
elif len(urls) == 1:
return True
else:
return all(urlparse.urlparse(url or "").netloc.split(':')[0] == urlparse.urlparse(urls[0] or "").netloc.split(':')[0] for url in urls[1:])
def getHostHeader(url):
"""
Returns proper Host header value for a given target URL
>>> getHostHeader('http://www.target.com/vuln.php?id=1')
'www.target.com'
"""
retVal = url
if url:
retVal = urlparse.urlparse(url).netloc
if re.search("http(s)?://\[.+\]", url, re.I):
retVal = extractRegexResult("http(s)?://\[(?P<result>.+)\]", url)
elif any(retVal.endswith(':%d' % _) for _ in (80, 443)):
retVal = retVal.split(':')[0]
return retVal
def checkDeprecatedOptions(args):
"""
Checks for deprecated options
"""
for _ in args:
if _ in DEPRECATED_OPTIONS:
errMsg = "switch/option '%s' is deprecated" % _
if DEPRECATED_OPTIONS[_]:
errMsg += " (hint: %s)" % DEPRECATED_OPTIONS[_]
raise SqlmapSyntaxException(errMsg)
def checkSystemEncoding():
"""
Checks for problematic encodings
"""
if sys.getdefaultencoding() == "cp720":
try:
codecs.lookup("cp720")
except LookupError:
errMsg = "there is a known Python issue (#1616979) related "
errMsg += "to support for charset 'cp720'. Please visit "
errMsg += "'http://blog.oneortheother.info/tip/python-fix-cp720-encoding/index.html' "
errMsg += "and follow the instructions to be able to fix it"
logger.critical(errMsg)
warnMsg = "temporary switching to charset 'cp1256'"
logger.warn(warnMsg)
reload(sys)
sys.setdefaultencoding("cp1256")
def evaluateCode(code, variables=None):
"""
    Executes given Python code provided in string form
"""
try:
exec(code, variables)
except KeyboardInterrupt:
raise
except Exception, ex:
errMsg = "an error occurred while evaluating provided code ('%s') " % getSafeExString(ex)
raise SqlmapGenericException(errMsg)
def serializeObject(object_):
"""
Serializes given object
>>> serializeObject([1, 2, 3, ('a', 'b')])
'gAJdcQEoSwFLAksDVQFhVQFihnECZS4='
>>> serializeObject(None)
'gAJOLg=='
>>> serializeObject('foobar')
'gAJVBmZvb2JhcnEBLg=='
"""
return base64pickle(object_)
def unserializeObject(value):
"""
Unserializes object from given serialized form
>>> unserializeObject(serializeObject([1, 2, 3])) == [1, 2, 3]
True
>>> unserializeObject('gAJVBmZvb2JhcnEBLg==')
'foobar'
"""
return base64unpickle(value) if value else None
def resetCounter(technique):
"""
Resets query counter for a given technique
"""
kb.counters[technique] = 0
def incrementCounter(technique):
"""
Increments query counter for a given technique
"""
kb.counters[technique] = getCounter(technique) + 1
def getCounter(technique):
"""
Returns query counter for a given technique
"""
return kb.counters.get(technique, 0)
def applyFunctionRecursively(value, function):
"""
Applies function recursively through list-like structures
>>> applyFunctionRecursively([1, 2, [3, 4, [19]], -9], lambda _: _ > 0)
[True, True, [True, True, [True]], False]
"""
if isListLike(value):
retVal = [applyFunctionRecursively(_, function) for _ in value]
else:
retVal = function(value)
return retVal
def decodeHexValue(value, raw=False):
"""
Returns value decoded from DBMS specific hexadecimal representation
>>> decodeHexValue('3132332031')
u'123 1'
>>> decodeHexValue(['0x31', '0x32'])
[u'1', u'2']
"""
retVal = value
def _(value):
retVal = value
if value and isinstance(value, basestring):
if len(value) % 2 != 0:
retVal = "%s?" % hexdecode(value[:-1])
singleTimeWarnMessage("there was a problem decoding value '%s' from expected hexadecimal form" % value)
else:
retVal = hexdecode(value)
if not kb.binaryField and not raw:
if Backend.isDbms(DBMS.MSSQL) and value.startswith("0x"):
try:
retVal = retVal.decode("utf-16-le")
except UnicodeDecodeError:
pass
elif Backend.isDbms(DBMS.HSQLDB):
try:
retVal = retVal.decode("utf-16-be")
except UnicodeDecodeError:
pass
if not isinstance(retVal, unicode):
retVal = getUnicode(retVal, "utf8")
return retVal
try:
retVal = applyFunctionRecursively(value, _)
except:
singleTimeWarnMessage("there was a problem decoding value '%s' from expected hexadecimal form" % value)
return retVal
def extractExpectedValue(value, expected):
"""
Extracts and returns expected value by a given type
>>> extractExpectedValue(['1'], EXPECTED.BOOL)
True
>>> extractExpectedValue('1', EXPECTED.INT)
1
"""
if expected:
value = unArrayizeValue(value)
if isNoneValue(value):
value = None
elif expected == EXPECTED.BOOL:
if isinstance(value, int):
value = bool(value)
elif isinstance(value, basestring):
value = value.strip().lower()
if value in ("true", "false"):
value = value == "true"
elif value in ("1", "-1"):
value = True
elif value == "0":
value = False
else:
value = None
elif expected == EXPECTED.INT:
if isinstance(value, basestring):
value = int(value) if value.isdigit() else None
return value
def hashDBWrite(key, value, serialize=False):
"""
Helper function for writing session data to HashDB
"""
_ = "%s%s%s" % (conf.url or "%s%s" % (conf.hostname, conf.port), key, HASHDB_MILESTONE_VALUE)
conf.hashDB.write(_, value, serialize)
def hashDBRetrieve(key, unserialize=False, checkConf=False):
"""
Helper function for restoring session data from HashDB
"""
_ = "%s%s%s" % (conf.url or "%s%s" % (conf.hostname, conf.port), key, HASHDB_MILESTONE_VALUE)
retVal = conf.hashDB.retrieve(_, unserialize) if kb.resumeValues and not (checkConf and any((conf.flushSession, conf.freshQueries))) else None
if not kb.inferenceMode and not kb.fileReadMode and isinstance(retVal, basestring) and any(_ in retVal for _ in (PARTIAL_VALUE_MARKER, PARTIAL_HEX_VALUE_MARKER)):
retVal = None
return retVal
def resetCookieJar(cookieJar):
"""
Cleans cookies from a given cookie jar
"""
if not conf.loadCookies:
cookieJar.clear()
else:
try:
if not cookieJar.filename:
infoMsg = "loading cookies from '%s'" % conf.loadCookies
logger.info(infoMsg)
content = readCachedFileContent(conf.loadCookies)
lines = filter(None, (line.strip() for line in content.split("\n") if not line.startswith('#')))
handle, filename = tempfile.mkstemp(prefix=MKSTEMP_PREFIX.COOKIE_JAR)
os.close(handle)
# Reference: http://www.hashbangcode.com/blog/netscape-http-cooke-file-parser-php-584.html
with openFile(filename, "w+b") as f:
f.write("%s\n" % NETSCAPE_FORMAT_HEADER_COOKIES)
for line in lines:
_ = line.split("\t")
if len(_) == 7:
_[4] = FORCE_COOKIE_EXPIRATION_TIME
f.write("\n%s" % "\t".join(_))
cookieJar.filename = filename
cookieJar.load(cookieJar.filename, ignore_expires=True)
for cookie in cookieJar:
if cookie.expires < time.time():
warnMsg = "cookie '%s' has expired" % cookie
singleTimeWarnMessage(warnMsg)
cookieJar.clear_expired_cookies()
if not cookieJar._cookies:
errMsg = "no valid cookies found"
raise SqlmapGenericException(errMsg)
except cookielib.LoadError, msg:
errMsg = "there was a problem loading "
errMsg += "cookies file ('%s')" % re.sub(r"(cookies) file '[^']+'", "\g<1>", str(msg))
raise SqlmapGenericException(errMsg)
def decloakToTemp(filename):
"""
Decloaks content of a given file to a temporary file with similar name and extension
"""
content = decloak(filename)
_ = utf8encode(os.path.split(filename[:-1])[-1])
prefix, suffix = os.path.splitext(_)
prefix = prefix.split(os.extsep)[0]
handle, filename = tempfile.mkstemp(prefix=prefix, suffix=suffix)
os.close(handle)
with open(filename, "w+b") as f:
f.write(content)
return filename
def prioritySortColumns(columns):
"""
Sorts given column names by length in ascending order, with those containing
the string 'id' going first
>>> prioritySortColumns(['password', 'userid', 'name'])
['userid', 'name', 'password']
"""
_ = lambda x: x and "id" in x.lower()
return sorted(sorted(columns, key=len), lambda x, y: -1 if _(x) and not _(y) else 1 if not _(x) and _(y) else 0)
def getRequestHeader(request, name):
"""
Solves an issue with urllib2 Request header case sensitivity
Reference: http://bugs.python.org/issue2275
"""
retVal = None
if request and name:
_ = name.upper()
retVal = max([value if _ == key.upper() else None for key, value in request.header_items()])
return retVal
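# Illustrative example (not part of the original source): for a urllib2 Request
# created with headers {"x-forwarded-for": "127.0.0.1"}, both
# getRequestHeader(request, "X-Forwarded-For") and
# getRequestHeader(request, "x-forwarded-for") return "127.0.0.1", because the
# lookup compares header names upper-cased on both sides.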
def isNumber(value):
"""
Returns True if the given value is a number-like object
>>> isNumber(1)
True
>>> isNumber('0')
True
>>> isNumber('foobar')
False
"""
try:
float(value)
except:
return False
else:
return True
def zeroDepthSearch(expression, value):
"""
Searches for occurrences of value inside expression at zero depth
with respect to parentheses (i.e. outside any parentheses)
"""
retVal = []
depth = 0
for index in xrange(len(expression)):
if expression[index] == '(':
depth += 1
elif expression[index] == ')':
depth -= 1
elif depth == 0 and expression[index:index + len(value)] == value:
retVal.append(index)
return retVal
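# Illustrative doctest-style example (not part of the original source):
#
#     >>> zeroDepthSearch("MAX(a,b),c", ',')
#     [8]
#
# The comma inside "MAX(a,b)" sits at depth 1 and is skipped; only the comma
# separating the two top-level fields (index 8) is reported.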
def splitFields(fields, delimiter=','):
"""
Returns a list of (zero-depth) fields split by the delimiter
>>> splitFields('foo, bar, max(foo, bar)')
['foo', 'bar', 'max(foo,bar)']
"""
fields = fields.replace("%s " % delimiter, delimiter)
commas = [-1, len(fields)]
commas.extend(zeroDepthSearch(fields, ','))
commas = sorted(commas)
return [fields[x + 1:y] for (x, y) in zip(commas, commas[1:])]
def pollProcess(process, suppress_errors=False):
"""
Checks process status (prints '.' while it is still running)
"""
while True:
dataToStdout(".")
time.sleep(1)
returncode = process.poll()
if returncode is not None:
if not suppress_errors:
if returncode == 0:
dataToStdout(" done\n")
elif returncode < 0:
dataToStdout(" process terminated by signal %d\n" % returncode)
elif returncode > 0:
dataToStdout(" quit unexpectedly with return code %d\n" % returncode)
break
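# Minimal usage sketch (assumption, not part of sqlmap): pollProcess is meant
# for long-running helper binaries started via the subprocess module, e.g.:
#
#     process = subprocess.Popen(["some-helper", "arg"], stdout=subprocess.PIPE)
#     pollProcess(process)   # prints a dot every second until the helper exits
#
# where "some-helper" is a hypothetical external tool.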
def getSafeExString(ex, encoding=None):
"""
Safe way to get a proper exception representation as a string
(Note: errors to be avoided: 1) "%s" % Exception(u'\u0161') and 2) "%s" % str(Exception(u'\u0161')))
"""
retVal = ex
if getattr(ex, "message", None):
retVal = ex.message
elif getattr(ex, "msg", None):
retVal = ex.msg
return getUnicode(retVal, encoding=encoding)
|
michaelhidalgo/7WCSQ
|
Tools/SQLMap/sqlmap/lib/core/common.py
|
Python
|
apache-2.0
| 145,010
|
[
"VisIt"
] |
f6f85e20e97537725c8ee28b6fc84cf7a98c7bd7e6d2a78fed6b4589f5e9ef7d
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# mrslview - CGI entry point for the shared mrslview functionality
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
cgitb.enable()
from shared.functionality.mrslview import main
from shared.cgiscriptstub import run_cgi_script
run_cgi_script(main)
|
heromod/migrid
|
mig/cgi-bin/mrslview.py
|
Python
|
gpl-2.0
| 1,104
|
[
"Brian"
] |
16dbf75596e9fa446047c6e2287b7efd4142f3daa096d0698a8dfc4e56db9620
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RVariantannotation(RPackage):
"""Annotation of Genetic Variants
Annotate variants, compute amino acid coding changes, predict coding
outcomes."""
homepage = "https://bioconductor.org/packages/VariantAnnotation"
git = "https://git.bioconductor.org/packages/VariantAnnotation.git"
version('1.36.0', commit='9918bd19a2e6f89e5edc5fe03c8812f500bb3e19')
version('1.30.1', commit='fb1ab00872570afb280522c4663e347dafc07a9e')
version('1.28.13', commit='0393347b8ce2d5edf1a61589be93e6a93eda3419')
version('1.26.1', commit='60ae67598cc3d7ed20ee6417920f8c209085faaf')
version('1.24.5', commit='468d7f53fd743e04c9af853d58e871b4cc13a090')
version('1.22.3', commit='3a91b6d4297aa416d5f056dec6f8925eb1a8eaee')
depends_on('r@2.8.0:', type=('build', 'run'))
depends_on('r-biocgenerics@0.15.3:', type=('build', 'run'))
depends_on('r-matrixgenerics', when='@1.36.0:', type=('build', 'run'))
depends_on('r-genomeinfodb@1.11.4:', type=('build', 'run'))
depends_on('r-genomeinfodb@1.15.2:', when='@1.26.1:', type=('build', 'run'))
depends_on('r-genomicranges@1.27.6:', type=('build', 'run'))
depends_on('r-genomicranges@1.31.8:', when='@1.26.1:', type=('build', 'run'))
depends_on('r-genomicranges@1.41.5:', when='@1.36.0:', type=('build', 'run'))
depends_on('r-summarizedexperiment@1.5.3:', type=('build', 'run'))
depends_on('r-summarizedexperiment@1.19.5:', when='@1.36.0:', type=('build', 'run'))
depends_on('r-rsamtools@1.23.10:', type=('build', 'run'))
depends_on('r-rsamtools@1.31.2:', when='@1.26.1:', type=('build', 'run'))
depends_on('r-rsamtools@1.33.6:', when='@1.28.13:', type=('build', 'run'))
depends_on('r-rsamtools@1.99.0:', when='@1.30.1:', type=('build', 'run'))
depends_on('r-dbi', type=('build', 'run'))
depends_on('r-zlibbioc', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-s4vectors@0.13.13:', type=('build', 'run'))
depends_on('r-s4vectors@0.17.24:', when='@1.26.1:', type=('build', 'run'))
depends_on('r-s4vectors@0.27.12:', when='@1.36.0:', type=('build', 'run'))
depends_on('r-iranges@2.3.25:', type=('build', 'run'))
depends_on('r-iranges@2.13.13:', when='@1.26.1:', type=('build', 'run'))
depends_on('r-iranges@2.23.9:', when='@1.36.0:', type=('build', 'run'))
depends_on('r-xvector@0.5.6:', type=('build', 'run'))
depends_on('r-xvector@0.19.7:', when='@1.26.1:', type=('build', 'run'))
depends_on('r-xvector@0.29.2:', when='@1.36.0:', type=('build', 'run'))
depends_on('r-biostrings@2.33.5:', type=('build', 'run'))
depends_on('r-biostrings@2.47.6:', when='@1.26.1:', type=('build', 'run'))
depends_on('r-biostrings@2.57.2:', when='@1.36.0:', type=('build', 'run'))
depends_on('r-annotationdbi@1.27.9:', type=('build', 'run'))
depends_on('r-rtracklayer@1.25.16:', type=('build', 'run'))
depends_on('r-rtracklayer@1.39.7:', when='@1.26.1:', type=('build', 'run'))
depends_on('r-bsgenome@1.37.6:', type=('build', 'run'))
depends_on('r-bsgenome@1.47.3:', when='@1.26.1:', type=('build', 'run'))
depends_on('r-genomicfeatures@1.27.4:', type=('build', 'run'))
depends_on('r-genomicfeatures@1.31.3:', when='@1.26.1:', type=('build', 'run'))
depends_on('r-rhtslib', when='@1.30.1:', type=('build', 'run'))
depends_on('gmake', type='build')
# Not listed but needed
depends_on('curl')
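# Usage sketch (not part of the package recipe): a specific Bioconductor
# release can be requested with a version constraint on the command line,
# e.g. `spack install r-variantannotation@1.36.0`; the `when=` clauses above
# then select the matching dependency versions.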
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-variantannotation/package.py
|
Python
|
lgpl-2.1
| 3,666
|
[
"Bioconductor"
] |
cc5509f7ed557e2764e68ce2fc59e2cf392b976eb906dca9782eedc3931e240d
|
import os
import sys
import csv
import datetime
import configparser
from cappy import API
from nacc.uds3.filters import *
# Creates a folder to hold intermediate files
def recent_run_folder(out_dir):
# Check if directory exists. If not, create it.
if not os.path.exists(out_dir):
try:
os.makedirs(out_dir)
except Exception as e:
raise e
def get_headers(input_ptr):
reader = csv.DictReader(input_ptr)
headers = reader.fieldnames
print(headers)
def run_all_filters(folder_name, config):
# Calling Filters
try:
print("--------------Removing subjects already in current--------------------", file=sys.stderr)
input_path = os.path.join(folder_name, "redcap_input.csv")
output_path = os.path.join(folder_name, "clean.csv")
print("Processing", file=sys.stderr)
with open(output_path, 'w') as output_ptr, open(input_path, 'r') as input_ptr:
filter_clean_ptid(input_ptr, config, output_ptr)
print("--------------Replacing drug IDs--------------------", file=sys.stderr)
input_path = os.path.join(folder_name, "clean.csv")
output_path = os.path.join(folder_name, "drugs.csv")
with open(output_path, 'w') as output_ptr, open(input_path, 'r') as input_ptr:
filter_replace_drug_id(input_ptr, config, output_ptr)
print("--------------Fixing Headers--------------------", file=sys.stderr)
input_path = os.path.join(folder_name, "drugs.csv")
output_path = os.path.join(folder_name, "clean_headers.csv")
with open(output_path, 'w') as output_ptr, open(input_path, 'r') as input_ptr:
filter_fix_headers(input_ptr, config, output_ptr)
print("--------------Filling in Defaults--------------------", file=sys.stderr)
input_path = os.path.join(folder_name, "clean_headers.csv")
output_path = os.path.join(folder_name, "default.csv")
with open(output_path, 'w') as output_ptr, open(input_path, 'r') as input_ptr:
filter_fill_default(input_ptr, config, output_ptr)
print("--------------Updating fields--------------------", file=sys.stderr)
input_path = os.path.join(folder_name, "default.csv")
output_path = os.path.join(folder_name, "update_fields.csv")
with open(output_path, 'w') as output_ptr, open(input_path, 'r') as input_ptr:
filter_update_field(input_ptr, config, output_ptr)
print("--------------Fixing Visit Dates--------------------", file=sys.stderr)
input_path = os.path.join(folder_name, "update_fields.csv")
output_path = os.path.join(folder_name, "proper_visitdate.csv")
with open(output_path, 'w') as output_ptr, open(input_path, 'r') as input_ptr:
filter_fix_visitdate(input_ptr, config, output_ptr)
print("--------------Removing Unnecessary Records--------------------", file=sys.stderr)
input_path = os.path.join(folder_name, "proper_visitdate.csv")
output_path = os.path.join(folder_name, "CleanedPtid_Update.csv")
with open(output_path, 'w') as output_ptr, open(input_path, 'r') as input_ptr:
filter_remove_ptid(input_ptr, config, output_ptr)
print("--------------Removing Records without VisitDate--------------------", file=sys.stderr)
input_path = os.path.join(folder_name, "CleanedPtid_Update.csv")
output_path = os.path.join(folder_name, "final_Update.csv")
with open(output_path, 'w') as output_ptr, open(input_path, 'r') as input_ptr:
filter_eliminate_empty_date(input_ptr, config, output_ptr)
except Exception as e:
print("Error in Opening a file")
print(e)
return
def read_config(config_path):
config = configparser.ConfigParser()
config.read(config_path)
return config
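# Illustrative config sketch (assumption; section and field names taken from
# the config.get() calls below): the INI file passed on the command line is
# expected to contain at least a [cappy] section, e.g.
#
#     [cappy]
#     token = <REDCap API token>
#     redcap_server = https://redcap.example.org/api/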
# Getting Data From RedCap
def get_data_from_redcap(folder_name, config):
# Enter the path for filters_config
try:
token = config.get('cappy', 'token')
redcap_url = config.get('cappy', 'redcap_server')
except Exception as e:
print("Please check the config file and validate all the proper fields exist", file=sys.stderr)
print(e)
raise e
redcap_access_api = API(token, redcap_url, 'master.yaml')
res = redcap_access_api.export_records(adhoc_redcap_options={
'format': 'csv'
})
try:
rawdata = str(res.text)
myreader = csv.reader(rawdata.splitlines())
try:
with open(os.path.join(folder_name, "redcap_input.csv"), "w") as file:
writer = csv.writer(file, delimiter=',')
for row in myreader:
writer.writerow(row)
except Exception as e:
print("Error in Writing")
print(e)
except Exception:
print("Error in CSV file")
return
def main():
currentdate = datetime.datetime.now().strftime('%m-%d-%Y')
folder_name = "run_" + currentdate
print("Recent folder " + folder_name, file=sys.stderr)
current_directory = os.getcwd()
identified_folder = os.path.join(current_directory, folder_name)
if not os.path.exists(identified_folder):
recent_run_folder(identified_folder)
# Reading from Config and Accessing the necessary Data
config_path = sys.argv[1]
config = read_config(config_path)
get_data_from_redcap(folder_name, config)
run_all_filters(folder_name, config_path)
exit()
if __name__ == '__main__':
main()
|
ctsit/nacculator
|
nacc/run_filters.py
|
Python
|
bsd-2-clause
| 5,622
|
[
"VisIt"
] |
70fa58f58db217a31ae8edf5c7dd68922135e0a162fd090545572f14fd507880
|
#!/usr/bin/env python
# Copyright (C) 2014 Open Data ("Open Data" refers to
# one or more of the following companies: Open Data Partners LLC,
# Open Data Research LLC, or Open Data Capital LLC.)
#
# This file is part of Hadrian.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import titus.pfaast as ast
from titus.datatype import AvroArray
from titus.datatype import AvroDouble
from titus.datatype import AvroString
from titus.datatype import AvroTypeBuilder
from titus.signature import LabelData
from titus.util import uniqueEngineName, uniqueRecordName, uniqueEnumName
class Context(object):
"""PMML-to-PFA conversion context."""
def copy(self, **butChange):
"""Copy this context object with a few members changed."""
out = Context()
out.__dict__ = dict(self.__dict__)
out.__dict__.update(butChange)
return out
def fieldRef(self, name):
"""Generate a PFA field reference, which may be a direct reference or a member of the input record.
:type name: string
:param name: name of the field
:rtype: titus.pfaast.Ref or titus.pfaast.AttrGet
:return: PFA expression that gets the field
"""
if name in self.scope:
return ast.Ref(name)
elif name in self.dataDictionary:
return ast.AttrGet(ast.Ref("input"), [ast.LiteralString(name)])
else:
raise NameError("unknown field \"{0}\"".format(name))
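# Illustrative behaviour (comment added for clarity, not in the original):
# if "x" is a locally defined symbol, fieldRef("x") yields ast.Ref("x"); if it
# is only declared in the PMML DataDictionary, the reference becomes
# ast.AttrGet(ast.Ref("input"), [ast.LiteralString("x")]), i.e. input.x.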
def symbolTable(self):
"""Symbol table as a dict from names to type strings."""
out = {"input": self.inputType}
out.update(self.scope)
return out
class PmmlBinding(object):
"""Base class for loaded PMML elements."""
def __init__(self):
self.children = []
self.text = ""
self.pos = None
@property
def tag(self):
"""PMML tag name (string)."""
return self.__class__.__name__
class ModelElement(object):
"""Trait for PMML ModelElements."""
def defineFields(self, options, context, transformations):
"""Create PFA ``let`` expressions to define fields.
:type options: dict of string
:param options: PMML-to-PFA conversion options
:type context: titus.pmml.version_independent.Context
:param context: PMML-to-PFA conversion context
:type transformations: PMML node with ``<DerivedField>`` elements
:param transformations: derived fields to convert to ``let`` expressions
:rtype: list of titus.pfaast.Expression
:return: PFA ``let`` expressions
"""
action = []
for derivedField in transformations.DerivedField:
name, value = derivedField.toPFA(options, context)
action.append(ast.Let({name: value}))
return action
class Expression(object):
"""Trait for PMML Expressions."""
pass
class Predicate(object):
"""Trait for PMML Predicates."""
pass
class HasDataType(object):
"""Mixin for PMML nodes that handle data types."""
def pmmlTypeToAvro(self, dataType=None):
"""Limited PMML type to PFA type converter.
:type dataType: string or ``None``
:param dataType: PMML data type name or ``None`` for ``self.dataType``
:rtype: string
:return: PFA data type name
"""
if dataType is None:
dataType = self.dataType
if dataType == "string":
return "string"
elif dataType == "integer":
return "int"
elif dataType == "float":
return "float"
elif dataType == "double":
return "double"
else:
raise NotImplementedError
class PMML(PmmlBinding):
"""Represents a <PMML> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
inputType = self.DataDictionary[0].toPFA(options, context)
context.inputType = context.avroTypeBuilder.resolveOneType(json.dumps(inputType))
models = self.models()
if len(models) == 0:
action = [ast.LiteralNull()]
fcns = {}
context.cells = {}
context.pools = {}
outputType = "null"
else:
action = []
fcns = {}
context.scope = {}
context.fcns = {}
context.cells = {}
context.pools = {}
context.storageType = "cell"
context.storageName = "modelData"
if len(self.TransformationDictionary) > 0:
for defineFunction in self.TransformationDictionary[0].DefineFunction:
name, fcn = defineFunction.toPFA(options, context)
fcns[name] = fcn
context.fcns["u." + name] = ast.UserFcn.fromFcnDef("u." + name, fcn)
action.extend(models[0].defineFields(options, context, self.TransformationDictionary[0]))
if len(models[0].LocalTransformations) > 0:
action.extend(models[0].defineFields(options, context, models[0].LocalTransformations[0]))
action.extend(models[0].toPFA(options, context))
outputType = context.outputType
return ast.EngineConfig(
name=options.get("engine.name", uniqueEngineName()),
method=ast.Method.MAP,
inputPlaceholder=context.avroTypeBuilder.makePlaceholder(json.dumps(inputType)),
outputPlaceholder=context.avroTypeBuilder.makePlaceholder(json.dumps(outputType)),
begin=[],
action=action,
end=[],
fcns=fcns,
zero=None,
merge=None,
cells=context.cells,
pools=context.pools,
randseed=options.get("engine.randseed", None),
doc=options.get("engine.doc", None),
version=options.get("engine.version", None),
metadata=options.get("engine.metadata", {}),
options=options.get("engine.options", {}))
class ARIMA(PmmlBinding):
"""Represents a <ARIMA> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Aggregate(PmmlBinding, Expression):
"""Represents a <Aggregate> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Alternate(PmmlBinding):
"""Represents a <Alternate> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class AlwaysFalse(PmmlBinding, Predicate):
"""Represents a <AlwaysFalse> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class AlwaysTrue(PmmlBinding, Predicate):
"""Represents a <AlwaysTrue> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Annotation(PmmlBinding):
"""Represents a <Annotation> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Anova(PmmlBinding):
"""Represents a <Anova> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class AnovaRow(PmmlBinding):
"""Represents a <AnovaRow> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class AntecedentSequence(PmmlBinding):
"""Represents a <AntecedentSequence> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class AnyDistribution(PmmlBinding):
"""Represents a <AnyDistribution> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
def zValue(self, fieldName):
raise NotImplementedError
class Application(PmmlBinding):
"""Represents a <Application> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Apply(PmmlBinding, Expression):
"""Represents a <Apply> tag and provides methods to convert to PFA."""
pmmlToPFA = {
"+": "+",
"-": "-",
"*": "*",
"/": "/",
"log10": "log10",
"ln": "ln",
"sqrt": "sqrt",
"abs": "abs",
"exp": "exp",
"pow": "pow",
"floor": "floor",
"ceil": "ceil",
"round": "round",
"equal": "==",
"notEqual": "!=",
"lessThan": "<",
"lessOrEqual": "<=",
"greaterThan": ">",
"greaterOrEqual": ">=",
"and": "and",
"or": "or",
"not": "not",
"uppercase": "s.upper",
"lowercase": "s.lower",
}
def argTypes(self, args, context):
return [ast.inferType(x, context.symbolTable(), fcns=context.fcns) for x in args]
def broadestType(self, types):
return LabelData.broadestType(types)
def toPFA(self, options, context):
expressions = [x for x in self.children if isinstance(x, Expression)]
args = [x.toPFA(options, context) for x in expressions]
if "u." + self.function in context.fcns:
return ast.Call("u." + self.function, args)
elif self.function in self.pmmlToPFA:
return ast.Call(self.pmmlToPFA[self.function], args)
elif self.function == "min":
if len(expressions) == 2:
return ast.Call("min", args)
else:
return ast.Call("a.min", [ast.NewArray(args, AvroArray(self.broadestType(self.argTypes(args, context))))])
elif self.function == "max":
if len(expressions) == 2:
return ast.Call("max", args)
else:
return ast.Call("a.max", [ast.NewArray(args, AvroArray(self.broadestType(self.argTypes(args, context))))])
elif self.function == "sum":
if len(expressions) == 2:
return ast.Call("+", args)
else:
return ast.Call("a.sum", [ast.NewArray(args, AvroArray(self.broadestType(self.argTypes(args, context))))])
elif self.function == "avg":
return ast.Call("a.mean", [ast.NewArray(args, AvroArray(self.broadestType(self.argTypes(args, context))))])
elif self.function == "median":
return ast.Call("a.median", [ast.NewArray(args, AvroArray(self.broadestType(self.argTypes(args, context))))])
elif self.function == "product":
if len(expressions) == 2:
return ast.Call("*", args)
else:
return ast.Call("a.product", [ast.NewArray(args, AvroArray(self.broadestType(self.argTypes(args, context))))])
elif self.function == "threshold":
return ast.If(ast.Call(">", args), [ast.LiteralInt(1)], [ast.LiteralInt(0)])
elif self.function == "isMissing" or self.function == "isNotMissing":
raise NotImplementedError
elif self.function == "isIn":
return ast.Call("a.contains", [args(1), args(0)])
elif self.function == "isNotIn":
return ast.Call("not", [ast.Call("a.contains", [args(1), args(0)])])
elif self.function == "if":
if len(args) != 3:
raise NotImplementedError
else:
return ast.If(args[0], [args[1]], [args[2]])
elif self.function == "substring":
# PMML substring is 1-based (string, startPos, length); PFA s.substr takes
# 0-based start/end indices, so shift the start and derive the end from the length.
return ast.Call("s.substr", [args[0], ast.Call("-", [args[1], ast.LiteralInt(1)]), ast.Call("-", [ast.Call("+", [args[1], args[2]]), ast.LiteralInt(1)])])
elif self.function == "trimBlanks":
return ast.Call("s.strip", [args(0), ast.LiteralString(" \t\n")])
elif self.function == "concat":
if len(expressions) == 2:
return ast.Call("s.concat", args)
else:
return ast.Call("a.join", [ast.NewArray(args, AvroArray(AvroString())), ast.LiteralString("")])
elif self.function == "replace" or self.function == "matches":
raise NotImplementedError # requires regular expressions
elif self.function == "formatNumber":
raise NotImplementedError # requires printf-like formatting
elif self.function == "formatDatetime":
raise NotImplementedError
elif self.function == "dateDaysSinceYear":
raise NotImplementedError
elif self.function == "dateSecondsSinceYear":
raise NotImplementedError
elif self.function == "dateSecondsSinceMidnight":
raise NotImplementedError
else:
raise ValueError("not a PMML built-in function: " + self.function)
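# Worked example (illustrative, not from the original source): a PMML
# <Apply function="sum"> with exactly two argument expressions maps to the
# binary PFA call ast.Call("+", args); with three or more arguments it instead
# becomes ast.Call("a.sum", [ast.NewArray(args, AvroArray(<broadest argument type>))]),
# mirroring the min/max/product branches above.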
class Array(PmmlBinding):
"""Represents a <Array> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class AssociationModel(PmmlBinding, ModelElement):
"""Represents a <AssociationModel> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class AssociationRule(PmmlBinding):
"""Represents a <AssociationRule> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Attribute(PmmlBinding):
"""Represents a <Attribute> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class BaseCumHazardTables(PmmlBinding):
"""Represents a <BaseCumHazardTables> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Baseline(PmmlBinding):
"""Represents a <Baseline> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class BaselineCell(PmmlBinding):
"""Represents a <BaselineCell> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class BaselineModel(PmmlBinding, ModelElement):
"""Represents a <BaselineModel> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
return [self.TestDistributions[0].toPFA(options, context)]
class BaselineStratum(PmmlBinding):
"""Represents a <BaselineStratum> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class BayesInput(PmmlBinding):
"""Represents a <BayesInput> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class BayesInputs(PmmlBinding):
"""Represents a <BayesInputs> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class BayesOutput(PmmlBinding):
"""Represents a <BayesOutput> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class BoundaryValueMeans(PmmlBinding):
"""Represents a <BoundaryValueMeans> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class BoundaryValues(PmmlBinding):
"""Represents a <BoundaryValues> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class CategoricalPredictor(PmmlBinding):
"""Represents a <CategoricalPredictor> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Categories(PmmlBinding):
"""Represents a <Categories> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Category(PmmlBinding):
"""Represents a <Category> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Characteristic(PmmlBinding):
"""Represents a <Characteristic> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Characteristics(PmmlBinding):
"""Represents a <Characteristics> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ChildParent(PmmlBinding):
"""Represents a <ChildParent> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ClassLabels(PmmlBinding):
"""Represents a <ClassLabels> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Cluster(PmmlBinding):
"""Represents a <Cluster> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ClusteringField(PmmlBinding):
"""Represents a <ClusteringField> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ClusteringModel(PmmlBinding, ModelElement):
"""Represents a <ClusteringModel> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ClusteringModelQuality(PmmlBinding):
"""Represents a <ClusteringModelQuality> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Coefficient(PmmlBinding):
"""Represents a <Coefficient> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Coefficients(PmmlBinding):
"""Represents a <Coefficients> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ComparisonMeasure(PmmlBinding):
"""Represents a <ComparisonMeasure> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Comparisons(PmmlBinding):
"""Represents a <Comparisons> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ComplexPartialScore(PmmlBinding):
"""Represents a <ComplexPartialScore> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class CompoundPredicate(PmmlBinding, Predicate):
"""Represents a <CompoundPredicate> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class CompoundRule(PmmlBinding):
"""Represents a <CompoundRule> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Con(PmmlBinding):
"""Represents a <Con> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ConfusionMatrix(PmmlBinding):
"""Represents a <ConfusionMatrix> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ConsequentSequence(PmmlBinding):
"""Represents a <ConsequentSequence> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Constant(PmmlBinding, Expression):
"""Represents a <Constant> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
if self.dataType is None:
try:
value = int(self.text)
except ValueError:
try:
value = float(self.text)
except ValueError:
out = ast.LiteralString(self.text)
else:
out = ast.LiteralDouble(value)
else:
out = ast.LiteralInt(value)
elif self.dataType == "string":
out = ast.LiteralString(self.text)
elif self.dataType == "integer":
out = ast.LiteralInt(int(self.text))
elif self.dataType == "float":
out = ast.LiteralFloat(float(self.text))
elif self.dataType == "double":
out = ast.LiteralDouble(float(self.text))
else:
raise NotImplementedError
return out
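# Illustrative mapping (not part of the original source): with no declared
# dataType, a <Constant> whose text is "3" becomes ast.LiteralInt(3), "3.5"
# becomes ast.LiteralDouble(3.5), and "spam" falls through to
# ast.LiteralString("spam"); a declared dataType overrides this inference.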
class Constraints(PmmlBinding):
"""Represents a <Constraints> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ContStats(PmmlBinding):
"""Represents a <ContStats> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class CorrelationFields(PmmlBinding):
"""Represents a <CorrelationFields> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class CorrelationMethods(PmmlBinding):
"""Represents a <CorrelationMethods> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class CorrelationValues(PmmlBinding):
"""Represents a <CorrelationValues> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Correlations(PmmlBinding):
"""Represents a <Correlations> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class CountTable(PmmlBinding):
"""Represents a <CountTable> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Counts(PmmlBinding):
"""Represents a <Counts> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Covariances(PmmlBinding):
"""Represents a <Covariances> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class CovariateList(PmmlBinding):
"""Represents a <CovariateList> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class DataDictionary(PmmlBinding):
"""Represents a <DataDictionary> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
context.dataDictionary = {}
fields = []
for dataField in self.DataField:
fields.append(dataField.toPFA(options, context))
return {"type": "record", "name": "DataDictionary", "fields": fields}
class DataField(PmmlBinding, HasDataType):
"""Represents a <DataField> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
context.dataDictionary[self.name] = {"type": self.pmmlTypeToAvro()}
return {"name": self.name, "type": self.pmmlTypeToAvro()}
class Decision(PmmlBinding):
"""Represents a <Decision> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class DecisionTree(PmmlBinding):
"""Represents a <DecisionTree> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Decisions(PmmlBinding):
"""Represents a <Decisions> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class DefineFunction(PmmlBinding):
"""Represents a <DefineFunction> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
expressions = [x for x in self.children if isinstance(x, Expression)]
params = []
for parameterField in self.ParameterField:
if parameterField.dataType is None:
raise TypeError("parameter field dataType needed for field \"{0}\" of function \"{1}\"".format(parameterField.name, self.name))
params.append(parameterField.toPFA(options, context))
symbolTable = {}
for p in params:
n = p.keys()[0]
v = p.values()[0]
symbolTable[n] = v
expr = expressions[0].toPFA(options, context.copy(scope=symbolTable))
inferred = ast.inferType(expr, symbolTable, fcns=context.fcns)
if self.dataType is not None:
declared = context.avroTypeBuilder.resolveOneType(json.dumps(self.pmmlTypeToAvro()))
if not declared.accepts(inferred):
raise TypeError("DefineFunction {0} has inferred type {1} and declared type {2}".format(self.name, repr(inferred), repr(declared)))
ret = declared
if not inferred.accepts(declared):
expr = ast.Upcast(expr, context.avroTypeBuilder.makePlaceholder(json.dumps(self.pmmlTypeToAvro())))
else:
ret = inferred
return self.name, ast.FcnDef(params, ret, [expr])
class Delimiter(PmmlBinding):
"""Represents a <Delimiter> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class DerivedField(PmmlBinding, HasDataType):
"""Represents a <DerivedField> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
for child in self.children:
if isinstance(child, Expression):
expr = child.toPFA(options, context)
inferred = ast.inferType(expr, context.symbolTable(), fcns=context.fcns)
if self.dataType is not None:
declared = context.avroTypeBuilder.resolveOneType(json.dumps(self.pmmlTypeToAvro()))
if not declared.accepts(inferred):
raise TypeError("DerivedField {0} has inferred type {1} and declared type {2}".format(self.name, repr(inferred), repr(declared)))
context.scope[self.name] = declared
if not inferred.accepts(declared):
expr = ast.Upcast(expr, context.avroTypeBuilder.makePlaceholder(json.dumps(self.pmmlTypeToAvro())))
else:
context.scope[self.name] = inferred
return self.name, expr
class DiscrStats(PmmlBinding):
"""Represents a <DiscrStats> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Discretize(PmmlBinding, Expression):
"""Represents a <Discretize> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class DiscretizeBin(PmmlBinding):
"""Represents a <DiscretizeBin> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class DocumentTermMatrix(PmmlBinding):
"""Represents a <DocumentTermMatrix> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class EventValues(PmmlBinding):
"""Represents a <EventValues> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ExponentialSmoothing(PmmlBinding):
"""Represents a <ExponentialSmoothing> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Extension(PmmlBinding):
"""Represents a <Extension> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class FactorList(PmmlBinding):
"""Represents a <FactorList> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class FieldColumnPair(PmmlBinding):
"""Represents a <FieldColumnPair> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class FieldRef(PmmlBinding, Expression):
"""Represents a <FieldRef> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
return context.fieldRef(self.field)
class FieldValue(PmmlBinding):
"""Represents a <FieldValue> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class FieldValueCount(PmmlBinding):
"""Represents a <FieldValueCount> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class GaussianDistribution(PmmlBinding):
"""Represents a <GaussianDistribution> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
def zValue(self, fieldName):
return ast.Call("/", [ast.Call("-", [fieldName, ast.LiteralDouble(float(self.mean))]), ast.Call("m.sqrt", [ast.LiteralDouble(float(self.variance))])])
class GeneralRegressionModel(PmmlBinding, ModelElement):
"""Represents a <GeneralRegressionModel> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Header(PmmlBinding):
"""Represents a <Header> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class INT_Entries(PmmlBinding):
"""Represents a <INT_Entries> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class INT_SparseArray(PmmlBinding):
"""Represents a <INT_SparseArray> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Indices(PmmlBinding):
"""Represents a <Indices> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class InlineTable(PmmlBinding):
"""Represents a <InlineTable> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class InstanceField(PmmlBinding):
"""Represents a <InstanceField> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class InstanceFields(PmmlBinding):
"""Represents a <InstanceFields> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Interval(PmmlBinding):
"""Represents a <Interval> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Item(PmmlBinding):
"""Represents a <Item> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ItemRef(PmmlBinding):
"""Represents a <ItemRef> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Itemset(PmmlBinding):
"""Represents a <Itemset> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class KNNInput(PmmlBinding):
"""Represents a <KNNInput> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class KNNInputs(PmmlBinding):
"""Represents a <KNNInputs> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class KohonenMap(PmmlBinding):
"""Represents a <KohonenMap> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Level(PmmlBinding):
"""Represents a <Level> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class LiftData(PmmlBinding):
"""Represents a <LiftData> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class LiftGraph(PmmlBinding):
"""Represents a <LiftGraph> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class LinearKernelType(PmmlBinding):
"""Represents a <LinearKernelType> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class LinearNorm(PmmlBinding):
"""Represents a <LinearNorm> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class LocalTransformations(PmmlBinding):
"""Represents a <LocalTransformations> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class MapValues(PmmlBinding, Expression):
"""Represents a <MapValues> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class MatCell(PmmlBinding):
"""Represents a <MatCell> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Matrix(PmmlBinding):
"""Represents a <Matrix> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class MiningBuildTask(PmmlBinding):
"""Represents a <MiningBuildTask> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class MiningField(PmmlBinding):
"""Represents a <MiningField> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class MiningModel(PmmlBinding, ModelElement):
"""Represents a <MiningModel> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class MiningSchema(PmmlBinding):
"""Represents a <MiningSchema> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class MissingValueWeights(PmmlBinding):
"""Represents a <MissingValueWeights> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ModelExplanation(PmmlBinding):
"""Represents a <ModelExplanation> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ModelLiftGraph(PmmlBinding):
"""Represents a <ModelLiftGraph> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ModelStats(PmmlBinding):
"""Represents a <ModelStats> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ModelVerification(PmmlBinding):
"""Represents a <ModelVerification> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class MultivariateStat(PmmlBinding):
"""Represents a <MultivariateStat> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class MultivariateStats(PmmlBinding):
"""Represents a <MultivariateStats> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class NaiveBayesModel(PmmlBinding, ModelElement):
"""Represents a <NaiveBayesModel> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class NearestNeighborModel(PmmlBinding, ModelElement):
"""Represents a <NearestNeighborModel> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class NeuralInput(PmmlBinding):
"""Represents a <NeuralInput> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class NeuralInputs(PmmlBinding):
"""Represents a <NeuralInputs> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class NeuralLayer(PmmlBinding):
"""Represents a <NeuralLayer> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class NeuralNetwork(PmmlBinding, ModelElement):
"""Represents a <NeuralNetwork> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class NeuralOutput(PmmlBinding):
"""Represents a <NeuralOutput> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class NeuralOutputs(PmmlBinding):
"""Represents a <NeuralOutputs> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Neuron(PmmlBinding):
"""Represents a <Neuron> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Node(PmmlBinding):
"""Represents a <Node> tag and provides methods to convert to PFA."""
def nodes(self):
"""Recursively collects all descendant <Node> elements."""
out = []
for node in self.Node:
out.append(node)
out.extend(node.nodes())
return out
def predicate(self):
"""Returns this node's single predicate, whichever predicate type is present."""
return (self.SimplePredicate + self.CompoundPredicate + self.SimpleSetPredicate + self.AlwaysTrue + self.AlwaysFalse)[0]
def simpleWalk(self, context, functionName, splitCharacteristic, predicateTypes):
"""Walks a binary-split tree of SimplePredicates, building a nested TreeNode structure."""
if len(self.Node) == 0:
if functionName == "regression":
return {"double": float(self.score)}
else:
return {"string": self.score}
else:
if splitCharacteristic == "binarySplit":
left, right = self.Node
if predicateTypes == set(["SimplePredicate"]):
fieldName = left.predicate().field
if right.predicate().field != fieldName:
raise NotImplementedError
valueType = context.dataDictionary[fieldName]["type"]
lop = left.predicate().operator
rop = right.predicate().operator
lval = left.predicate().value
rval = right.predicate().value
if valueType in ("int", "long", "float", "double"):
lval, rval = {"double": float(lval)}, {"double": float(rval)}
else:
lval, rval = {"string": lval}, {"string": rval}
if (lop, rop) == ("equal", "notEqual") and lval == rval:
return {"TreeNode": {
"field": fieldName,
"operator": "==",
"value": lval,
"pass": left.simpleWalk(context, functionName, splitCharacteristic, predicateTypes),
"fail": right.simpleWalk(context, functionName, splitCharacteristic, predicateTypes)}}
elif (lop, rop) == ("notEqual", "equal") and lval == rval:
return {"TreeNode": {
"field": fieldName,
"operator": "!=",
"value": lval,
"pass": left.simpleWalk(context, functionName, splitCharacteristic, predicateTypes),
"fail": right.simpleWalk(context, functionName, splitCharacteristic, predicateTypes)}}
elif (lop, rop) == ("lessThan", "greaterOrEqual") and lval == rval:
return {"TreeNode": {
"field": fieldName,
"operator": "<",
"value": lval,
"pass": left.simpleWalk(context, functionName, splitCharacteristic, predicateTypes),
"fail": right.simpleWalk(context, functionName, splitCharacteristic, predicateTypes)}}
elif (lop, rop) == ("lessOrEqual", "greaterThan") and lval == rval:
return {"TreeNode": {
"field": fieldName,
"operator": "<=",
"value": lval,
"pass": left.simpleWalk(context, functionName, splitCharacteristic, predicateTypes),
"fail": right.simpleWalk(context, functionName, splitCharacteristic, predicateTypes)}}
elif (lop, rop) == ("greaterThan", "lessOrEqual") and lval == rval:
return {"TreeNode": {
"field": fieldName,
"operator": ">",
"value": lval,
"pass": left.simpleWalk(context, functionName, splitCharacteristic, predicateTypes),
"fail": right.simpleWalk(context, functionName, splitCharacteristic, predicateTypes)}}
elif (lop, rop) == ("greaterOrEqual", "lessThan") and lval == rval:
return {"TreeNode": {
"field": fieldName,
"operator": ">=",
"value": lval,
"pass": left.simpleWalk(context, functionName, splitCharacteristic, predicateTypes),
"fail": right.simpleWalk(context, functionName, splitCharacteristic, predicateTypes)}}
else:
raise NotImplementedError
else:
raise NotImplementedError
else:
raise NotImplementedError
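# Illustrative result (not part of the original source): a binary split whose
# left predicate is SimplePredicate(field="age", operator="lessThan", value="50")
# and whose right predicate is the matching greaterOrEqual test walks to
# {"TreeNode": {"field": "age", "operator": "<", "value": {"double": 50.0},
#               "pass": <left subtree or score>, "fail": <right subtree or score>}}.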
def toPFA(self, options, context):
raise NotImplementedError
class NormContinuous(PmmlBinding, Expression):
"""Represents a <NormContinuous> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class NormDiscrete(PmmlBinding, Expression):
"""Represents a <NormDiscrete> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class NormalizedCountTable(PmmlBinding):
"""Represents a <NormalizedCountTable> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class NumericInfo(PmmlBinding):
"""Represents a <NumericInfo> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class NumericPredictor(PmmlBinding):
"""Represents a <NumericPredictor> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class OptimumLiftGraph(PmmlBinding):
"""Represents a <OptimumLiftGraph> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Output(PmmlBinding):
"""Represents a <Output> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class OutputField(PmmlBinding):
"""Represents a <OutputField> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class PCell(PmmlBinding):
"""Represents a <PCell> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class PCovCell(PmmlBinding):
"""Represents a <PCovCell> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class PCovMatrix(PmmlBinding):
"""Represents a <PCovMatrix> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class PPCell(PmmlBinding):
"""Represents a <PPCell> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class PPMatrix(PmmlBinding):
"""Represents a <PPMatrix> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class PairCounts(PmmlBinding):
"""Represents a <PairCounts> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ParamMatrix(PmmlBinding):
"""Represents a <ParamMatrix> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Parameter(PmmlBinding):
"""Represents a <Parameter> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ParameterField(PmmlBinding, HasDataType):
"""Represents a <ParameterField> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
return {self.name: context.avroTypeBuilder.resolveOneType(json.dumps(self.pmmlTypeToAvro()))}
class ParameterList(PmmlBinding):
"""Represents a <ParameterList> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Partition(PmmlBinding):
"""Represents a <Partition> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class PartitionFieldStats(PmmlBinding):
"""Represents a <PartitionFieldStats> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class PoissonDistribution(PmmlBinding):
"""Represents a <PoissonDistribution> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
def zValue(self, fieldName):
raise NotImplementedError
class PolynomialKernelType(PmmlBinding):
"""Represents a <PolynomialKernelType> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class PredictiveModelQuality(PmmlBinding):
"""Represents a <PredictiveModelQuality> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Predictor(PmmlBinding):
"""Represents a <Predictor> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class PredictorTerm(PmmlBinding):
"""Represents a <PredictorTerm> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Quantile(PmmlBinding):
"""Represents a <Quantile> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class REAL_Entries(PmmlBinding):
"""Represents a <REAL_Entries> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class REAL_SparseArray(PmmlBinding):
"""Represents a <REAL_SparseArray> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ROC(PmmlBinding):
"""Represents a <ROC> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ROCGraph(PmmlBinding):
"""Represents a <ROCGraph> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class RadialBasisKernelType(PmmlBinding):
"""Represents a <RadialBasisKernelType> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class RandomLiftGraph(PmmlBinding):
"""Represents a <RandomLiftGraph> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Regression(PmmlBinding):
"""Represents a <Regression> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class RegressionModel(PmmlBinding, ModelElement):
"""Represents a <RegressionModel> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class RegressionTable(PmmlBinding):
"""Represents a <RegressionTable> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ResultField(PmmlBinding):
"""Represents a <ResultField> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class RuleSelectionMethod(PmmlBinding):
"""Represents a <RuleSelectionMethod> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class RuleSet(PmmlBinding):
"""Represents a <RuleSet> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class RuleSetModel(PmmlBinding, ModelElement):
"""Represents a <RuleSetModel> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class ScoreDistribution(PmmlBinding):
"""Represents a <ScoreDistribution> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Scorecard(PmmlBinding, ModelElement):
"""Represents a <Scorecard> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class SeasonalTrendDecomposition(PmmlBinding):
"""Represents a <SeasonalTrendDecomposition> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Seasonality_ExpoSmooth(PmmlBinding):
"""Represents a <Seasonality_ExpoSmooth> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Segment(PmmlBinding):
"""Represents a <Segment> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Segmentation(PmmlBinding):
"""Represents a <Segmentation> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class SelectResult(PmmlBinding):
"""Represents a <SelectResult> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Sequence(PmmlBinding):
"""Represents a <Sequence> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class SequenceModel(PmmlBinding, ModelElement):
"""Represents a <SequenceModel> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class SequenceReference(PmmlBinding):
"""Represents a <SequenceReference> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class SequenceRule(PmmlBinding):
"""Represents a <SequenceRule> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class SetPredicate(PmmlBinding):
"""Represents a <SetPredicate> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class SetReference(PmmlBinding):
"""Represents a <SetReference> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class SigmoidKernelType(PmmlBinding):
"""Represents a <SigmoidKernelType> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class SimplePredicate(PmmlBinding, Predicate):
"""Represents a <SimplePredicate> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class SimpleRule(PmmlBinding):
"""Represents a <SimpleRule> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class SimpleSetPredicate(PmmlBinding, Predicate):
"""Represents a <SimpleSetPredicate> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class SpectralAnalysis(PmmlBinding):
"""Represents a <SpectralAnalysis> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class SupportVector(PmmlBinding):
"""Represents a <SupportVector> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class SupportVectorMachine(PmmlBinding):
"""Represents a <SupportVectorMachine> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class SupportVectorMachineModel(PmmlBinding, ModelElement):
"""Represents a <SupportVectorMachineModel> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class SupportVectors(PmmlBinding):
"""Represents a <SupportVectors> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TableLocator(PmmlBinding):
"""Represents a <TableLocator> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Target(PmmlBinding):
"""Represents a <Target> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TargetValue(PmmlBinding):
"""Represents a <TargetValue> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TargetValueCount(PmmlBinding):
"""Represents a <TargetValueCount> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TargetValueCounts(PmmlBinding):
"""Represents a <TargetValueCounts> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TargetValueStat(PmmlBinding):
"""Represents a <TargetValueStat> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TargetValueStats(PmmlBinding):
"""Represents a <TargetValueStats> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Targets(PmmlBinding):
"""Represents a <Targets> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Taxonomy(PmmlBinding):
"""Represents a <Taxonomy> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TestDistributions(PmmlBinding):
"""Represents a <TestDistributions> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
if self.testStatistic == "zValue":
context.outputType = "double"
return self.Baseline[0].distribution().zValue(context.fieldRef(self.field))
else:
raise NotImplementedError
class TextCorpus(PmmlBinding):
"""Represents a <TextCorpus> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TextDictionary(PmmlBinding):
"""Represents a <TextDictionary> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TextDocument(PmmlBinding):
"""Represents a <TextDocument> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TextIndex(PmmlBinding):
"""Represents a <TextIndex> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TextIndexNormalization(PmmlBinding):
"""Represents a <TextIndexNormalization> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TextModel(PmmlBinding, ModelElement):
"""Represents a <TextModel> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TextModelNormalization(PmmlBinding):
"""Represents a <TextModelNormalization> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TextModelSimiliarity(PmmlBinding):
"""Represents a <TextModelSimiliarity> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Time(PmmlBinding):
"""Represents a <Time> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TimeAnchor(PmmlBinding):
"""Represents a <TimeAnchor> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TimeCycle(PmmlBinding):
"""Represents a <TimeCycle> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TimeException(PmmlBinding):
"""Represents a <TimeException> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TimeSeries(PmmlBinding):
"""Represents a <TimeSeries> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TimeSeriesModel(PmmlBinding, ModelElement):
"""Represents a <TimeSeriesModel> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TimeValue(PmmlBinding):
"""Represents a <TimeValue> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Timestamp(PmmlBinding):
"""Represents a <Timestamp> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TrainingInstances(PmmlBinding):
"""Represents a <TrainingInstances> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TransformationDictionary(PmmlBinding):
"""Represents a <TransformationDictionary> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class TreeModel(PmmlBinding, ModelElement):
"""Represents a <TreeModel> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
topNode = self.Node[0]
otherNodes = topNode.nodes()
splitCharacteristic = self.splitCharacteristic
if self.splitCharacteristic is None:
if all(len(node.Node) == 0 or len(node.Node) == 2 for node in otherNodes):
splitCharacteristic = "binarySplit"
if splitCharacteristic == "binarySplit":
if not isinstance(topNode.predicate(), AlwaysTrue):
raise NotImplementedError
predicateTypes = set(x.predicate().__class__.__name__ for x in otherNodes)
inputTypes = sorted(set(x["type"] for x in context.dataDictionary.values()))
if self.functionName == "regression":
outputTypes = ["TreeNode", "double"]
context.outputType = "double"
else:
outputTypes = ["TreeNode", "string"]
context.outputType = "string"
if predicateTypes == set(["SimplePredicate"]):
modelData = topNode.simpleWalk(context, self.functionName, splitCharacteristic, predicateTypes)["TreeNode"]
modelType = {"type": "record", "name": "TreeNode", "fields": [
{"name": "field", "type": {"type": "enum", "name": "TreeFields", "symbols": [x.name for x in context.inputType.fields]}},
{"name": "operator", "type": "string"},
{"name": "value", "type": inputTypes},
{"name": "pass", "type": outputTypes},
{"name": "fail", "type": outputTypes}
]}
if context.storageType == "cell":
context.cells[context.storageName] = ast.Cell(context.avroTypeBuilder.makePlaceholder(json.dumps(modelType)), json.dumps(modelData), False, False, ast.CellPoolSource.EMBEDDED)
return [ast.Call("model.tree.simpleWalk", [
ast.Ref("input"),
ast.CellGet(context.storageName, []),
ast.FcnDef([{"d": context.avroTypeBuilder.makePlaceholder('"DataDictionary"')}, {"t": context.avroTypeBuilder.makePlaceholder('"TreeNode"')}],
context.avroTypeBuilder.makePlaceholder('"boolean"'),
[ast.Call("model.tree.simpleTest", [ast.Ref("d"), ast.Ref("t")])])
])]
elif context.storageType == "pool":
poolName, itemName, refName = context.storageName
if poolName not in context.pools:
context.pools[poolName] = ast.Pool(context.avroTypeBuilder.makePlaceholder(json.dumps(modelType)), {}, False, ast.CellPoolSource.EMBEDDED)
context.pools[poolName].init[itemName] = json.dumps(modelData)
return [ast.Call("model.tree.simpleWalk", [
ast.Ref("input"),
ast.PoolGet(context.storageName, []),
ast.FcnDef([{"d": context.avroTypeBuilder.makePlaceholder('"DataDictionary"')}, {"t": context.avroTypeBuilder.makePlaceholder('"TreeNode"')}],
context.avroTypeBuilder.makePlaceholder('"boolean"'),
[ast.Call("model.tree.simpleTest", [ast.Ref("d"), ast.Ref("t")])])
])]
elif predicateTypes == set(["CompoundPredicate"]) or predicateTypes == set(["SimplePredicate", "CompoundPredicate"]):
modelData = topNode.simpleWalk(context, self.functionName, splitCharacteristic, predicateTypes)["TreeNode"]
modelType = {"type": "record", "name": "TreeNode", "fields": [
{"name": "operator", "type": "string"},
{"name": "comparisons", "type": {"type": "array", "items": {"type": "record", "name": "Comparison", "fields": [
{"name": "field", "type": {"type": "enum", "name": "TreeFields", "symbols": [x.name for x in context.inputType.fields]}},
{"name": "operator", "type": "string"},
{"name": "value", "type": inputTypes}
]}}},
{"name": "pass", "type": outputTypes},
{"name": "fail", "type": outputTypes}
]}
if context.storageType == "cell":
context.cells[context.storageName] = ast.Cell(context.avroTypeBuilder.makePlaceholder(json.dumps(modelType)), json.dumps(modelData), False, False, ast.CellPoolSource.EMBEDDED)
return [ast.Call("model.tree.simpleWalk", [
ast.Ref("input"),
ast.CellGet(context.storageName, [LiteralString("operator")]),
ast.CellGet(context.storageName, [LiteralString("comparisions")]),
ast.FcnDef([{"d": context.avroTypeBuilder.makePlaceholder('"DataDictionary"')}, {"c": context.avroTypeBuilder.makePlaceholder('"Comparison"')}],
context.avroTypeBuilder.makePlaceholder('"boolean"'),
[ast.Call("model.tree.simpleTest", [ast.Ref("d"), ast.Ref("c")])])
])]
else:
raise NotImplementedError
else:
raise NotImplementedError
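# For orientation, an illustrative sketch (not emitted verbatim by the code above) of the
# embedded "TreeNode" records that the cell/pool is initialised with and that
# model.tree.simpleWalk descends, testing each node with model.tree.simpleTest:
#
#   {"field": "x1", "operator": "<", "value": {"double": 3.0},
#    "pass": {"string": "left-leaf-score"},
#    "fail": {"TreeNode": {"field": "x2", "operator": "==",
#                          "value": {"string": "A"},
#                          "pass": {"string": "score-a"},
#                          "fail": {"string": "score-b"}}}}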
class Trend(PmmlBinding):
"""Represents a <Trend> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Trend_ExpoSmooth(PmmlBinding):
"""Represents a <Trend_ExpoSmooth> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class UniformDistribution(PmmlBinding):
"""Represents a <UniformDistribution> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
def zValue(self, fieldName):
raise NotImplementedError
class UnivariateStats(PmmlBinding):
"""Represents a <UnivariateStats> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class Value(PmmlBinding):
"""Represents a <Value> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class VectorDictionary(PmmlBinding):
"""Represents a <VectorDictionary> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class VectorFields(PmmlBinding):
"""Represents a <VectorFields> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class VectorInstance(PmmlBinding):
"""Represents a <VectorInstance> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class VerificationField(PmmlBinding):
"""Represents a <VerificationField> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class VerificationFields(PmmlBinding):
"""Represents a <VerificationFields> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class XCoordinates(PmmlBinding):
"""Represents a <XCoordinates> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class YCoordinates(PmmlBinding):
"""Represents a <YCoordinates> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class binarySimilarity(PmmlBinding):
"""Represents a <binarySimilarity> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class chebychev(PmmlBinding):
"""Represents a <chebychev> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class cityBlock(PmmlBinding):
"""Represents a <cityBlock> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class euclidean(PmmlBinding):
"""Represents a <euclidean> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class jaccard(PmmlBinding):
"""Represents a <jaccard> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class minkowski(PmmlBinding):
"""Represents a <minkowski> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class row(PmmlBinding):
"""Represents a <row> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class simpleMatching(PmmlBinding):
"""Represents a <simpleMatching> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class squaredEuclidean(PmmlBinding):
"""Represents a <squaredEuclidean> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
class tanimoto(PmmlBinding):
"""Represents a <tanimoto> tag and provides methods to convert to PFA."""
def toPFA(self, options, context):
raise NotImplementedError
|
opendatagroup/hadrian
|
titus/titus/pmml/version_independent.py
|
Python
|
apache-2.0
| 70,112
|
[
"NEURON"
] |
32ea078585f6eedfedfdf332f5880bb047ef92d198afbdbdeaeee957100b2fab
|
from __future__ import print_function
import os.path
import re
import sys
import tarfile
import time
from datetime import datetime
# pylint: disable=unused-import,g-bad-import-order
import tensorflow.python.platform
from six.moves import urllib
import numpy as np
import tensorflow as tf
# pylint: enable=unused-import,g-bad-import-order
from tensorflow.python.platform import gfile
import h5py
import math
os.environ["GLOG_minloglevel"] ="3"
import caffe
from caffe.model_libs import *
from google.protobuf import text_format
from caffe.proto import caffe_pb2  # needed for caffe_pb2.LabelMap() below
paddings = {'VALID': [0, 0], 'SAME': [1, 1]}
FLAGS = tf.app.flags.FLAGS
# classify_image_graph_def.pb:
# Binary representation of the GraphDef protocol buffer.
# imagenet_synset_to_human_label_map.txt:
# Map from synset ID to a human readable string.
# imagenet_2012_challenge_label_map_proto.pbtxt:
# Text representation of a protocol buffer mapping a label to synset ID.
tf.app.flags.DEFINE_string(
'model_dir', '/tmp/imagenet',
"""Path to classify_image_graph_def.pb, """
"""imagenet_synset_to_human_label_map.txt, and """
"""imagenet_2012_challenge_label_map_proto.pbtxt.""")
tf.app.flags.DEFINE_string('image_file', '',
"""Absolute path to image file.""")
tf.app.flags.DEFINE_integer('num_top_predictions', 5,
"""Display this many predictions.""")
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
cur_dir = os.path.dirname(os.path.realpath(__file__))
caffe_root = '{}/../'.format(cur_dir)
labelmap_file = caffe_root + 'data/ILSVRC2016/labelmap_ilsvrc_clsloc.prototxt'
file = open(labelmap_file, 'r')
labelmap = caffe_pb2.LabelMap()
text_format.Merge(str(file.read()), labelmap)
def get_labelname(label):
num_labels = len(labelmap.item)
found = False
for i in xrange(0, num_labels):
if label == labelmap.item[i].label:
found = True
return labelmap.item[i].display_name
assert found == True
def create_graph():
"""Creates a graph from saved GraphDef file and returns a saver."""
# Creates graph from saved graph_def.pb.
with gfile.FastGFile(os.path.join(
FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.device('/cpu:0'):
_ = tf.import_graph_def(graph_def, name='')
def make_padding(padding_name, conv_shape):
if padding_name == 'VALID':
return [0, 0]
elif padding_name == 'SAME':
return [int(math.ceil(conv_shape[0]/2)), int(math.ceil(conv_shape[1]/2))]
else:
sys.exit('Invalid padding name '+padding_name)
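# For example, with this script's Python 2 integer division:
#   make_padding('VALID', (3, 3))  ->  [0, 0]
#   make_padding('SAME',  (3, 3))  ->  [1, 1]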
def dump_inputlayer(sess, net, operation='create'):
if operation == 'create':
resize = sess.graph.get_tensor_by_name('ResizeBilinear/size:0').eval()
[height, width] = resize
sub = sess.graph.get_tensor_by_name('Sub/y:0').eval()
mean = sub
if not type(mean) is list:
mean = [float(mean)]
else:
mean = [int(x) for x in mean]
mul = sess.graph.get_tensor_by_name('Mul/y:0').eval()
scale = float(mul)
net['data'] = L.Input(shape=dict(dim=[1, 3, int(height), int(width)]), transform_param=dict(mean_value=mean, scale=scale))
def dump_convbn(sess, net, from_layer, out_layer, operation='create'):
conv = sess.graph.get_operation_by_name(out_layer + '/Conv2D')
weights = sess.graph.get_tensor_by_name(out_layer + '/conv2d_params:0').eval()
padding = make_padding(conv.get_attr('padding'), weights.shape)
strides = conv.get_attr('strides')
beta = sess.graph.get_tensor_by_name(out_layer + '/batchnorm/beta:0').eval()
gamma = sess.graph.get_tensor_by_name(out_layer + '/batchnorm/gamma:0').eval()
mean = sess.graph.get_tensor_by_name(out_layer + '/batchnorm/moving_mean:0').eval()
std = sess.graph.get_tensor_by_name(out_layer + '/batchnorm/moving_variance:0').eval()
# TF weight matrix is of order: height x width x input_channels x output_channels
# make it to caffe format: output_channels x input_channels x height x width
weights = np.transpose(weights, (3, 2, 0, 1))
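    # Illustration with hypothetical shapes: a TF kernel of shape
    # (kernel_h, kernel_w, in_channels, out_channels) = (3, 3, 16, 32)
    # becomes a Caffe-ordered blob of shape (32, 16, 3, 3) after this transpose.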
if operation == 'create':
assert from_layer in net.keys(), '{} not in net'.format(from_layer)
[num_output, channels, kernel_h, kernel_w] = weights.shape
[pad_h, pad_w] = padding
[stride_h, stride_w] = strides[1:3]
std_eps = 0.001
# parameters for convolution layer with batchnorm.
conv_prefix = ''
conv_postfix = ''
kwargs = {
'param': [dict(lr_mult=1, decay_mult=1)],
'weight_filler': dict(type='gaussian', std=0.01),
'bias_term': False,
}
conv_name = '{}{}{}'.format(conv_prefix, out_layer, conv_postfix)
if kernel_h != kernel_w:
net[conv_name] = L.Convolution(net[from_layer], num_output=num_output,
kernel_h=kernel_h, kernel_w=kernel_w, pad_h=pad_h, pad_w=pad_w,
stride_h=stride_h, stride_w=stride_w, **kwargs)
else:
net[conv_name] = L.Convolution(net[from_layer], num_output=num_output,
kernel_size=kernel_h, pad=pad_h, stride=stride_h, **kwargs)
# parameters for batchnorm layer.
bn_prefix = ''
bn_postfix = '_bn'
bn_kwargs = {
'param': [dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)],
}
bn_name = '{}{}{}'.format(bn_prefix, conv_name, bn_postfix)
net[bn_name] = L.BatchNorm(net[conv_name], in_place=True,
batch_norm_param=dict(eps=std_eps), **bn_kwargs)
# parameters for scale bias layer after batchnorm.
bias_prefix = ''
bias_postfix = '_bias'
bias_kwargs = {
'param': [dict(lr_mult=1, decay_mult=0)],
'filler': dict(type='constant', value=0.0),
}
bias_name = '{}{}{}'.format(bias_prefix, conv_name, bias_postfix)
net[bias_name] = L.Bias(net[bn_name], in_place=True, **bias_kwargs)
# relu layer.
relu_name = '{}_relu'.format(conv_name)
net[relu_name] = L.ReLU(net[conv_name], in_place=True)
elif operation == 'save':
conv_prefix = ''
conv_postfix = ''
conv_name = '{}{}{}'.format(conv_prefix, out_layer, conv_postfix)
net.params[conv_name][0].data.flat = weights.flat
# Copy bn parameters.
bn_prefix = ''
bn_postfix = '_bn'
bn_name = '{}{}{}'.format(bn_prefix, conv_name, bn_postfix)
net.params[bn_name][0].data.flat = mean
net.params[bn_name][1].data.flat = std
net.params[bn_name][2].data.flat = 1.
# Copy scale parameters.
bias_prefix = ''
bias_postfix = '_bias'
bias_name = '{}{}{}'.format(bias_prefix, conv_name, bias_postfix)
net.params[bias_name][0].data.flat = beta
def dump_pool(sess, net, from_layer, out_layer, operation='create'):
pooling = sess.graph.get_operation_by_name(out_layer)
ismax = pooling.type=='MaxPool' and 1 or 0
ksize = pooling.get_attr('ksize')
padding = make_padding(pooling.get_attr('padding'), ksize[1:3])
strides = pooling.get_attr('strides')
if operation == 'create':
if ismax:
pool = P.Pooling.MAX
else:
pool = P.Pooling.AVE
assert from_layer in net.keys()
[kernel_h, kernel_w] = ksize[1:3]
[pad_h, pad_w] = padding
[stride_h, stride_w] = strides[1:3]
if kernel_h != kernel_w:
net[out_layer] = L.Pooling(net[from_layer], pool=pool,
kernel_h=kernel_h, kernel_w=kernel_w, pad_h=pad_h, pad_w=pad_w,
stride_h=stride_h, stride_w=stride_w)
else:
net[out_layer] = L.Pooling(net[from_layer], pool=pool,
kernel_size=kernel_h, pad=pad_h, stride=stride_h)
def dump_softmax(sess, net, from_layer, out_layer, operation='create'):
softmax_w = sess.graph.get_tensor_by_name('softmax/weights:0').eval()
softmax_b = sess.graph.get_tensor_by_name('softmax/biases:0').eval()
softmax_w = np.transpose(softmax_w, (1, 0))
if operation == 'create':
assert from_layer in net.keys()
kwargs = {
'param': [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
'weight_filler': dict(type='xavier'),
'bias_filler': dict(type='constant', value=0)
}
[num_output, channels] = softmax_w.shape
net[out_layer] = L.InnerProduct(net[from_layer], num_output=num_output, **kwargs)
prob_layer = '{}_prob'.format(out_layer)
net[prob_layer] = L.Softmax(net[out_layer])
elif operation == 'save':
net.params[out_layer][0].data.flat = softmax_w.flat
net.params[out_layer][1].data.flat = softmax_b
def dump_tower(sess, net, from_layer, tower_name, tower_layers, operation='create'):
for tower_layer in tower_layers:
tower_layer = '{}/{}'.format(tower_name, tower_layer)
if 'pool' in tower_layer:
dump_pool(sess, net, from_layer, tower_layer, operation)
else:
dump_convbn(sess, net, from_layer, tower_layer, operation)
from_layer = tower_layer
def dump_inception(sess, net, inception_name, tower_names, operation='create', final=True):
if operation == 'create':
towers_layers = []
for tower_name in tower_names:
tower_name = '{}/{}'.format(inception_name, tower_name)
assert tower_name in net.keys(), tower_name
towers_layers.append(net[tower_name])
if final:
inception_name = '{}/join'.format(inception_name)
net[inception_name] = L.Concat(*towers_layers, axis=1)
def run_inference_on_image(image):
if not gfile.Exists(image):
tf.logging.fatal('File does not exist %s', image)
image_data = gfile.FastGFile(image).read()
# Creates graph from saved GraphDef.
create_graph()
# sess = tf.InteractiveSession(config=tf.ConfigProto(
# allow_soft_placement=True))
sess = tf.InteractiveSession()
ops = sess.graph.get_operations()
for op in ops:
print(op.name)
# Run the graph until softmax
# start = datetime.now()
data_tensor = sess.graph.get_tensor_by_name('Mul:0')
softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
data = sess.run(data_tensor, {'DecodeJpeg/contents:0': image_data})
predictions = sess.run(softmax_tensor,
{'DecodeJpeg/contents:0': image_data})
# time_len = datetime.now() - start
# print(time_len.microseconds / 1000)
# print predictions indices and values
predictions = np.squeeze(predictions)
top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]
for p in top_k:
print(get_labelname(p), predictions[p])
sess.close()
deploy_net_file = 'models/inception_v3/inception_v3_deploy.prototxt'
model_file = 'models/inception_v3/inception_v3.caffemodel'
net = caffe.Net(deploy_net_file, model_file, caffe.TEST)
net.blobs['data'].reshape(1, 3, 299, 299)
data = data.transpose(0, 3, 1, 2)
net.blobs['data'].data.flat = data.flat
output = net.forward()
predictions = output['softmax_prob']
predictions = np.squeeze(predictions)
top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]
for p in top_k:
print(get_labelname(p), predictions[p])
def dump_model(operation='create', redo=False):
# Creates graph from saved GraphDef.
create_graph()
sess = tf.InteractiveSession()
# Creates caffe model.
deploy_net_file = 'models/inception_v3/inception_v3_deploy.prototxt'
model_file = 'models/inception_v3/inception_v3.caffemodel'
net = []
if operation == 'create' and (not os.path.exists(deploy_net_file) or redo):
net = caffe.NetSpec()
elif operation == 'save' and (not os.path.exists(model_file) or redo):
caffe.set_device(1)
caffe.set_mode_gpu()
net = caffe.Net(deploy_net_file, caffe.TEST)
else:
return
# dump the preprocessing parameters
dump_inputlayer(sess, net, operation)
# dump the filters
dump_convbn(sess, net, 'data', 'conv', operation)
dump_convbn(sess, net, 'conv', 'conv_1', operation)
dump_convbn(sess, net, 'conv_1', 'conv_2', operation)
dump_pool(sess, net, 'conv_2', 'pool', operation)
dump_convbn(sess, net, 'pool', 'conv_3', operation)
dump_convbn(sess, net, 'conv_3', 'conv_4', operation)
dump_pool(sess, net, 'conv_4', 'pool_1', operation)
# inceptions with 1x1, 3x3, 5x5 convolutions
from_layer = 'pool_1'
for inception_id in xrange(0, 3):
if inception_id == 0:
out_layer = 'mixed'
else:
out_layer = 'mixed_{}'.format(inception_id)
dump_tower(sess, net, from_layer, out_layer,
['conv'], operation)
dump_tower(sess, net, from_layer, '{}/tower'.format(out_layer),
['conv', 'conv_1'], operation)
dump_tower(sess, net, from_layer, '{}/tower_1'.format(out_layer),
['conv', 'conv_1', 'conv_2'], operation)
dump_tower(sess, net, from_layer, '{}/tower_2'.format(out_layer),
['pool', 'conv'], operation)
dump_inception(sess, net, out_layer,
['conv', 'tower/conv_1', 'tower_1/conv_2', 'tower_2/conv'], operation)
from_layer = '{}/join'.format(out_layer)
# inceptions with 1x1, 3x3(in sequence) convolutions
out_layer = 'mixed_3'
dump_tower(sess, net, from_layer, out_layer,
['conv'], operation)
dump_tower(sess, net, from_layer, '{}/tower'.format(out_layer),
['conv', 'conv_1', 'conv_2'], operation)
dump_tower(sess, net, from_layer, out_layer,
['pool'], operation)
dump_inception(sess, net, out_layer,
['conv', 'tower/conv_2', 'pool'], operation)
from_layer = '{}/join'.format(out_layer)
# inceptions with 1x1, 7x1, 1x7 convolutions
for inception_id in xrange(4, 8):
out_layer = 'mixed_{}'.format(inception_id)
dump_tower(sess, net, from_layer, out_layer,
['conv'], operation)
dump_tower(sess, net, from_layer, '{}/tower'.format(out_layer),
['conv', 'conv_1', 'conv_2'], operation)
dump_tower(sess, net, from_layer, '{}/tower_1'.format(out_layer),
['conv', 'conv_1', 'conv_2', 'conv_3', 'conv_4'], operation)
dump_tower(sess, net, from_layer, '{}/tower_2'.format(out_layer),
['pool', 'conv'], operation)
dump_inception(sess, net, out_layer,
['conv', 'tower/conv_2', 'tower_1/conv_4', 'tower_2/conv'], operation)
from_layer = '{}/join'.format(out_layer)
# inceptions with 1x1, 3x3, 1x7, 7x1 filters
out_layer = 'mixed_8'
dump_tower(sess, net, from_layer, '{}/tower'.format(out_layer),
['conv', 'conv_1'], operation)
dump_tower(sess, net, from_layer, '{}/tower_1'.format(out_layer),
['conv', 'conv_1', 'conv_2', 'conv_3'], operation)
dump_tower(sess, net, from_layer, out_layer,
['pool'], operation)
dump_inception(sess, net, out_layer,
['tower/conv_1', 'tower_1/conv_3', 'pool'], operation)
from_layer = '{}/join'.format(out_layer)
for inception_id in xrange(9, 11):
out_layer = 'mixed_{}'.format(inception_id)
dump_tower(sess, net, from_layer, out_layer,
['conv'], operation)
dump_tower(sess, net, from_layer, '{}/tower'.format(out_layer),
['conv'], operation)
dump_tower(sess, net, '{}/tower/conv'.format(out_layer),
'{}/tower/mixed'.format(out_layer), ['conv'], operation)
dump_tower(sess, net, '{}/tower/conv'.format(out_layer),
'{}/tower/mixed'.format(out_layer), ['conv_1'], operation)
dump_inception(sess, net, '{}/tower/mixed'.format(out_layer),
['conv', 'conv_1'], operation, False)
dump_tower(sess, net, from_layer, '{}/tower_1'.format(out_layer),
['conv', 'conv_1'], operation)
dump_tower(sess, net, '{}/tower_1/conv_1'.format(out_layer),
'{}/tower_1/mixed'.format(out_layer), ['conv'], operation)
dump_tower(sess, net, '{}/tower_1/conv_1'.format(out_layer),
'{}/tower_1/mixed'.format(out_layer), ['conv_1'], operation)
dump_inception(sess, net, '{}/tower_1/mixed'.format(out_layer),
['conv', 'conv_1'], operation, False)
dump_tower(sess, net, from_layer, '{}/tower_2'.format(out_layer),
['pool', 'conv'], operation)
dump_inception(sess, net, out_layer,
['conv', 'tower/mixed', 'tower_1/mixed', 'tower_2/conv'], operation)
from_layer = '{}/join'.format(out_layer)
dump_pool(sess, net, from_layer, 'pool_3', operation)
dump_softmax(sess, net, 'pool_3', 'softmax', operation)
if operation == 'create' and (not os.path.exists(deploy_net_file) or redo):
model_dir = os.path.dirname(deploy_net_file)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
with open(deploy_net_file, 'w') as f:
print('name: "inception_v3_deploy"', file=f)
print(net.to_proto(), file=f)
elif operation == 'save' and (not os.path.exists(model_file) or redo):
net.save(model_file)
sess.close()
def maybe_download_and_extract():
"""Download and extract model tar file."""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath,
reporthook=_progress)
print()
statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
modelfilepath = os.path.join(dest_directory, 'classify_image_graph_def.pb')
if not os.path.exists(modelfilepath):
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def main(_):
maybe_download_and_extract()
redo = True
operations = ['create', 'save']
for operation in operations:
dump_model(operation, redo)
eval = True
if eval:
image = (FLAGS.image_file if FLAGS.image_file else
os.path.join(FLAGS.model_dir, 'cropped_panda.jpg'))
run_inference_on_image(image)
if __name__ == '__main__':
tf.app.run()
|
IsThatYourBag/IsThatYourBag
|
scripts/convert_inception_v3.py
|
Python
|
mit
| 19,027
|
[
"Gaussian"
] |
f5555f3fb33391d68624ea63b48bb3b1a94b5b7c90c13a13d9e6dad153980fca
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
An EM algorithm for GMMs
"""
import numpy as np
import scipy as sc
import scipy.misc
import scipy.spatial
import scipy.linalg
from numpy import array, eye, ones, log
from scipy.linalg import norm
cdist = scipy.spatial.distance.cdist
logsumexp = scipy.logaddexp.reduce
import em
from twit.KMeansClustering import KMeansClusterer
class GaussianMixtureClusterer(em.EMAlgorithm):
"""
Gaussian Mixtures EM
(i) Using k-means++ start
(ii) Assuming spherical gaussians
"""
def __init__( self, k, d ):
self.K, self.D = k, d
em.EMAlgorithm.__init__( self )
def compute_expectation( self, X, O ):
"""Compute the most likely values of the latent variables; returns lhood"""
_, d = X.shape
M, sigma, w = O
total_lhood = 0
# Get pairwise distances between centers (D_ij = \|X_i - M_j\|)
D = cdist( X, M )
        # Unnormalised log joint: -D_ij**2 / (2*sigma**2) + log(w_j) - (d/2)*log(sigma)
        Z = - 0.5/sigma**2 * (D**2) + log( w ) - 0.5 * d * log(sigma) # ignoring the constant term
        total_lhood += logsumexp( logsumexp(Z) )
        # Normalise the probabilities (soft EM)
Z = sc.exp(Z.T - logsumexp(Z, 1)).T
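        # Exponentiating (Z - logsumexp over components) turns the unnormalised log
        # joints into soft responsibilities, so each row of Z sums to 1.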
return -total_lhood, Z
def compute_maximisation( self, X, Z, O ):
"""Compute the most likely values of the parameters"""
N, d = X.shape
M, sigma, w = O
# Cluster weights (smoothed)
# Pseudo counts
w = Z.sum(axis=0) + 1
# Get new means
M = (Z.T.dot( X ).T / w).T
sigma = (cdist( X, M ) * Z).sum()/(d*N)
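        # `sigma` is a single shared spherical width: the responsibility-weighted mean
        # centre distance, averaged over all N points and d dimensions.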
w /= w.sum()
return M, sigma, w
@staticmethod
def kmeanspp_initialisation(X, K):
"""Initialise means using K-Means++"""
N, D = X.shape
M = KMeansClusterer.kmeanspp_initialisation(X, K)
sigma = cdist( X, M ).sum()/(K*D*N)
w = ones(K)/float(K)
return M, sigma, w
def run( self, X, O = None, *args, **kwargs ):
"""O are the 'true' parameters"""
if O == None:
O = GaussianMixtureClusterer.kmeanspp_initialisation( X, self.K )
return em.EMAlgorithm.run( self, X, O, *args, **kwargs )
def test_gmm():
"""
Test whether gmm works or not.
"""
K = 3
D = 2
mvn = np.random.multivariate_normal
# Generate data
O = np.array([[-1,-1],[0,0],[1,1]])
X = np.vstack([mvn(o, 0.5*np.eye(D), size=10000) for o in O])
algo = GaussianMixtureClusterer(K, D)
lhood, Z, O_ = algo.run(X)
print(O)
print(lhood)
print(O_[0])
|
arunchaganty/presidential-debates
|
django/twit/GaussianMixtureClustering.py
|
Python
|
mit
| 2,611
|
[
"Gaussian"
] |
64779dab973f38bf9e854835b636bed6966c14d1cd4b17a5e7cde509d2b1f7de
|
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2015 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
'''CP2K atomic wavefunctions'''
import numpy as np
from horton.gbasis.iobas import str_to_shell_types
from horton.gbasis.cext import GOBasis, fac2
__all__ = ['load_atom_cp2k']
def _read_coeffs_helper(f, oe):
coeffs = {}
f.next()
while len(coeffs) < len(oe):
line = f.next()
assert line.startswith(" ORBITAL L =")
words = line.split()
l = int(words[3])
s = int(words[6])
c = []
while True:
line = f.next()
if len(line.strip()) == 0:
break
c.append(float(line))
coeffs[(l, s)] = np.array(c)
return coeffs
def _helper_norb(oe):
norb = 0
nel = 0
for l, s, occ, ener in oe:
norb += 2*l+1
nel += occ
return norb, nel
def _helper_exp(exp, oe, coeffs, shell_types, restricted):
# Find the offsets for each angular momentum
offset = 0
offsets = []
ls = abs(shell_types)
for l in sorted(set(ls)):
offsets.append(offset)
offset += (2*l+1)*(l == ls).sum()
del offset
# Fill in the coefficients
iorb = 0
for l, s, occ, ener in oe:
cs = coeffs.get((l, s))
if cs is None:
assert occ == 0
continue
stride = 2*l+1
for m in xrange(-l, l+1):
im = m + l
exp.energies[iorb] = ener
exp.occupations[iorb] = im < occ/(restricted+1)
for ic in xrange(len(cs)):
exp.coeffs[offsets[l] + stride*ic + im,iorb] = cs[ic]
iorb += 1
def _get_cp2k_norm_corrections(l, alphas):
expzet = 0.25*(2*l + 3)
prefac = np.sqrt(np.sqrt(np.pi)/2.0**(l+2)*fac2(2*l+1))
zeta = 2*np.array(alphas)
return zeta**expzet/prefac
def load_atom_cp2k(filename, lf):
'''Load data from a CP2K ATOM computation
**Arguments:**
filename
The name of the cp2k out file
**Returns** a dictionary with ``obasis``, ``exp_alpha``, ``coordinates``,
``numbers``, ``energy``, ``pseudo_numbers``. The dictionary may also
contain: ``exp_beta``.
'''
with open(filename) as f:
# Find the element number
for line in f:
if line.startswith(' Atomic Energy Calculation'):
number = int(line[-5:-1])
break
# Go to the pseudo basis set
for line in f:
if line.startswith(' Pseudopotential Basis'):
break
f.next() # empty line
line = f.next() # Check for GTO
assert line == ' ********************** Contracted Gaussian Type Orbitals **********************\n'
# Load the basis used for the PP wavefn
basis_desc = []
for line in f:
if line.startswith(' *******************'):
break
elif line[3:12] == 'Functions':
shell_type = str_to_shell_types(line[1:2], pure=True)[0]
a = []
c = []
basis_desc.append((shell_type, a, c))
else:
values = [float(w) for w in line.split()]
a.append(values[0])
c.append(values[1:])
# Convert the basis into HORTON format
shell_map = []
shell_types = []
nprims = []
alphas = []
con_coeffs = []
for shell_type, a, c in basis_desc:
# get correction to contraction coefficients.
corrections = _get_cp2k_norm_corrections(abs(shell_type), a)
c = np.array(c)/corrections.reshape(-1,1)
# fill in arrays
for col in c.T:
shell_map.append(0)
shell_types.append(shell_type)
nprims.append(len(col))
alphas.extend(a)
con_coeffs.extend(col)
# Create the basis object
coordinates = np.zeros((1, 3))
shell_map = np.array(shell_map)
nprims = np.array(nprims)
shell_types = np.array(shell_types)
alphas = np.array(alphas)
con_coeffs = np.array(con_coeffs)
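        # Each contraction column becomes its own shell on the single centre (index 0):
        # shell_map/shell_types/nprims hold one entry per shell, while alphas/con_coeffs
        # are flattened over the primitives of every shell, as GOBasis expects.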
obasis = GOBasis(coordinates, shell_map, nprims, shell_types, alphas, con_coeffs)
if lf.default_nbasis is not None and lf.default_nbasis != obasis.nbasis:
raise TypeError('The value of lf.default_nbasis does not match nbasis reported in the cp2k.out file.')
lf.default_nbasis = obasis.nbasis
# Search for (un)restricted
restricted = None
for line in f:
if line.startswith(' METHOD |'):
if 'U' in line:
restricted = False
break
elif 'R' in line:
restricted = True
break
# Search for the core charge (pseudo number)
for line in f:
if line.startswith(' Core Charge'):
pseudo_number = float(line[70:])
assert pseudo_number == int(pseudo_number)
pseudo_number = int(pseudo_number)
break
# Search for energy
for line in f:
if line.startswith(' Energy components [Hartree] Total Energy ::'):
energy = float(line[60:])
break
# Read orbital energies and occupations
for line in f:
if line.startswith(' Orbital energies'):
break
f.next()
oe_alpha = []
oe_beta = []
empty = 0
while empty < 2:
line = f.next()
words = line.split()
if len(words) == 0:
empty += 1
continue
empty = 0
s = int(words[0])
l = int(words[2-restricted])
occ = float(words[3-restricted])
ener = float(words[4-restricted])
if restricted or words[1] == 'alpha':
oe_alpha.append((l, s, occ, ener))
else:
oe_beta.append((l, s, occ, ener))
# Read orbital expansion coefficients
line = f.next()
assert (line == " Atomic orbital expansion coefficients [Alpha]\n") or \
(line == " Atomic orbital expansion coefficients []\n")
coeffs_alpha = _read_coeffs_helper(f, oe_alpha)
if not restricted:
line = f.next()
assert (line == " Atomic orbital expansion coefficients [Beta]\n")
coeffs_beta = _read_coeffs_helper(f, oe_beta)
# Turn orbital data into a HORTON orbital expansions
if restricted:
norb, nel = _helper_norb(oe_alpha)
assert nel%2 == 0
exp_alpha = lf.create_expansion(obasis.nbasis, norb)
exp_beta = None
_helper_exp(exp_alpha, oe_alpha, coeffs_alpha, shell_types, restricted)
else:
norb_alpha, nalpha = _helper_norb(oe_alpha)
norb_beta, nbeta = _helper_norb(oe_beta)
assert norb_alpha == norb_beta
exp_alpha = lf.create_expansion(obasis.nbasis, norb_alpha)
exp_beta = lf.create_expansion(obasis.nbasis, norb_beta)
_helper_exp(exp_alpha, oe_alpha, coeffs_alpha, shell_types, restricted)
_helper_exp(exp_beta, oe_beta, coeffs_beta, shell_types, restricted)
result = {
'obasis': obasis,
'lf': lf,
'exp_alpha': exp_alpha,
'coordinates': coordinates,
'numbers': np.array([number]),
'energy': energy,
'pseudo_numbers': np.array([pseudo_number]),
}
if exp_beta is not None:
result['exp_beta'] = exp_beta
return result
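# A hypothetical usage sketch (the path is illustrative and DenseLinalgFactory is
# assumed to be the HORTON linalg factory in use):
#
#   from horton import DenseLinalgFactory
#   lf = DenseLinalgFactory()
#   data = load_atom_cp2k('atom.cp2k.out', lf)
#   print data['energy'], data['pseudo_numbers']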
|
eustislab/horton
|
horton/io/cp2k.py
|
Python
|
gpl-3.0
| 8,458
|
[
"CP2K",
"Gaussian"
] |
e6513b66306820767c6f7028997ede1a5ee00eabdd9d0b99c3aa11faf2a04702
|
from ase import Atoms
from gpaw import GPAW, PW
h = Atoms('H', cell=(5, 5, 5))
h.center()
h.calc = GPAW(setups='ae', txt='H.ae.txt')
for ecut in range(200, 1001, 100):
h.calc.set(mode=PW(ecut))
e = h.get_potential_energy()
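    # One would typically record the energy for each cutoff here, e.g.:
    #   print(ecut, e)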
|
robwarm/gpaw-symm
|
doc/tutorials/hydrogen/h.py
|
Python
|
gpl-3.0
| 231
|
[
"ASE",
"GPAW"
] |
d2f026c85cff2254b5c501fd6be0dff707da3252851dfea987a967a90936bea1
|
__DESCRIPTION__="""2d Gauss Fit of a single gaussian
The model function would be
R=[
[ cos(psi_ell),sin(psi_ell)]
[-sin(psi_ell),cos(psi_ell)]
]
C=diag(lambda0,lambda1)
R0 = (x0;y0)
is
log(y)=-0.5*(A*x**2+2*B*x*y+C*y**2+K+E*x+F*y)
the fwhm of two axis comes from eigenvectors of matrix
AA=[[A,B],[B,C]]
the center x0,y0 from
(x0;y0)=inv(AA)*(E;F)
the zero point
"""
class MapProfile :
""" Handles the profile of a GRD Map """
def __init__(self,psi_deg_array,radius_array) :
"""MPF=MapProfile(psi_deg_array,radius_array)
psi_deg_array=list of angles along wich to compute profiles
radius_array=list of radii to sample the profiles
"""
import copy
from collections import OrderedDict
import numpy as np
self.M=OrderedDict()
self.psi_deg=copy.deepcopy(psi_deg_array)
self.psi=np.deg2rad(self.psi_deg)
self.radius=copy.deepcopy(radius_array)
self.M['_x']=self._template()
self.M['_y']=self._template()
self.M['_cos_psi']=self._template()
self.M['_sin_psi']=self._template()
for i in range(len(self.psi)) :
self.M['_x'][i]=self.radius*np.cos(self.psi[i])
self.M['_y'][i]=self.radius*np.sin(self.psi[i])
self.M['_cos_psi'][i]=np.cos(self.psi[i])
self.M['_sin_psi'][i]=np.sin(self.psi[i])
def _template(self,dtype='float'):
import numpy as np
return np.zeros([len(self.psi),len(self.radius)],dtype=dtype)
def __getitem__(self,this) :
try :
return self.M[this]
except :
return None
def __setitem__(self,this,that) :
self.M[this]=that
    def keys(self) :
return self.M.keys()
def fill(self,name,GRDMap,argument) :
"extracts profiles of a map of given argument along a list of directions"
self.M[name]=self._template()
for ipsi in range(len(self.psi)) :
            self.M[name][ipsi]=GRDMap.bilinearXY(argument,self.M['_x'][ipsi],self.M['_y'][ipsi])
def fwhm(self,name,returnItp=False,threshold=0.5,returnStats=True) :
"""extracts the fwhm (beware it assumes profiles can be sorted)
if returnItp=True (default False) returns also the value of the profile at the threshold point
        if returnStats=True (the default) returns statistics as:
min, max, sqrt(min*max), sqrt(max/min),mean,rotation,amplitude
"""
import numpy as np
pp=np.zeros(len(self.psi))
tt=np.zeros(len(self.psi))
for ipsi in range(len(self.psi)) :
yv=self[name][ipsi]
idx=np.argsort(yv)
yv=self[name][ipsi][idx]
xv=self.radius[idx]
pp[ipsi]=np.interp(threshold,yv,xv)
tt[ipsi]=np.interp(pp[ipsi],self.radius,self[name][ipsi])
pp=2*pp
if returnStats :
Min=pp.min()
Max=pp.max()
A=(np.cos(self.psi)*(pp-pp.mean())).sum()
B=(np.sin(self.psi)*(pp-pp.mean())).sum()
return [Min,Max,(Min*Max)**0.5,(Max/Min)**0.5,pp.mean(),np.rad2deg(np.arctan2(A,B)),(A**2+B**2)**0.5]
if returnItp :
return pp,tt
return pp
class NoCentNoZer :
def __init__(self,U,V,Y,Yth=None,doNotScaleAxis=True) :
import numpy as np
import numpy.linalg as linalg
if Yth == None :
self.YTh = 10**(-0.3)
else :
self.YTh = Yth*1
self.YTh
self.peak=Y.max()
lmap=Y/self.peak
lmap.shape=lmap.size
idx = np.where(lmap>=self.YTh)[0]
lmap = np.log(lmap[idx])
u=U*1
v=V*1
u.shape=u.size
v.shape=v.size
if doNotScaleAxis :
self.uscal=1.
self.vscal=1.
else :
self.uscal=u.max()-u.min()
self.vscal=v.max()-v.min()
u=u[idx]/self.uscal
v=v[idx]/self.vscal
self.n=len(idx)
self.S=np.zeros([3,3])
self.VV=np.zeros(3)
self.S[0,0]= 0.25*(u**4).sum()
self.S[0,1]= 0.5*((u**3)*v).sum()
self.S[0,2]= 0.25*(u**2*v**2).sum()
self.S[1,1]=(u**2*v**2).sum()
self.S[1,2]=0.5*(u*v**3).sum()
self.S[2,2]=0.25*(v**4).sum()
        # (no constant-term row here: this variant fits only the 3x3 quadratic system)
for r in range(len(self.VV)) :
for c in range(len(self.VV)) :
if r > c :
self.S[r,c]=self.S[c,r]*1
self.VV[0] = -0.5*(lmap*u**2).sum()
self.VV[1] = -(lmap*v*u).sum()
self.VV[2] = -0.5*(lmap*v**2).sum()
self.pars=np.dot(linalg.inv(self.S),self.VV)
self.inv=linalg.inv(self.S)
self.det=linalg.det(self.S)
self.y=lmap
self.u=u
self.v=v
self.res = (self.pars[0]*self.u**2+2*self.pars[1]*self.u*self.v+self.pars[2]*self.v**2)
self.ksq = self.res**2
self.A=np.zeros([2,2])
self.A[0][0]=self.pars[0]/self.uscal**2
self.A[0][1]=self.pars[1]/self.uscal/self.vscal
self.A[1][0]=self.pars[1]/self.uscal/self.vscal
self.A[1][1]=self.pars[2]/self.vscal**2
self.heighen_val,hv=linalg.eigh(self.A)
        self.semiaxis_fwhm=2.*np.sqrt(2.*np.log(2.)/self.heighen_val)*180./np.pi
self.fwhm=(self.semiaxis_fwhm[0]*self.semiaxis_fwhm[1])**0.5
self.ellipticity=self.semiaxis_fwhm.max()/self.semiaxis_fwhm.min()
self.rot=np.transpose(hv/linalg.det(hv))
self.psi_ell=np.arctan2(self.rot[0][1],self.rot[0][0])*180./np.pi
self.gauss_peak=self.peak
def mdl(self,U,V) :
acc = self.pars[0]*(U/self.uscal)**2
        acc += 2*self.pars[1]*(U/self.uscal)*(V/self.vscal)
        acc += self.pars[2]*(V/self.vscal)**2
        return -0.5*acc
class NoCent :
def __init__(self,U,V,Y,YTh=None,doNotScaleAxis=True,allowed_radius_deg=None) :
import numpy as np
import numpy.linalg as linalg
if YTh == None :
self.YTh = 1e-3
else :
self.YTh = YTh*1
self.peak=Y.max()
lmap=Y/self.peak
lmap.shape=lmap.size
#
radius=np.rad2deg((U**2+V**2)**0.5)
radius.shape=radius.size
if allowed_radius_deg == None :
idx = np.where(lmap>=self.YTh)[0]
print "Select by YTH",len(idx),lmap[idx].min(),lmap.max()
self.allowed_radius=radius[idx].max()
else :
idx = np.where(radius<=allowed_radius_deg)[0]
self.allowed_radius=allowed_radius_deg
self.YTh=lmap[idx].min()
#
lmap = np.log(lmap[idx])
u=U*1
v=V*1
u.shape=u.size
v.shape=v.size
if doNotScaleAxis :
self.uscal=1.
self.vscal=1.
else :
self.uscal=u.max()-u.min()
self.vscal=v.max()-v.min()
u=u[idx]/self.uscal
        v=v[idx]/self.vscal
self.n=len(idx)
self.N=len(idx)
self.S=np.zeros([4,4])
self.VV=np.zeros(4)
self.S[0,0]= 0.25*(u**4).sum()
self.S[0,1]= 0.5*((u**3)*v).sum()
self.S[0,2]= 0.25*(u**2*v**2).sum()
self.S[0,3]= -0.5*(u**2).sum()
self.S[1,1]=(u**2*v**2).sum()
self.S[1,2]=0.5*(u*v**3).sum()
self.S[1,3]=-(u*v).sum()
self.S[2,2]=0.25*(v**4).sum()
self.S[2,3]=-0.5*(v**2).sum()
self.S[3,3]=float(len(idx))
for r in range(len(self.VV)) :
for c in range(len(self.VV)) :
if r > c :
self.S[r,c]=self.S[c,r]*1
self.VV[0] = -0.5*(lmap*u**2).sum()
self.VV[1] = -(lmap*v*u).sum()
self.VV[2] = -0.5*(lmap*v**2).sum()
self.VV[3] = (lmap).sum()
self.pars=np.dot(linalg.inv(self.S),self.VV)
self.inv=linalg.inv(self.S)
self.det=linalg.det(self.S)
self.scaled_data=lmap
self.u=u
self.v=v
self.bf_model=-0.5*(self.pars[0]*self.u**2+2*self.pars[1]*self.u*self.v+self.pars[2]*self.v**2)+self.pars[3]
self.res = np.exp(self.bf_model)-np.exp(self.scaled_data)
self.ksq = (self.res**2).sum()
self.A=np.zeros([2,2])
self.A[0][0]=self.pars[0]/self.uscal**2
self.A[0][1]=self.pars[1]/self.uscal/self.vscal
self.A[1][0]=self.pars[1]/self.uscal/self.vscal
self.A[1][1]=self.pars[2]/self.vscal**2
        #removes the regularization
self.Pars={}
self.Pars['A']=self.pars[0]/self.uscal**2
self.Pars['B']=self.pars[1]/self.uscal/self.vscal
self.Pars['C']=self.pars[2]/self.vscal**2
self.Pars['D']=np.nan
self.Pars['E']=np.nan
self.Pars['F']=self.pars[3]
#
self.R0=np.zeros(2)
self.heighen_val,hv=linalg.eigh(self.A)
self.semiaxis_fwhm=2.*np.sqrt(2.*(np.log(2.)-self.pars[3])/self.heighen_val)*180./np.pi
self.fwhm_min=self.semiaxis_fwhm.min()
self.fwhm_max=self.semiaxis_fwhm.max()
self.fwhm=(self.semiaxis_fwhm.prod())**0.5
self.ellipticity=self.fwhm_max/self.fwhm_min
self.rot=np.transpose(hv/linalg.det(hv))
self.psi_ell=np.arctan2(self.rot[0][1],self.rot[0][0])*180./np.pi
self.zero=self.pars[3]*1
self.gauss_peak=self.peak*np.exp(self.zero)
self.DataTh=self.YTh
def mdl(self,U,V) :
acc = self.pars[0]*(U/self.uscal)**2
        acc += 2*self.pars[1]*(U/self.uscal)*(V/self.vscal)
acc += self.pars[2]*(V/self.vscal)**2
return -0.5*acc+self.zero
def __str__(self) :
l=[]
l.append("N : "+str(self.n))
#l.append("allowed_radius : "+str(self.allowed_radius))
#l.append("xscal : "+str(self.xscal))
#l.append("yscal : "+str(self.yscal))
#l.append(" : ")
l.append("peak : "+str(self.gauss_peak))
l.append("fwhm : "+str(self.fwhm))
#l.append("fwhm_min : "+str(self.fwhm_min))
#l.append("fwhm_max : "+str(self.fwhm_max))
l.append("ellipticity :"+str(self.ellipticity))
l.append("psi_ell :"+str(self.psi_ell))
return "\n".join(l)
class NoBackground_Deprecated :
def __init__(self,U,V,Y,Yth=None) :
import numpy as np
import numpy.linalg as linalg
if Yth == None :
self.YTh = 10**(-0.3)
else :
self.YTh = Yth*1
self.YTh
self.peak=Y.max()
lmap=Y/self.peak
lmap.shape=lmap.size
idx = np.where(lmap>=self.YTh)[0]
lmap = np.log(lmap[idx])
u=U*1
v=V*1
u.shape=u.size
v.shape=v.size
self.uscal=u.max()-u.min()
self.vscal=v.max()-v.min()
u=u[idx]/self.uscal
v=v[idx]/self.uscal
self.n=len(idx)
self.S=np.zeros([6,6])
self.VV=np.zeros(6)
self.S[0,0]= 0.25*(u**4).sum()
self.S[0,1]= 0.5*((u**3)*v).sum()
self.S[0,2]= 0.25*(u**2*v**2).sum()
self.S[0,3]= -0.5*(u**2).sum()
self.S[0,4]= -0.5*(u**3).sum()
self.S[0,5]= -0.5*(v*u**2).sum()
self.S[1,1]=(u**2*v**2).sum()
self.S[1,2]=0.5*(u*v**3).sum()
self.S[1,3]=-(u*v).sum()
self.S[1,4]= -(v*u**2).sum()
self.S[1,5]= -(u*v**2).sum()
self.S[2,2]=0.25*(v**4).sum()
self.S[2,3]=-0.5*(v**2).sum()
self.S[2,4]= -0.5*(u*v**2).sum()
self.S[2,5]= -0.5*(v**4).sum()
self.S[3,3]=float(len(idx))
self.S[3,4]= -0.5*(u).sum()
self.S[3,5]= -0.5*(v).sum()
self.S[4,4]= -0.5*(u**2).sum()
self.S[4,5]= -0.5*(u*v).sum()
self.S[5,5]= -0.5*(v**2).sum()
for r in range(len(self.VV)) :
for c in range(len(self.VV)) :
if r > c :
self.S[r,c]=self.S[c,r]*1
self.VV[0] = -0.5*(lmap*u**2).sum()
self.VV[1] = -(lmap*v*u).sum()
self.VV[2] = -0.5*(lmap*v**2).sum()
self.VV[3] = (lmap).sum()
self.VV[4] = -0.5*(lmap*u).sum()
self.VV[5] = -0.5*(lmap*v).sum()
self.pars=np.dot(linalg.inv(self.S),self.VV)
self.inv=linalg.inv(self.S)
self.det=linalg.det(self.S)
self.y=lmap
self.u=u
self.v=v
self.res = (self.pars[0]*self.u**2+2*self.pars[1]*self.u*self.v+self.pars[2]*self.v**2+self.pars[3])
self.ksq = self.res**2
self.A=np.zeros([2,2])
self.A[0][0]=self.pars[0]/self.uscal**2
self.A[0][1]=self.pars[1]/self.uscal/self.vscal
self.A[1][0]=self.pars[1]/self.uscal/self.vscal
self.A[1][1]=self.pars[2]/self.vscal**2
self.Vde=np.zeros(2)
self.Vde[0]=self.pars[4]/self.uscal
self.Vde[1]=self.pars[5]/self.vscal
self.R0=np.arcsin(np.dot(linalg.inv(self.A),self.Vde))*180./np.pi*3600.
self.heighen_val,hv=linalg.eigh(self.A)
self.semiaxis_fwhm=2.*np.sqrt(2.*(np.log(2.)-self.pars[3])/self.heighen_val)*180./np.pi
self.fwhm=(self.semiaxis_fwhm[0]*self.semiaxis_fwhm[1])**0.5
self.ellipticity=self.semiaxis_fwhm.max()/self.semiaxis_fwhm.min()
self.rot=np.transpose(hv/linalg.det(hv))
self.psi_ell=np.arctan2(self.rot[0][1],self.rot[0][0])*180./np.pi
a=np.dot(linalg.inv(self.A),self.Vde)
self.zero=self.pars[3]+0.5*(self.A[0][0]*a[0]*a[0]+2.*a[0]*a[1]*self.A[0][1]+a[1]*a[1]*self.A[1][1])
self.gauss_peak=self.peak*np.exp(self.zero)
def mdl(self,U,V) :
acc = self.pars[0]*(U/self.uscal)**2
acc += 2*self.pars[1]*self.pars[2]*(U/self.uscal)*(V/self.vscal)
acc += self.pars[2]*(V/self.vscal)**2
return -0.5*acc+self.pars[3]
class Model :
def __init__(self,xmin,xmax,nx,ymin,ymax,ny) :
import numpy as np
a=np.linspace(xmin,xmax,nx)
self.dX=a[1]-a[0]
self.X=np.tile(np.linspace(xmin,xmax,nx),(ny,1))
self.Y=np.transpose(np.tile(np.linspace(ymin,ymax,ny),(nx,1)))
a=np.linspace(ymin,ymax,ny)
self.dY=a[1]-a[0]
self.R=None
self.D=None
self.fwhm = None
self.fwhm_min = None
self.fwhm_max = None
self.gauss_peak = None
self.ellipticity = None
self.psi_ell = None
self.peak = None
self.R = None
def __str__(self) :
        if self.D is None :
return ''
l=[]
l.append("gauss_peak : "+str(self.gauss_peak))
l.append("fwhm : "+str(self.fwhm))
l.append("fwhm_min : "+str(self.fwhm_min))
l.append("fwhm_max : "+str(self.fwhm_max))
l.append("ellipticity :"+str(self.ellipticity))
l.append("psi_ell :"+str(self.psi_ell))
l.append("X0 :"+str(self.R0[0]))
l.append("Y0 :"+str(self.R0[1]))
return "\n".join(l)
def __call__(self,*arg,**karg) :
"""call(NoBackground_Base)
call(peak,x0,y0,psi_ell,fwhm,ellipticity,MinMax=False)
MinMax = False : fwhm=p1, ellipticity=p2, fwhm_min and fwhm_max are derived
MinMax = True : fwhm_min=p1, fwhm_max=p2, fwhm and ellipticity are derived
"""
import numpy as np
if len(arg) == 0 :
return
elif len(arg) == 1 :
try :
self.gauss_peak=arg[0].gauss_peak
self.R0=arg[0].R0
self.psi_ell=arg[0].psi_ell
self.fwhm=arg[0].fwhm
self.fwhm_min=arg[0].fwhm_min
self.fwhm_max=arg[0].fwhm_max
self.ellipticity=arg[0].ellipticity
except :
return
else :
MinMax=False
try :
MinMax=karg['MinMax']==True
except :
MinMax=False
self.gauss_peak=float(arg[0])
self.R0=np.zeros(2)
self.R0[0]=float(arg[1])
self.R0[1]=float(arg[2])
self.psi_ell=float(arg[3])
if MinMax :
self.fwhm_min=float(arg[4])
self.fwhm_max=float(arg[5])
self.fwhm = (self.fwhm_min*self.fwhm_max)**0.5
self.ellipticity = self.fwhm_max/self.fwhm_min
else :
self.fwhm=float(arg[4])
self.ellipticity=float(arg[5])
self.fwhm_min=self.fwhm/self.ellipticity**0.5
self.fwhm_max=self.fwhm*self.ellipticity**0.5
self.mdl()
def mdl(self) :
import numpy as np
x=self.X-self.R0[0]
y=self.Y-self.R0[1]
self.R=(x**2+y**2)**0.5
cp=np.cos(self.psi_ell/180.*np.pi)
sp=np.sin(self.psi_ell/180.*np.pi)
u=(cp*x-sp*y)**2/self.fwhm_max**2
u+=(sp*x+cp*y)**2/self.fwhm_min**2
u*=-8.*np.log(2.)/2.
self.D=self.gauss_peak*np.exp(u)
def imshow(self) :
try :
from matplotlib import pyplot as plt
except :
return
import numpy as np
plt.imshow(self.D,origin='lower')
plt.colorbar()
class NoBackground_Base :
def __init__(self,*arg,**karg) :
"NoBackground_Base(X,Y,D,DataTh=-np.inf,AllowedRadius=np.inf,Weight=None)"
import numpy as np
import numpy.linalg as linalg
if len(arg) < 3 :
return
try :
doNotScaleAxis=float(karg['doNotScaleAxis'])
except :
doNotScaleAxis=True
try :
self.DataTh=float(karg['DataTh'])
except :
self.DataTh=-np.inf
try :
self.AllowedRadius=float(karg['AllowedRadius'])
except :
self.AllowedRadius = np.inf
self.peak=arg[2].max()
# data are regularized
lmap=arg[2]/self.peak
lmap.shape=lmap.size
x=arg[0]*1
y=arg[1]*1
radius=(x**2+y**2)**0.5
radius.shape=radius.size
idx = np.where((lmap>=self.DataTh)*(radius<=self.AllowedRadius))[0]
lmap = np.log(lmap[idx])
try :
self.Weight=karg['Weight']*1
self.Weight.shape=arg[2].size
self.Weight=self.Weight[idx]
self.Weight*=1/self.Weight.sum()
except :
self.Weight=1
self.in_shape=arg[2].shape
self.in_size=arg[2].size
self.logdata_min=lmap.min()
self.tagged=np.zeros(arg[2].shape,dtype='int')
self.tagged.shape=arg[2].size
self.tagged[idx]=1
self.tagged.shape=arg[2].shape
x.shape=x.size
y.shape=y.size
if doNotScaleAxis :
self.xscal=1.
self.yscal=1.
else :
self.xscal=x.max()-x.min()
self.yscal=y.max()-y.min()
x=x[idx]/self.xscal
y=y[idx]/self.yscal
self.N=len(idx)
self.S=np.zeros([6,6])
self.VV=np.zeros(6)
#unknown are A,B,C,D,E,F
#
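        # The sums below set up a weighted linear least-squares fit of log(data/peak) to the model
        #   log(I/peak) = -0.5*(A*x**2 + 2*B*x*y + C*y**2 + D*x + E*y) + F,
        # which is linear in p = (A,B,C,D,E,F).  With the basis functions
        #   phi = (-0.5*x**2, -x*y, -0.5*y**2, -0.5*x, -0.5*y, 1)
        # the weighted normal equations read S*p = VV, where S[i,j] = sum(W*phi_i*phi_j) and
        # VV[i] = sum(W*phi_i*log(I/peak)); these are exactly the quantities accumulated below.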
self.S[0,0]= 0.25*(self.Weight*x**4).sum()
self.S[0,1]= 0.5*((self.Weight*x**3)*y).sum()
self.S[0,2]= 0.25*(self.Weight*x**2*y**2).sum()
self.S[0,3]= 0.25*(self.Weight*x**3).sum()
self.S[0,4]= 0.25*(self.Weight*x**2*y).sum()
self.S[0,5]= -0.5*(self.Weight*x**2).sum()
#
self.S[1,1]= (self.Weight*x**2*y**2).sum()
self.S[1,2]= 0.5*(self.Weight*x*y**3).sum()
self.S[1,3]= 0.5*(self.Weight*x**2*y).sum()
self.S[1,4]= 0.5*(self.Weight*x*y**2).sum()
self.S[1,5]= -(self.Weight*x*y).sum()
#
self.S[2,2]= 0.25*(self.Weight*y**4).sum()
self.S[2,3]= 0.25*(self.Weight*x*y**2).sum()
self.S[2,4]= 0.25*(self.Weight*y**3).sum()
self.S[2,5]= -0.5*(self.Weight*y**2).sum()
#
self.S[3,3]= 0.25*(self.Weight*x**2).sum()
self.S[3,4]= 0.25*(self.Weight*x*y).sum()
        self.S[3,5]= -0.5*(self.Weight*x).sum()
#
self.S[4,4]= 0.25*(self.Weight*y**2).sum()
        self.S[4,5]= -0.5*(self.Weight*y).sum()
#
        self.S[5,5]= (self.Weight*np.ones(self.N)).sum()
for r in range(len(self.VV)) :
for c in range(len(self.VV)) :
if r > c :
self.S[r,c]=self.S[c,r]*1
self.VV[0] = -0.5*(self.Weight*lmap*x**2).sum()
self.VV[1] = -(self.Weight*lmap*x*y).sum()
self.VV[2] = -0.5*(self.Weight*lmap*y**2).sum()
self.VV[3] = -0.5*(self.Weight*lmap*x).sum()
self.VV[4] = -0.5*(self.Weight*lmap*y).sum()
        self.VV[5] = (self.Weight*lmap).sum()
#
self.inv=linalg.inv(self.S)
self.det=linalg.det(self.S)
self.pars=np.dot(linalg.inv(self.S),self.VV)
self.ld=lmap
self.x=x
self.y=y
self.res = self.mdl(x,y)-lmap
self.ksq_log = (self.res**2).sum()
        # removes the regularization
self.Pars={}
self.Pars['A']=self.pars[0]/self.xscal**2
self.Pars['B']=self.pars[1]/self.xscal/self.yscal
self.Pars['C']=self.pars[2]/self.yscal**2
self.Pars['D']=self.pars[3]/self.xscal
self.Pars['E']=self.pars[4]/self.yscal
self.Pars['F']=self.pars[5]+np.log(self.peak)
# find the invC matrix
self.MinvC=np.zeros([2,2])
self.MinvC[0][0]=self.Pars['A']*1.
self.MinvC[0][1]=self.Pars['B']*1.
self.MinvC[1][0]=self.Pars['B']*1.
self.MinvC[1][1]=self.Pars['C']*1.
# find the V0 vector
self.V0=np.zeros(2)
self.V0[0]=self.Pars['D']*1.
self.V0[1]=self.Pars['E']*1.
# find the center
self.MC = np.zeros([2,2])
self.MC[0][0]=self.Pars['C']*1.
self.MC[0][1]=-self.Pars['B']*1.
self.MC[1][0]=-self.Pars['B']*1.
self.MC[1][1]=self.Pars['A']*1.
self.MC=self.MC/(self.Pars['A']*self.Pars['C']-self.Pars['B']**2)
self.R0=np.zeros(2)
self.R0[0]=self.Pars['C']*self.Pars['D']-self.Pars['B']*self.Pars['E']
self.R0[1]=-self.Pars['B']*self.Pars['D']+self.Pars['A']*self.Pars['E']
self.R0 = -0.5*self.R0/(self.Pars['A']*self.Pars['C']-self.Pars['B']**2)
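        # Centre derivation: setting the gradient of -0.5*(A*x**2 + 2*B*x*y + C*y**2 + D*x + E*y)
        # to zero gives A*x0 + B*y0 = -D/2 and B*x0 + C*y0 = -E/2, i.e.
        #   (x0, y0) = -0.5 * M^-1 * (D, E)  with  M = [[A, B], [B, C]],
        # which is what the three lines above evaluate via the explicit 2x2 inverse.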
# find the allowed radius
self.allowed_radius=(((self.x*self.xscal-self.R0[0])**2+(self.y*self.yscal-self.R0[1])**2)**0.5).max()
        # find the eigenvalues and eigenvectors
self.heighen_val,self.heighen_vec=linalg.eigh(self.MinvC)
semiaxis_fwhm=2.*np.sqrt(2.*(np.log(2.))/self.heighen_val)
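        # Along a principal axis the exponent is -0.5*lam*r**2, which drops to half maximum at
        # r = sqrt(2*ln(2)/lam); the full width is therefore 2*sqrt(2*ln(2)/lam), as computed above.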
self.rot=np.transpose(self.heighen_vec/linalg.det(self.heighen_vec))
for i in range(2) : self.rot[i]*=-1 if self.rot[i][i] < 0 else 1
# extract the gaussian parameters
hv=self.heighen_vec
for i in range(2) : hv[i]*=-1 if hv[i][i] < 0 else 1
self.psi_ell=np.arctan2(hv[1][0],hv[0][0])*180./np.pi
self.fwhm_min=semiaxis_fwhm.min()
self.fwhm_max=semiaxis_fwhm.max()
self.fwhm=(self.fwhm_max*self.fwhm_min)**0.5
self.ellipticity=self.fwhm_max/self.fwhm_min
#self.zero=self.Pars['F']-0.5/4.*self.Pars['A']*self.Pars['D']**2-1./4.*self.Pars['B']*self.Pars['E']*self.Pars['D']-0.5/4.*self.Pars['C']*self.Pars['E']**2
self.zero=self.Pars['F']+0.5*(self.Pars['A']*self.R0[0]**2+2*self.Pars['B']*self.R0[0]*self.R0[1]+self.Pars['C']*self.R0[1]**2)
self.gauss_at_center=np.exp(self.zero)
self.gauss_peak=np.exp(self.zero)
self.gauss_ksq=((np.exp(self.res)-self.peak*np.exp(lmap))**2).sum()
def mdl(self,x,y) :
acc = self.pars[0]*x**2
acc += 2.*self.pars[1]*x*y
acc += self.pars[2]*y**2
acc += self.pars[3]*x
acc += self.pars[4]*y
acc *= -0.5
acc += self.pars[5]
return acc
def test_map(self,X,Y,X0,Y0,fwhm_min,fwhm_max,psi_ell,peak) :
import numpy as np
cp=np.cos(psi_ell/180.*np.pi)
sp=np.sin(psi_ell/180.*np.pi)
u=(X-X0)*cp+(Y-Y0)*sp
v=-(X-X0)*sp+(Y-Y0)*cp
smin=fwhm_min/(2.*np.sqrt(2.*np.log(2.)))
smax=fwhm_max/(2.*np.sqrt(2.*np.log(2.)))
return peak*np.exp(-0.5*( (u/smax)**2 + (v/smin)**2))
def __str__(self) :
l=[]
l.append("in_shape : "+str(self.in_shape))
l.append("in_size : "+str(self.in_size))
l.append("DataTh : "+str(self.DataTh))
l.append("AllowedRadius : "+str(self.AllowedRadius))
l.append("N : "+str(self.N))
l.append("allowed_radius : "+str(self.allowed_radius))
l.append("xscal : "+str(self.xscal))
l.append("yscal : "+str(self.yscal))
l.append(" : ")
l.append("peak : "+str(self.gauss_peak))
l.append("fwhm : "+str(self.fwhm))
l.append("fwhm_min : "+str(self.fwhm_min))
l.append("fwhm_max : "+str(self.fwhm_max))
l.append("ellipticity :"+str(self.ellipticity))
l.append("psi_ell :"+str(self.psi_ell))
l.append("X0 :"+str(self.R0[0]))
l.append("Y0 :"+str(self.R0[1]))
return "\n".join(l)
class NoBackground(NoBackground_Base) :
def __init__(self,X,Y,D,DataTh=None,AllowedRadius=None,Weight=None,doNotScaleAxis=True) :
import numpy as np
NoBackground_Base.__init__(self,X,Y,D,DataTh=DataTh,AllowedRadius=AllowedRadius,Weight=Weight,doNotScaleAxis=doNotScaleAxis)
self.R0 = np.arcsin(self.R0)*180./np.pi
self.fwhm = self.fwhm*180./np.pi
self.fwhm_min = self.fwhm_min*180./np.pi
self.fwhm_max = self.fwhm_max*180./np.pi
self.xscal = np.arcsin(self.xscal)*180./np.pi
self.yscal = np.arcsin(self.yscal)*180./np.pi
self.allowed_radius= self.allowed_radius
class gaussCanonicalForm_NoCent :
"""used to convert gauss from Closed Form without Center:
D=0, E=0
"""
def __init__(self,GaussClosedForm) :
        import numpy as np
        import numpy.linalg as linalg
#find the background
self.background=GaussClosedForm.b*1
# find the invC matrix
self.MinvC=np.zeros([2,2])
self.MinvC[0][0]=GaussClosedForm.A*1.
self.MinvC[0][1]=GaussClosedForm.B*1.
self.MinvC[1][0]=GaussClosedForm.B*1.
self.MinvC[1][1]=GaussClosedForm.C*1.
# find the center
self.MC = np.zeros([2,2])
self.MC[0][0]=GaussClosedForm.C*1.
self.MC[0][1]=-GaussClosedForm.B*1.
self.MC[1][0]=-GaussClosedForm.B*1.
self.MC[1][1]=GaussClosedForm.A*1.
self.MC=self.MC/(GaussClosedForm.A*GaussClosedForm.C-GaussClosedForm.B**2)
self.R0=np.zeros(2)
        # the allowed radius is not recoverable from the closed-form parameters alone
        self.allowed_radius=None
        # find the eigenvalues and eigenvectors
self.heighen_val,self.heighen_vec=linalg.eigh(self.MinvC)
semiaxis_fwhm=2.*np.sqrt(2.*(np.log(2.))/self.heighen_val)
self.rot=np.transpose(self.heighen_vec/linalg.det(self.heighen_vec))
for i in range(2) : self.rot[i]*=-1 if self.rot[i][i] < 0 else 1
# extract the gaussian parameters
hv=self.heighen_vec
for i in range(2) : hv[i]*=-1 if hv[i][i] < 0 else 1
self.psi_ell=np.arctan2(hv[1][0],hv[0][0])*180./np.pi
self.fwhm_min=semiaxis_fwhm.min()
self.fwhm_max=semiaxis_fwhm.max()
self.fwhm=(self.fwhm_max*self.fwhm_min)**0.5
self.ellipticity=self.fwhm_max/self.fwhm_min
self.zero=GaussClosedForm.F
self.gauss_peak=np.exp(self.zero)
def csv(self,header=False,fsept=', ',fmt='%20.18e') :
"returns a csv table line with the essential information, X0 and Y0 are forced to be 0"
if header :
return fsept.join(['peak','X0','Y0','fwhm','ellipticity','psi_ell','background'])
return fsept.join([fmt%self.gauss_peak,fmt%0,fmt%0,fmt%self.fwhm,fmt%self.ellipticity,fmt%self.psi_ell,fmt%self.background])
class gaussCanonicalForm :
"""used to convert gauss from Closed Form"""
def __init__(self,GaussClosedForm) :
        import numpy as np
        import numpy.linalg as linalg
#find the background
self.background=GaussClosedForm.b*1
# find the invC matrix
self.MinvC=np.zeros([2,2])
self.MinvC[0][0]=GaussClosedForm.A*1.
self.MinvC[0][1]=GaussClosedForm.B*1.
self.MinvC[1][0]=GaussClosedForm.B*1.
self.MinvC[1][1]=GaussClosedForm.C*1.
# find the V0 vector
self.V0=np.zeros(2)
self.V0[0]=GaussClosedForm.D*1.
self.V0[1]=GaussClosedForm.E*1.
# find the center
self.MC = np.zeros([2,2])
self.MC[0][0]=GaussClosedForm.C*1.
self.MC[0][1]=-GaussClosedForm.B*1.
self.MC[1][0]=-GaussClosedForm.B*1.
self.MC[1][1]=GaussClosedForm.A*1.
self.MC=self.MC/(GaussClosedForm.A*GaussClosedForm.C-GaussClosedForm.B**2)
self.R0=np.zeros(2)
self.R0[0]=GaussClosedForm.C*GaussClosedForm.D-GaussClosedForm.B*GaussClosedForm.E
self.R0[1]=-GaussClosedForm.B*GaussClosedForm.D+GaussClosedForm.A*GaussClosedForm.E
self.R0 = -0.5*self.R0/(GaussClosedForm.A*GaussClosedForm.C-GaussClosedForm.B**2)
        # the allowed radius is not recoverable from the closed-form parameters alone
        self.allowed_radius=None
        # find the eigenvalues and eigenvectors
self.heighen_val,self.heighen_vec=linalg.eigh(self.MinvC)
semiaxis_fwhm=2.*np.sqrt(2.*(np.log(2.))/self.heighen_val)
self.rot=np.transpose(self.heighen_vec/linalg.det(self.heighen_vec))
for i in range(2) : self.rot[i]*=-1 if self.rot[i][i] < 0 else 1
# extract the gaussian parameters
hv=self.heighen_vec
for i in range(2) : hv[i]*=-1 if hv[i][i] < 0 else 1
self.psi_ell=np.arctan2(hv[1][0],hv[0][0])*180./np.pi
self.fwhm_min=semiaxis_fwhm.min()
self.fwhm_max=semiaxis_fwhm.max()
self.fwhm=(self.fwhm_max*self.fwhm_min)**0.5
self.ellipticity=self.fwhm_max/self.fwhm_min
self.zero=GaussClosedForm.F+0.5*(GaussClosedForm.A*self.R0[0]**2+2*GaussClosedForm.B*self.R0[0]*self.R0[1]+GaussClosedForm.C*self.R0[1]**2)
self.gauss_peak=np.exp(self.zero)
def csv(self,header=False,fsept=', ',fmt='%20.18e') :
"returns a csv table line with the essential information"
if header :
return fsept.join(['peak','X0','Y0','fwhm','ellipticity','psi_ell','background'])
return fsept.join([fmt%self.gauss_peak,fmt%self.R0[0],fmt%self.R0[1],fmt%self.fwhm,fmt%self.ellipticity,fmt%self.psi_ell,fmt%self.background])
class gaussClosedForm :
"""class to handle a gaussian curve in closed form
"""
def __init__(self,A,B,C,D,E,F,b) :
"defines a closed form gaussian for A,B,C,D,E,F,b"
self.A=A
self.B=B
self.C=C
self.D=D
self.E=E
self.F=F
self.b=b
def calc(self,X,Y):
"computes for X and Y"
import numpy as np
acc=self.A*X**2
acc+=self.B*X*Y
acc+=self.C*Y**2
acc+=self.D*X
acc+=self.E*Y
return np.exp(-0.5*acc+self.F)+self.b
def canonization(self) :
"convert closed form parameters to canonical form"
return gaussCanonicalForm(self)
def csv(self,header=False,fsept=', ',fmt='%20.18e') :
"returns a csv table line with the essential information"
if header :
return fsept.join(['A','B','C','D','E','F','b'])
return fsept.join([fmt%self.A,fmt%self.B,fmt%self.C,fmt%self.D,fmt%self.E,fmt%self.F,fmt%self.b])
def __call__(self,XY,A,B,C,D,E,F,b) :
"""call to perform fit with curve_fit
XY = array of X and Y"""
self.A=A
self.B=B
self.C=C
self.D=D
self.E=E
self.F=F
self.b=b
return self.calc(XY[0],XY[1])
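# A minimal sketch of the curve_fit workflow hinted at by __call__ above (assumes scipy is
# available; X, Y, data and the starting guess p0 are illustrative placeholders):
#
#   from scipy.optimize import curve_fit
#   import numpy as np
#   g = gaussClosedForm(0., 0., 0., 0., 0., 0., 0.)
#   XY = np.vstack([X.ravel(), Y.ravel()])
#   p0 = [1e-2, 0., 1e-2, 0., 0., 0., 0.]        # initial guess for A, B, C, D, E, F, b
#   popt, pcov = curve_fit(g, XY, data.ravel(), p0=p0)
#   canonical = gaussClosedForm(*popt).canonization()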
class efficient_gaussClosedForm_for_fit :
"""class to compute 'efficiently' a gaussian distribution in closed form"""
def __init__(self,X,Y) :
"defines a closed form gaussian for A,B,C,D,E,F,b"
import copy
self.X=copy.deepcopy(X)
self.Y=copy.deepcopy(Y)
def __call__(self,A,B,C,D,E,F,b) :
"computes for X and Y"
import numpy as np
acc=A*self.X**2
acc+=B*self.X*self.Y
acc+=C*self.Y**2
acc+=D*self.X
acc+=E*self.Y
return np.exp(-0.5*acc+F)+b
class efficient_chisq_gaussClosedForm_for_fit :
"""class to compute 'efficiently' a chisq for a given gaussian distribution in closed form"""
def __init__(self,X,Y,Data) :
"defines a closed form gaussian for A,B,C,D,E,F,b"
import copy
self.X=copy.deepcopy(X)
self.Y=copy.deepcopy(Y)
self.Data=copy.deepcopy(Data)
def gauss(self,A,B,C,D,E,F,b) :
"computes for X and Y"
import numpy as np
acc=A*self.X**2
acc+=B*self.X*self.Y
acc+=C*self.Y**2
acc+=D*self.X
acc+=E*self.Y
return np.exp(-0.5*acc+F)+b
def residual(self,A,B,C,D,E,F,b) :
return (self.gauss(A,B,C,D,E,F,b)-self.Data)
def __call__(self,A,B,C,D,E,F,b) :
return (self.residual(A,B,C,D,E,F,b)**2).sum()
#class super_efficient_gaussClosedForm_forfit :
#def __init__(self,X,Y,V) :
#"defines a closed form gaussian for A,B,C,D,E,F,b"
#self.X=X
#self.Y=Y
#self.V=V
#def __call__(self,A,B,C,D,E,F,b) :
#"computes for X and Y"
#import numpy as np
#acc=A*self.X**2
#acc+=B*self.X*self.Y
#acc+=C*self.Y**2
#acc+=D*self.X
#acc+=E*self.Y
#return ((np.exp(-0.5*acc+F)+b-V)**2).sum()
if __name__=='__main__' :
def TestOut(title,m1,GF,latex=False) :
if latex :
fmt = '{\\bf %11s} & %13e & %13e & %13e\\\\'
print '\\begin{tabular}{lccc}\n\\hline\\hline\n\\multicolumn{4}{c}{%s}\\\\\n\hline'%title
print "\\hline\n&\\multicolumn{1}{c}{{\\bf Input}}&\\multicolumn{1}{c}{{\\bf Fit}}&\\multicolumn{1}{c}{{\\bf Residual}}\\\\ \n \hline"
else :
fmt = '%11s : %13e %13e %13e'
print title
for k in ['gauss_peak','fwhm','fwhm_min','fwhm_max','ellipticity','psi_ell','R0'] :
name=k
if latex :
name='\\_'.join(name.split('_'))
if k=='R0' :
print fmt%('X0',m1.__dict__[k][0],GF.__dict__[k][0],GF.__dict__[k][0]-m1.__dict__[k][0])
print fmt%('Y0',m1.__dict__[k][1],GF.__dict__[k][1],GF.__dict__[k][1]-m1.__dict__[k][1])
else :
print fmt%(name,m1.__dict__[k],GF.__dict__[k],GF.__dict__[k]-m1.__dict__[k])
if latex :
print '\\hline\\hline\n &&&\\\\ \n \\end{tabular}'
print
else :
print
print "\nA test\n"
latex=True
m1=Model(-1.5e-2,1.5e-2,301,-1.5e-2,1.5e-2,301)
pxl=m1.dX
m1(1.,0.,0.,0.,30.*pxl,1.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
    TestOut('Symmetric, centered',m1,GF,latex=latex)
m1(1.,0,0.1*pxl,0.,30.*pxl,1.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
    TestOut('Symmetric, North',m1,GF,latex=latex)
    m1(1.,-0.1*pxl,0.,0.,30.*pxl,1.,MinMax=False)
    GF=NoBackground_Base(m1.X,m1.Y,m1.D)
    TestOut('Symmetric, West',m1,GF,latex=latex)
m1(1.,0,-0.1*pxl,0.,30.*pxl,1.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
    TestOut('Symmetric, South',m1,GF,latex=latex)
m1(1.,0.1*pxl,0.,0.,30.*pxl,1.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
    TestOut('Symmetric, East',m1,GF,latex=latex)
m1(1.,0.,0.,0.,30.*pxl,2.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
    TestOut('Asymmetric, Center',m1,GF,latex=latex)
m1(1.,0.,0.1*pxl,0.,30.*pxl,2.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
    TestOut('Asymmetric, North',m1,GF,latex=latex)
    m1(1.,-0.1*pxl,0.,0.,30.*pxl,2.,MinMax=False)
    GF=NoBackground_Base(m1.X,m1.Y,m1.D)
    TestOut('Asymmetric, West',m1,GF,latex=latex)
m1(1.,0,-0.1*pxl,0.,30.*pxl,2.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
    TestOut('Asymmetric, South',m1,GF,latex=latex)
m1(1.,0.1*pxl,0,0.,30.*pxl,2.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
    TestOut('Asymmetric, East',m1,GF,latex=latex)
m1(1.,0.*pxl,0*pxl,45.,30.*pxl,2.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
    TestOut('Asymmetric, Center, Rotated 45 deg',m1,GF,latex=latex)
m1(1.,0.*pxl,0*pxl,90.,30.*pxl,2.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
    TestOut('Asymmetric, Center, Rotated 90 deg',m1,GF,latex=latex)
m1(1.,0.*pxl,0*pxl,-45.,30.*pxl,2.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
    TestOut('Asymmetric, Center, Rotated -45 deg',m1,GF,latex=latex)
m1(1.,0.*pxl,0*pxl,-89.,30.*pxl,2.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
    TestOut('Asymmetric, Center, Rotated -89 deg',m1,GF,latex=latex)
m1(1.,0.1*pxl,-0.1*pxl,-45.,30.*pxl,2.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
    TestOut('Asymmetric, South East, Rotated -45 deg',m1,GF,latex=latex)
"""
Example of chisq fitting using iminuit
from numpy import *
from iminuit.util import make_func_code, describe
import iminuit
x=zeros([601,601]) ; y=zeros([601,601])
for k in range(601) : x[:,k]=float(k-300)
for k in range(601) : y[k,:]=float(k-300)
x.shape=x.size;y.shape=y.size;GG=GaussFit.efficient_gaussClosedForm_for_fit(x,y);KSQ=GaussFit.efficient_chisq_gaussClosedForm_for_fit(x,y,GG(0.001,0.,0.002,0.,0.,0.,1.)+randn(x.size)*0.1)
mKSQ=iminuit.Minuit(KSQ,D=0,E=0,fix_D=True,fix_E=True,A=0.005,B=0.005,C=0.005,F=0.,b=0.9)
mKSQ.migrad()
"""
|
MicheleMaris/GaussFit
|
__init__.py
|
Python
|
gpl-2.0
| 36,001
|
[
"Gaussian"
] |
b572855b86362e633cf94451a2d71663ee052d31fb1b355bc93c020a1b5b14ed
|
import io
import time
import click
import pysam
import logging
logger = logging.getLogger(__name__)
@click.command()
@click.argument('inbam', type=click.Path(exists=True))
@click.argument('fastq', type=click.Path())
@click.option('--mq-threshold', type=click.IntRange(min=0, max=255), help='Skip reads below this threshold', default=0)
@click.option('-v', count=True, help='Verbosity level')
@click.option('-p', is_flag=True, help='Show progress bar')
def cli(inbam, fastq, mq_threshold, v, p):
"""This consumes a BAM, treats it as a truth dataset and writes out all the mapped reads."""
level = logging.DEBUG if v > 0 else logging.WARNING
logging.basicConfig(level=level)
bam_in_fp = pysam.AlignmentFile(inbam, 'rb')
total_read_count = bam_in_fp.mapped + bam_in_fp.unmapped # Sadly, this is only approximate
progress_bar_update_interval = int(0.01 * total_read_count)
cnt, rs = 0, 0
t0 = time.time()
with click.progressbar(length=total_read_count, label='Processing BAM',
file=None if p else io.BytesIO()) as bar, open(fastq, 'w') as fastq_out_fp:
for cnt, rs in process_file(bam_in_fp=bam_in_fp, fastq_out_fp=fastq_out_fp,
mq_threshold=mq_threshold,
progress_bar_update_interval=progress_bar_update_interval):
bar.update(progress_bar_update_interval)
t1 = time.time()
logger.debug('Analyzed {:d} reads in {:2.2f}s. Wrote out {:d} templates'.format(cnt, t1 - t0, rs))
def process_file(bam_in_fp, fastq_out_fp, mq_threshold, progress_bar_update_interval=100):
"""Main processing function that goes through the bam file, analyzing read alignment and writing out
:param bam_in_fp: Pointer to original BAM
:param fastq_out_fp: Pointer to file to be written
:param mq_threshold: Pointer to PERBAM being created
:param progress_bar_update_interval: how many reads to process before yielding (to update progress bar as needed)
:return: number of reads processed
"""
n0 = progress_bar_update_interval
read_cache = {} # dictionary with qname as key and read as value
read_serial = 0
for tot_read_cnt, read in enumerate(bam_in_fp):
n0 -= 1
if n0 == 0:
yield tot_read_cnt, read_serial
n0 = progress_bar_update_interval
if read.is_unmapped or read.mate_is_unmapped:
continue
if read.reference_id != read.next_reference_id: # Mates are in different chroms, don't want that
continue
if read.is_paired:
if read.qname in read_cache: # Yay we found the mate
read2 = read_cache.pop(read.qname)
if read.mapping_quality >= mq_threshold and read2.mapping_quality >= mq_threshold:
read_serial = flush_reads([read, read2], fastq_out_fp, read_serial)
else:
read_cache[read.qname] = read
else:
if read.mapping_quality >= mq_threshold:
read_serial = flush_reads([read], fastq_out_fp, read_serial)
yield tot_read_cnt + 1, read_serial # tot_read_cnt starts from 0 actually ...
def flush_reads(read_l, fastq_out_fp, read_serial):
"""Write out reads to a fastq file.
qname is 'read_serial|chrom|copy|ro|pos|rlen|cigar|ro|pos|rlen|cigar'
:param read_l:
:param fastq_out_fp:
:param read_serial:
:return: read_serial, updated
"""
qname = '|'.join([str(read_serial), str(read_l[0].reference_id + 1), '0'])
for ro, r in enumerate(read_l):
        qname += '|' + '|'.join([str(ro), str(r.pos), str(r.query_length), r.cigarstring])
for n, r in enumerate(read_l):
        fastq_out_fp.write('@' + qname + ('/' + str(n + 1) if len(read_l) > 1 else '') + '\n'
+ r.query_sequence + '\n+\n'
+ '~' * len(r.query_sequence) + '\n')
return read_serial + 1
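# Illustrative qname (hypothetical values): for read_serial=7, reference_id=2 (written as chrom 3)
# and a proper pair aligned at positions 1000 and 1250 with 100M CIGARs, the two records get
#   @7|3|0|0|1000|100|100M|1|1250|100|100M/1   and   the same qname with /2,
# each followed by the read sequence and a dummy quality line of '~' characters.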
|
latticelabs/Mitty
|
mitty/benchmarking/convert_bam_to_truth_fastq.py
|
Python
|
gpl-2.0
| 3,742
|
[
"pysam"
] |
aa3bea74a84a0901344a83c653b1316d213284b00827cfd3bd64d01815156899
|
from api.models import Photo, Face, AlbumDate, Person
from django.db.models import Prefetch
from api.serializers_serpy import AlbumDateListWithPhotoHashSerializer as AlbumDateListWithPhotoHashSerializerSerpy
from api.serializers import AlbumDateListWithPhotoHashSerializer as AlbumDateListWithPhotoHashSerializer
import base64
import requests
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from seaborn import color_palette
import itertools
from datetime import datetime
import face_recognition
from tqdm import tqdm
from sklearn import mixture
from scipy import linalg
from scipy.cluster.hierarchy import dendrogram, linkage
import matplotlib as mpl
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, AgglomerativeClustering, DBSCAN
from sklearn.cluster import MeanShift, estimate_bandwidth
def get_or_create_person(name):
qs = Person.objects.filter(name=name)
if qs.count() > 0:
return qs[0]
else:
new_person = Person()
new_person.name = name
new_person.save()
return new_person
def get_face_encoding(face):
return np.frombuffer(bytes.fromhex(face.encoding))
def nuke_people():
for person in Person.objects.filter(name__startswith='Person'):
person.delete()
faces = list(Face.objects.all())
face_encodings = np.array([np.frombuffer(bytes.fromhex(f.encoding)) for f in faces])
num_groups = []
for _ in tqdm(range(50)):
groups = []
np.random.shuffle(faces)
for face in faces:
if len(groups) == 0:
groups.append([face])
else:
group_this_face_belongs_to = None
encoding_face_curr = get_face_encoding(face)
for group_idx, group in enumerate(groups):
face_group_repr = group[0]
encoding_face_group_repr = get_face_encoding(face_group_repr)
# encoding_face_group_repr = np.array([get_face_encoding(f) for f in group]).mean(0)
if face_recognition.compare_faces([encoding_face_group_repr], encoding_face_curr, tolerance=0.65)[0]:
group_this_face_belongs_to = group_idx
            if group_this_face_belongs_to is not None:
groups[group_this_face_belongs_to].append(face)
else:
groups.append([face])
num_groups.append(len(groups))
num_people = int(np.mean(num_groups))
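# The greedy grouping above depends on the (shuffled) order in which faces are visited, so it is
# repeated 50 times and num_people is taken as the mean number of groups found across the runs.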
if False:
faces = Face.objects.all()
face_encodings = np.array([np.frombuffer(bytes.fromhex(f.encoding)) for f in faces])
# Linkage clustering
Z = AgglomerativeClustering(linkage='ward', n_clusters=num_people)
labels = Z.fit_predict(face_encodings)
for face,label in zip(faces,labels):
person = get_or_create_person(name="Person %d"%label)
face.person = person
face.save()
# Z = linkage(face_encodings, 'ward')
# fig = plt.figure(figsize=(25, 10))
# dn = dendrogram(Z)
#mean-shift
if True:
nuke_people()
faces = list(Face.objects.all())
face_encodings = np.array([np.frombuffer(bytes.fromhex(f.encoding)) for f in faces])
X = StandardScaler().fit_transform(face_encodings)
bandwidth = estimate_bandwidth(X, quantile=0.1, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
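    # NOTE: after fitting, the cluster assignments are available in ms.labels_; mapping them back
    # onto Person objects (as the DBSCAN branch below does) is left out of this branch.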
#DBSCAN
if False:
nuke_people()
faces = list(Face.objects.all())
face_encodings = np.array([np.frombuffer(bytes.fromhex(f.encoding)) for f in faces])
X = StandardScaler().fit_transform(face_encodings)
# #############################################################################
# Compute DBSCAN
db = DBSCAN(eps=5, min_samples=2).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
for label,face in zip(labels,faces):
person = get_or_create_person(name="Person %d"%label)
face.person = person
face.save()
# naive using pairwise distance threshold
if False:
groups = []
for face in faces:
if len(groups) == 0:
groups.append([face])
else:
group_this_face_belongs_to = None
encoding_face_curr = get_face_encoding(face)
for group_idx, group in enumerate(groups):
# face_group_repr = group[0]
encoding_face_group_repr = np.array([get_face_encoding(f) for f in group]).mean(0)
if face_recognition.compare_faces([encoding_face_group_repr], encoding_face_curr, tolerance=0.6)[0]:
group_this_face_belongs_to = group_idx
            if group_this_face_belongs_to is not None:
groups[group_this_face_belongs_to].append(face)
else:
groups.append([face])
for group_idx, group in enumerate(groups):
person = get_or_create_person(name="Person %d"%group_idx)
for face in group:
face.person = person
face.save()
# gaussian mixture model for face clustering / classification
# and using BIC to compute the optimal number of classes
if False:
X = face_encodings
X = preprocessing.normalize(face_encodings)
# # Number of samples per component
# n_samples = 500
# # Generate random sample, two components
# np.random.seed(0)
# C = np.array([[0., -0.1], [1.7, .4]])
# X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
# .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = [num_people]
cv_types = ['full']
for cv_type in cv_types:
for n_components in tqdm(n_components_range):
# Fit a Gaussian mixture with EM
gmm = mixture.GaussianMixture(n_components=n_components,
covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(color_palette('Paired',20).as_hex())
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, cov, color) in enumerate(zip(clf.means_, clf.covariances_,
color_iter)):
# v, w = linalg.eigh(cov)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
# angle = np.arctan2(w[0][1], w[0][0])
# angle = 180. * angle / np.pi # convert to degrees
# v = 2. * np.sqrt(2.) * np.sqrt(v)
# ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
# ell.set_clip_box(splot.bbox)
# ell.set_alpha(.5)
# splot.add_artist(ell)
plt.xticks(())
plt.yticks(())
    plt.title('Selected GMM: full covariance, %d components' % clf.n_components)
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
# face_embedded = TSNE(n_components=2,n_iter=100000,verbose=1,perplexity=50).fit_transform(face_encodings)
# plt.scatter(face_embedded[:,0],face_embedded[:,1],c=Y_)
# plt.show()
for cluster_id, face in zip(Y_,faces):
print(face,cluster_id)
person_name = 'Person %d'%cluster_id
person = get_or_create_person(person_name)
face.person = person
face.person_label_is_inferred = True
face.save()
# p = Photo.objects.first()
# image_path = p.image_path
# captions = {}
# with open(image_path, "rb") as image_file:
# encoded_string = base64.b64encode(image_file.read())
# encoded_string = str(encoded_string)[2:-1]
# resp_captions = requests.post('http://localhost:5001/longcaptions/',data=encoded_string)
# faces = Face.objects.all()
# face_encodings = [np.frombuffer(bytes.fromhex(f.encoding)) for f in faces]
# person_ids = [f.person.id for f in faces]
# palette = color_palette('Paired',max(person_ids)+1).as_hex()
# colors = [palette[i] for i in person_ids]
# face_embedded = TSNE(n_components=2,n_iter=100000,verbose=1,perplexity=50).fit_transform(face_encodings)
# plt.scatter(face_embedded[:,0],face_embedded[:,1],c=colors)
# plt.show()
# start = datetime.now()
# qs = AlbumDate.objects.all().order_by('date').prefetch_related(
# Prefetch('photos', queryset=Photo.objects.all().only('image_hash','exif_timestamp','favorited','hidden')))
# qs_res = list(qs)
# print('db query took %.2f seconds'%(datetime.now()-start).total_seconds())
# start = datetime.now()
# res = AlbumDateListWithPhotoHashSerializerSerpy(qs_res,many=True).data
# print('serpy serializing took %.2f seconds'%(datetime.now()-start).total_seconds())
# start = datetime.now()
# res = AlbumDateListWithPhotoHashSerializer(qs_res,many=True).data
# print('drf serializing took %.2f seconds'%(datetime.now()-start).total_seconds())
# SELECT ("api_albumdate_photos"."albumdate_id") AS "_prefetch_related_val_albumdate_id",
# "api_photo"."image_hash",
# "api_photo"."exif_timestamp",
# "api_photo"."favorited",
# "api_photo"."hidden"
# FROM "api_photo"
# INNER JOIN "api_albumdate_photos"
# ON ("api_photo"."image_hash" = "api_albumdate_photos"."photo_id");
|
hooram/ownphotos-backend
|
api/bench.py
|
Python
|
mit
| 10,303
|
[
"Gaussian"
] |
d6c000aebf47dd6032abc3cf93267dc3d68d7bdb4e8452c5ee14073f118b14b3
|
import os
import sys
import time
import glob
import shutil
import argparse
from mpi4py import MPI
class ParallelMDruns(object):
##TICA
def write_script(self, script, directory, rank, mdpfile, grofile, topfile, output_grofile, start_coord, num_coord, tprfile='topol.tpr', trrfile='traj.trr', edrfile='ener.edr', shebang='/bin/bash', ndxfile='', grompp_options='', mdrun_options=''):
##TICA
frame_designation_path = directory + '/' + 'frame_desig.txt'
        with open(frame_designation_path, 'w') as desig_file:
            for i in xrange(num_coord):
                this_coord = start_coord + i
                desig_file.write("%d\n" % this_coord)
##TICAA
file_path = directory + '/' + script
if not ndxfile:
ndxfile_option=''
else:
ndxfile_option='-n '+ndxfile
with open(file_path, 'w') as file:
script ="""#!%(shebang)s
# this script was generated automatically by thread %(rank)i
startgro=%(grofile)s
tmpstartgro=tmpstart.gro
outgro=%(output_grofile)s
natoms=$(sed -n '2p' $startgro)
nlines_per_frame=$((natoms+3))
nlines=`wc -l %(directory)s/$startgro| cut -d' ' -f1`
nframes=$((nlines/nlines_per_frame))
rm -rf $outgro
for idx in `seq 1 $nframes`; do
start=$(($nlines_per_frame*(idx-1)+1))
end=$(($nlines_per_frame*idx))
sed "$start"','"$end"'!d' $startgro > $tmpstartgro
# gromacs preprocessing & MD
grompp %(grompp_options)s -f %(mdpfile)s -c $tmpstartgro -p %(topfile)s %(ndxfile_option)s -o %(tprfile)s 1>/dev/null 2>/dev/null
mdrun -nt 1 %(mdrun_options)s -s %(tprfile)s -o %(trrfile)s -e %(edrfile)s 1>/dev/null 2>/dev/null
# store data
cat confout.gro >> $outgro
##TICA store trajectories
if [ -e traj.xtc ]; then
save_name=`sed "$idx"'q;d' frame_desig.txt`
mv traj.xtc ../../latest_frames/traj"$save_name".xtc
fi
##TICA
done
# remove temporary files
rm -f $tmpstartgro
""" % locals()
file.write(script)
def create_arg_parser(self):
parser = argparse.ArgumentParser(description="Run parallel mdrun..")
# required options
parser.add_argument("-f",
type=str,
dest="mdpfile",
required=True)
parser.add_argument("-c",
type=str,
dest="grofile",
required=True)
parser.add_argument("-p",
type=str,
dest="topfile",
required=True)
# other options
parser.add_argument("-n",
type=str,
dest="ndxfile")
parser.add_argument("-o",
type=str,
dest="output_file",
default="out.gro")
parser.add_argument("-a", "--afiles",
type=str,
nargs='*',
dest="afiles",
help="additional files that should be copied in the subdirectories")
parser.add_argument("-s", "--sfiles",
type=str,
nargs='*',
dest="sfiles",
help="additional files that should be split over the subdirectories")
parser.add_argument("--grompp_options",
type=str,
dest="grompp_options",
default="")
parser.add_argument("--mdrun_options",
type=str,
dest="mdrun_options",
default="")
parser.add_argument("-t",
action="store",
type=str,
dest="tmpdir")
return parser
def copy_additional_files(self, afiles, rundir):
# copy additional files
if isinstance(afiles, basestring):
afiles = [afiles]
for afile in afiles:
shutil.copy(afile, rundir + '/' + afile)
return
def split_additional_files(self, sfiles, rundirs, ncoords_per_thread, size):
if isinstance(sfiles, basestring):
sfiles = [sfiles]
for sfile in sfiles:
with open(sfile, 'r') as sf:
for idx in xrange(size):
sfile_thread = rundirs[idx] + '/' + 'start.gro'
with open(sfile_thread, 'w') as sf_t:
nlines_per_thread = ncoords_per_thread[idx]
for jdx in xrange(nlines_per_thread):
line = sf.readline()
if line:
line = line.replace("\n", "")
print >> sf_t, line
else:
break
return
def run(self):
#initialize mpi variables
comm = MPI.COMM_WORLD # MPI environment
size = comm.Get_size() # number of threads
rank = comm.Get_rank() # number of the current thread
parser = self.create_arg_parser()
args = parser.parse_args() # set argument parser
tcpu0 = time.time()
# preprocessing
curdir = os.getcwd()
if args.tmpdir is None:
args.tmpdir = curdir + '/tmp'
rundir = args.tmpdir + '/' + 'thread%i'%rank
if rank == 0:
print "Creating subdirectories %s/threadX (1<=X<=%i)..." %(args.tmpdir, size)
shutil.rmtree(rundir, ignore_errors=True)
os.makedirs(rundir)
comm.Barrier()
##TICA
if rank == 0:
tica_dir = curdir + '/' + 'latest_frames'
tica_backup = curdir + '/' + 'back_up_latest_frames'
shutil.rmtree(tica_backup, ignore_errors=True)
try:
shutil.copytree(tica_dir, tica_backup)
except:
print "Making latest_frames directory for first time"
shutil.rmtree(tica_dir, ignore_errors=True)
os.makedirs(tica_dir)
try:
shutil.copy("weight.w", tica_dir) #store the current input files
shutil.copy("input.gro", tica_dir)
except:
print "Could not locate input files"
##TICAA
if rank==0:
print "Preparing .gro files..."
rundirs=[rundir]
for idx in range(1,size):
rundirs.append(comm.recv(source=idx, tag=idx))
grofile = open(args.grofile, 'r')
grofile.next()
natoms = int(grofile.next())
for idx, line in enumerate(grofile):
pass
nlines = idx + 3
ncoords = nlines/(natoms+3)
grofile.close()
if ncoords < size:
raise ValueError("the number of runs should be greater or equal to the number of threads.")
ncoords_per_thread = [ncoords/size for _ in xrange(size)]
nextra_coords = ncoords%size
for idx in xrange(nextra_coords):
ncoords_per_thread[idx] += 1
##TICA
thread_start_coord = [0]
current_coord = 0
##TICAA
with open(args.grofile, 'r') as grofile:
for idx in xrange(size):
##TICA
current_coord += ncoords_per_thread[idx]
thread_start_coord.append(current_coord)
##TICAA
grofile_thread = rundirs[idx] + '/' + 'start.gro'
with open(grofile_thread, 'w') as grofile_t:
nlines_per_thread = ncoords_per_thread[idx]*(natoms+3)
for jdx in xrange(nlines_per_thread):
line = grofile.readline()
if line:
line = line.replace("\n", "")
print >> grofile_t, line
else:
break
if args.sfiles is not None:
self.split_additional_files(args.sfiles, rundirs, ncoords_per_thread, size)
##TICA
thread_start_coord = thread_start_coord[:-1]
thread_num_coord = ncoords_per_thread[:]
##TICAA
else:
comm.send(rundir, dest=0, tag=rank)
thread_num_coord = None
thread_start_coord = None
comm.Barrier()
##TICA
thread_num_coord = comm.scatter(thread_num_coord, root=0)
thread_start_coord = comm.scatter(thread_start_coord, root=0)
##TICAA
if rank==0:
print "copying .mdp and .top files..."
shutil.copy(args.mdpfile, rundir+ '/' + 'grompp.mdp')
shutil.copy(args.topfile, rundir + '/' + 'topol.top')
if args.ndxfile is not None:
if os.path.isfile(args.ndxfile):
shutil.copy(args.ndxfile, rundir + '/' + args.ndxfile) # copy ndxfile if given
else:
print "Warning: index file %s does not exist" %args.ndxfile
if args.afiles is not None:
self.copy_additional_files(args.afiles, rundir)
# copying .itp files supposing that those are located in the same directory as the .top file
topdir = os.path.split(args.topfile)[0]
for itpfile in glob.glob(topdir + '*.itp'):
shutil.copy(topdir + itpfile, rundir + '/' + itpfile) # copy .itp files
comm.Barrier()
if rank == 0:
tcpu1 = time.time()
print "Time used for preprocessing (parallelization): %.2fs" %(tcpu1 - tcpu0)
script = 'run.sh'
self.write_script(script, rundir, rank, 'grompp.mdp', 'start.gro', 'topol.top', 'out.gro', thread_start_coord, thread_num_coord,\
ndxfile=args.ndxfile, grompp_options=args.grompp_options, mdrun_options=args.mdrun_options) ##TICA
comm.Barrier()
if rank == 0:
print "Run GROMACS preprocessing and MD..."
os.chdir(rundir)
os.system('chmod +x' + ' ' + script)
os.system('./' + script)
os.chdir(curdir)
comm.Barrier()
# post processing
if rank == 0:
tcpu2 = time.time()
print "Time used for MD: %.2fs" %(tcpu2 - tcpu1)
shutil.rmtree(args.output_file, ignore_errors=True)
with open(args.output_file, 'w') as output_file:
for rundir in rundirs:
with open(rundir + '/' + 'out.gro', 'r') as output_file_thread:
for line in output_file_thread:
print >> output_file, line.replace("\n", "")
print "Output data have been saved in %s" %args.output_file
if __name__ == '__main__':
ParallelMDruns().run()
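# Example invocation (hypothetical file names), spreading one serial mdrun per frame of a
# multi-frame start.gro over 8 MPI ranks:
#   mpirun -np 8 python p_mdrun.py -f grompp.mdp -c start.gro -p topol.top -o out.gro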
|
jp43/lsdmap
|
dmdmd/tools/p_mdrun.py
|
Python
|
bsd-3-clause
| 10,815
|
[
"Gromacs"
] |
91c656dd10cb37db37ff02e3aca59b43a92fcde58259347fb27d478bfb90ad5f
|
########################################################################
# File: TornadoFileCatalogHandler.py
########################################################################
"""
:mod: TornadoFileCatalogHandler
.. module: TornadoFileCatalogHandler
  :synopsis: TornadoFileCatalogHandler is a simple Replica and Metadata Catalog service
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
# imports
import csv
from io import StringIO
# from DIRAC
from DIRAC import gLogger, S_ERROR
from DIRAC.DataManagementSystem.Service.FileCatalogHandler import FileCatalogHandlerMixin
from DIRAC.Core.Tornado.Server.TornadoService import TornadoService
sLog = gLogger.getSubLogger(__name__)
class TornadoFileCatalogHandler(FileCatalogHandlerMixin, TornadoService):
"""
    .. class:: TornadoFileCatalogHandler
A simple Replica and Metadata Catalog service.
"""
# This is needed because the mixin class uses `cls.log`
log = sLog
def export_streamToClient(self, seName):
"""This method used to transfer the SEDump to the client,
formated as CSV with '|' separation
:param seName: name of the se to dump
:returns: the result of the FileHelper
"""
retVal = self.getSEDump(seName)
try:
csvOutput = StringIO()
writer = csv.writer(csvOutput, delimiter="|")
writer.writerows(retVal)
ret = csvOutput.getvalue()
return ret
except Exception as e:
sLog.exception("Exception while sending seDump", repr(e))
return S_ERROR("Exception while sendind seDump: %s" % repr(e))
finally:
csvOutput.close()
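# A client receiving the string returned above can recover the rows with the same delimiter,
# e.g. (sketch): rows = list(csv.reader(StringIO(dump), delimiter="|"))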
|
ic-hep/DIRAC
|
src/DIRAC/DataManagementSystem/Service/TornadoFileCatalogHandler.py
|
Python
|
gpl-3.0
| 1,737
|
[
"DIRAC"
] |
ba7a5da65407bf9eabf6b36932b6d0d4a4e3d8283c6ed5402b28f0cbae5e35d9
|
import pytest
from time import time
import capybara
from capybara.exceptions import ElementNotFound
from capybara.tests.helpers import extract_results
class CheckTestCase:
@pytest.fixture(autouse=True)
def setup_session(self, session):
session.visit("/form")
class TestCheck(CheckTestCase):
    def test_checked_attribute_is_true_if_checked(self, session):
session.check("Terms of Use")
assert session.find("xpath", "//input[@id='form_terms_of_use']").checked
def test_checked_attribute_is_false_if_unchecked(self, session):
assert not session.find("xpath", "//input[@id='form_terms_of_use']").checked
@pytest.mark.requires("js")
def test_triggers_associated_events(self, session):
session.visit("/with_js")
session.check("checkbox_with_event")
assert session.has_css("#checkbox_event_triggered")
def test_checking_does_not_change_an_already_checked_checkbox(self, session):
assert session.find("xpath", "//input[@id='form_pets_dog']").checked
session.check("form_pets_dog")
assert session.find("xpath", "//input[@id='form_pets_dog']").checked
def test_checking_checks_an_unchecked_checkbox(self, session):
assert not session.find("xpath", "//input[@id='form_pets_cat']").checked
session.check("form_pets_cat")
assert session.find("xpath", "//input[@id='form_pets_cat']").checked
def test_unchecking_does_not_change_an_already_unchecked_checkbox(self, session):
assert not session.find("xpath", "//input[@id='form_pets_cat']").checked
session.uncheck("form_pets_cat")
assert not session.find("xpath", "//input[@id='form_pets_cat']").checked
def test_unchecking_unchecks_a_checked_checkbox(self, session):
assert session.find("xpath", "//input[@id='form_pets_dog']").checked
session.uncheck("form_pets_dog")
assert not session.find("xpath", "//input[@id='form_pets_dog']").checked
def test_checks_a_checkbox_by_id(self, session):
session.check("form_pets_cat")
session.click_button("awesome")
pets = extract_results(session).getlist("form[pets][]")
assert "dog" in pets
assert "cat" in pets
assert "hamster" in pets
def test_checks_a_checkbox_by_label(self, session):
session.check("Cat")
session.click_button("awesome")
pets = extract_results(session).getlist("form[pets][]")
assert "dog" in pets
assert "cat" in pets
assert "hamster" in pets
def test_raises_an_error_for_a_locator_that_does_not_exist(self, session):
with pytest.raises(ElementNotFound):
session.check("does not exist")
def test_raises_an_error_for_a_disabled_checkbox(self, session):
with pytest.raises(ElementNotFound):
session.check("Disabled Checkbox")
class TestCheckWithAutomaticLabelClick(CheckTestCase):
@pytest.fixture(autouse=True)
def setup_settings(self):
old_automatic_label_click = capybara.automatic_label_click
capybara.automatic_label_click = True
try:
yield
finally:
capybara.automatic_label_click = old_automatic_label_click
def test_checks_via_clicking_the_label_with_for_attribute_if_possible(self, session):
assert session.find("checkbox", "form_cars_tesla", unchecked=True, visible="hidden")
session.check("form_cars_tesla")
session.click_button("awesome")
assert "tesla" in extract_results(session).getlist("form[cars][]")
def test_checks_via_clicking_the_wrapping_label_if_possible(self, session):
assert session.find("checkbox", "form_cars_mclaren", unchecked=True, visible="hidden")
session.check("form_cars_mclaren")
session.click_button("awesome")
assert "mclaren" in extract_results(session).getlist("form[cars][]")
def test_does_not_click_the_label_if_unneeded(self, session):
assert session.find("checkbox", "form_cars_jaguar", checked=True, visible="hidden")
session.check("form_cars_jaguar")
session.click_button("awesome")
assert "jaguar" in extract_results(session).getlist("form[cars][]")
def test_raises_original_error_when_no_label_available(self, session):
with pytest.raises(ElementNotFound) as excinfo:
session.check("form_cars_ariel")
assert "Unable to find checkbox 'form_cars_ariel'" in str(excinfo.value)
def test_raises_error_if_not_allowed_to_click_label(self, session):
with pytest.raises(ElementNotFound) as excinfo:
session.check("form_cars_mclaren", allow_label_click=False)
assert "Unable to find checkbox 'form_cars_mclaren'" in str(excinfo.value)
class TestCheckWithoutAutomaticLabelClick(CheckTestCase):
@pytest.fixture(autouse=True)
def setup_settings(self):
old_automatic_label_click = capybara.automatic_label_click
capybara.automatic_label_click = False
try:
yield
finally:
capybara.automatic_label_click = old_automatic_label_click
def test_raises_error_if_checkbox_not_visible(self, session):
with pytest.raises(ElementNotFound) as excinfo:
session.check("form_cars_mclaren")
assert "Unable to find checkbox 'form_cars_mclaren'" in str(excinfo.value)
def test_checks_via_the_label_if_allow_label_click_is_true(self, session):
assert session.find("checkbox", "form_cars_tesla", unchecked=True, visible="hidden")
session.check("form_cars_tesla", allow_label_click=True)
session.click_button("awesome")
assert "tesla" in extract_results(session).getlist("form[cars][]")
def test_check_via_the_label_if_input_is_moved_off_the_left_edge_of_the_page(self, session):
assert session.find("checkbox", "form_cars_pagani", unchecked=True, visible="all") is not None
session.check("form_cars_pagani", allow_label_click=True)
session.click_button("awesome")
assert "pagani" in extract_results(session).getlist("form[cars][]")
def test_check_via_the_label_if_input_is_visible_but_blocked_by_another_element(self, session):
assert session.find("checkbox", "form_cars_bugatti", unchecked=True, visible="all") is not None
session.check("form_cars_bugatti", allow_label_click=True)
session.click_button("awesome")
assert "bugatti" in extract_results(session).getlist("form[cars][]")
def test_does_not_wait_the_full_time_if_label_can_be_clicked(self, session):
assert session.find("checkbox", "form_cars_tesla", unchecked=True, visible="hidden") is not None
start_time = time()
session.check("form_cars_tesla", allow_label_click=True, wait=10)
end_time = time()
assert end_time - start_time < 10
|
elliterate/capybara.py
|
capybara/tests/session/test_check.py
|
Python
|
mit
| 6,858
|
[
"Jaguar",
"VisIt"
] |
59fd8a240f9585da12641ff2effbc70dcc07b1cedaf7093f112a649ab78e6f63
|
"""
Differential evolution
"""
import numpy
def de(output_basename, parameter_names, transform, loglikelihood, prior, nsteps=40000, vizfunc=None, printfunc=None, **problem):
"""
**Differential evolution**
via `inspyred <http://inspyred.github.io/>`_
    specially tuned: steady-state replacement, n-point crossover,
    population size 20, Gaussian mutation noise 0.01 & 1e-6.
    Stores intermediate results (can be used for resume, see seeds).
:param start: start point
:param seeds: list of start points
:param vizfunc: callback to do visualization of current best solution
:param printfunc: callback to summarize current best solution
:param seed: RNG initialization (if set)
"""
import json
import inspyred
import random
prng = random.Random()
if 'seed' in problem:
prng.seed(problem['seed'])
n_params = len(parameter_names)
seeds = problem.get('seeds', [])
if 'start' in problem:
seeds.append(problem['start'])
prefix = output_basename
def viz(candidate, args):
if vizfunc is not None:
vizfunc(candidate)
def print_candidate(candidate, l, args):
if printfunc is not None:
printfunc(cube=candidate, loglikelihood=l)
else:
print l, candidate
def eval_candidate(candidate):
params = transform(candidate)
l = loglikelihood(params)
p = prior(params)
if numpy.isinf(p) and p < 0:
print ' prior rejection'
            return -1e300, 0  # keep the (loglikelihood, prior) tuple shape expected by the caller
if numpy.isnan(l):
            return -1e300, 0
return l, p
@inspyred.ec.utilities.memoize
@inspyred.ec.evaluators.evaluator
def fitness(candidate, args):
l, p = eval_candidate(candidate)
#print_candidate(candidate, (l + p), args)
return (l + p)
cutoff_store = 10
def solution_archiver(random, population, archive, args):
psize = len(population)
population.sort(reverse=True)
best = population[0].fitness
#print 'BEST: ', best,
all_candidates = sorted(population + archive, reverse=True)
all_fitness = numpy.array([c.fitness for c in all_candidates])
mask = best - all_fitness > cutoff_store / 3
if mask.sum() < 20:
mask = best - all_fitness > cutoff_store
newarchive = [c for i, c in enumerate(all_candidates) if i == 0 or all_fitness[i - 1] != c.fitness]
print 'ARCHIVE: ', len(archive), len(newarchive)
json.dump([{'candidate': [float(f) for f in c.candidate], 'fitness':c.fitness} for c in newarchive],
open(prefix + '_values.json', 'w'), indent=4)
return newarchive
def observer(population, num_generations, num_evaluations, args):
population.sort(reverse=True)
candidate = population[0]
print ('{0} evaluations'.format(num_evaluations)), ' best:',
print_candidate(candidate.candidate, candidate.fitness, args)
if num_evaluations % len(population) == 0 or num_evaluations < len(population) or args.get('force_viz', False):
# for each turnaround of a full generation
viz(candidate.candidate, args)
def generator(random, args):
u = [random.uniform(0, 1) for _ in range(n_params)]
u = [random.gauss(0.5, 0.1) for _ in range(n_params)]
return bounder(u, args)
ea = inspyred.ec.DEA(prng)
ea.terminator = inspyred.ec.terminators.evaluation_termination
ea.archiver = solution_archiver
bounder = inspyred.ec.Bounder(lower_bound=1e-10, upper_bound=1-1e-10)
#bounder = inspyred.ec.Bounder(lower_bound=-20, upper_bound=20)
import copy
from math import log
@inspyred.ec.variators.mutator
def double_exponential_mutation(random, candidate, args):
mut_rate = args.setdefault('mutation_rate', 0.1)
mean = args.setdefault('gaussian_mean', 0.0)
stdev = args.setdefault('gaussian_stdev', 1.0)
scale = log(0.5) / - (stdev)
bounder = args['_ec'].bounder
mutant = copy.copy(candidate)
for i, m in enumerate(mutant):
dice = random.random()
if dice < mut_rate:
sign = (dice < mut_rate / 2) * 2 - 1
delta = -log(random.random()) / scale
mutant[i] += delta * sign
mutant = bounder(mutant, args)
return mutant
def minute_gaussian_mutation(random, candidates, args):
args = dict(args)
args['mutation_rate'] = 1
args['gaussian_stdev'] = 1e-6
return inspyred.ec.variators.gaussian_mutation(random, candidates, args)
ea.variator = [inspyred.ec.variators.n_point_crossover, inspyred.ec.variators.gaussian_mutation, minute_gaussian_mutation]
#ea.variator = [inspyred.ec.variators.n_point_crossover, double_exponential_mutation]
ea.replacer = inspyred.ec.replacers.steady_state_replacement
ea.observer = observer
pop_size = 20
final_pop = ea.evolve(pop_size=pop_size,
max_evaluations=nsteps, maximize=True, seeds=seeds,
gaussian_stdev=0.01, #mutation_rate=0.3,
bounder=bounder, generator=generator, evaluator=fitness,
)
best = max(final_pop)
seeds = [c.candidate for c in ea.archive]
print 'final candidate:', best
return {'start': best.candidate, 'value': best.fitness,
'seeds': seeds, 'method': 'DE'}
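if __name__ == '__main__':
    # Minimal toy usage sketch (not part of the original module): maximise a 2-d Gaussian
    # log-likelihood centred on 0.3 inside the unit cube. Assumes inspyred is installed; the
    # output prefix and all "toy_*" functions below are illustrative only.
    def toy_transform(cube):
        return cube
    def toy_loglikelihood(params):
        return -0.5 * float(numpy.sum((numpy.asarray(params) - 0.3) ** 2 / 0.01 ** 2))
    def toy_prior(params):
        return 0.0
    result = de('/tmp/de_toy', ['a', 'b'], toy_transform, toy_loglikelihood, toy_prior,
                nsteps=200, seed=1)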
|
JohannesBuchner/jbopt
|
jbopt/de.py
|
Python
|
bsd-2-clause
| 4,824
|
[
"Gaussian"
] |
16760569d23cefbd65f35d6cb5f02f4f7c336b3aa6793ee021c3ab1347de6b08
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import gzip
import json
import os
import unittest
import warnings
import xml.etree.cElementTree as ET
from pathlib import Path
from shutil import copyfile, copyfileobj
import numpy as np
import pytest
from monty.tempfile import ScratchDir
from pymatgen.core.lattice import Lattice
from pymatgen.electronic_structure.core import Orbital, Spin
from pymatgen.core.structure import Structure
from pymatgen.core import Element
from pymatgen.electronic_structure.core import Magmom, OrbitalType
from pymatgen.entries.compatibility import MaterialsProjectCompatibility
from pymatgen.io.vasp.inputs import Kpoints, Poscar
from pymatgen.io.vasp.outputs import (
BSVasprun,
Chgcar,
Dynmat,
Eigenval,
Elfcar,
Locpot,
Oszicar,
Outcar,
Procar,
UnconvergedVASPWarning,
VaspParserError,
Vasprun,
Wavecar,
Waveder,
Xdatcar,
)
from pymatgen.io.wannier90 import Unk
from pymatgen.util.testing import PymatgenTest
class VasprunTest(PymatgenTest):
_multiprocess_shared_ = True
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_multiple_dielectric(self):
v = Vasprun(self.TEST_FILES_DIR / "vasprun.GW0.xml")
self.assertEqual(len(v.other_dielectric), 3)
def test_charge_charge_dielectric(self):
"""
VASP 5.4.4 writes out two dielectric functions to vasprun.xml
These are the "density-density" and "velocity-velocity" linear response functions.
See the comments in `linear_optics.F` for details.
"""
v = Vasprun(
self.TEST_FILES_DIR / "vasprun.xml.dielectric_5.4.4",
parse_potcar_file=False,
)
self.assertEqual(v.dielectric is not None, True)
self.assertEqual("density" in v.dielectric_data, True)
self.assertEqual("velocity" in v.dielectric_data, True)
def test_optical_absorption_coeff(self):
v = Vasprun(self.TEST_FILES_DIR / "vasprun.BSE.xml.gz")
absorption_coeff = v.optical_absorption_coeff
self.assertEqual(absorption_coeff[1], 24966408728.917931)
def test_vasprun_with_more_than_two_unlabelled_dielectric_functions(self):
with self.assertRaises(NotImplementedError):
Vasprun(
self.TEST_FILES_DIR / "vasprun.xml.dielectric_bad",
parse_potcar_file=False,
)
def test_bad_vasprun(self):
self.assertRaises(ET.ParseError, Vasprun, self.TEST_FILES_DIR / "bad_vasprun.xml")
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
v = Vasprun(self.TEST_FILES_DIR / "bad_vasprun.xml", exception_on_bad_xml=False)
# Verify some things
self.assertEqual(len(v.ionic_steps), 1)
self.assertAlmostEqual(v.final_energy, -269.00551374)
self.assertTrue(issubclass(w[-1].category, UserWarning))
def test_runtype(self):
v = Vasprun(self.TEST_FILES_DIR / "vasprun.GW0.xml")
self.assertIn(v.run_type, "HF")
v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.pbesol_vdw")
self.assertIn(v.run_type, "PBEsol+vdW-DFT-D3-BJ")
v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.hse06")
self.assertIn(v.run_type, "HSE06")
v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.scan_rvv10")
self.assertIn(v.run_type, "SCAN+rVV10")
v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.dfpt.ionic")
self.assertIn(v.run_type, "GGA")
v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.dfpt")
self.assertIn(v.run_type, "GGA+U")
v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.r2scan")
self.assertIn(v.run_type, "R2SCAN")
v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.scan")
self.assertIn(v.run_type, "SCAN")
v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.pbesol")
self.assertIn(v.run_type, "PBEsol")
v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.rscan")
self.assertIn(v.run_type, "RSCAN")
v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.random")
self.assertIn(v.run_type, "RANDOMFUNCTIONAL")
v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.unknown")
with pytest.warns(UserWarning, match="Unknown run type!"):
self.assertIn(v.run_type, "unknown")
def test_vdw(self):
v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.vdw")
self.assertAlmostEqual(v.final_energy, -9.78310677)
def test_nonlmn(self):
filepath = self.TEST_FILES_DIR / "vasprun.xml.nonlm"
vasprun = Vasprun(filepath, parse_potcar_file=False)
orbs = list(vasprun.complete_dos.pdos[vasprun.final_structure[0]].keys())
self.assertIn(OrbitalType.s, orbs)
def test_standard(self):
filepath = self.TEST_FILES_DIR / "vasprun.xml"
vasprun = Vasprun(filepath, parse_potcar_file=False)
# Test NELM parsing.
self.assertEqual(vasprun.parameters["NELM"], 60)
# test pdos parsing
pdos0 = vasprun.complete_dos.pdos[vasprun.final_structure[0]]
self.assertAlmostEqual(pdos0[Orbital.s][Spin.up][16], 0.0026)
self.assertAlmostEqual(pdos0[Orbital.pz][Spin.down][16], 0.0012)
self.assertEqual(pdos0[Orbital.s][Spin.up].shape, (301,))
filepath2 = self.TEST_FILES_DIR / "lifepo4.xml"
vasprun_ggau = Vasprun(filepath2, parse_projected_eigen=True, parse_potcar_file=False)
totalscsteps = sum([len(i["electronic_steps"]) for i in vasprun.ionic_steps])
self.assertEqual(29, len(vasprun.ionic_steps))
self.assertEqual(len(vasprun.structures), len(vasprun.ionic_steps))
trajectory = vasprun.get_trajectory()
self.assertEqual(len(trajectory), len(vasprun.ionic_steps))
self.assertIn("forces", trajectory[0].site_properties)
for i, step in enumerate(vasprun.ionic_steps):
self.assertEqual(vasprun.structures[i], step["structure"])
self.assertTrue(
all(vasprun.structures[i] == vasprun.ionic_steps[i]["structure"] for i in range(len(vasprun.ionic_steps)))
)
self.assertEqual(308, totalscsteps, "Incorrect number of energies read from vasprun.xml")
self.assertEqual(["Li"] + 4 * ["Fe"] + 4 * ["P"] + 16 * ["O"], vasprun.atomic_symbols)
self.assertEqual(vasprun.final_structure.composition.reduced_formula, "LiFe4(PO4)4")
self.assertIsNotNone(vasprun.incar, "Incar cannot be read")
self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
self.assertIsNotNone(vasprun.eigenvalues, "Eigenvalues cannot be read")
self.assertAlmostEqual(vasprun.final_energy, -269.38319884, 7)
self.assertAlmostEqual(vasprun.tdos.get_gap(), 2.0589, 4)
expectedans = (2.539, 4.0906, 1.5516, False)
(gap, cbm, vbm, direct) = vasprun.eigenvalue_band_properties
self.assertAlmostEqual(gap, expectedans[0])
self.assertAlmostEqual(cbm, expectedans[1])
self.assertAlmostEqual(vbm, expectedans[2])
self.assertEqual(direct, expectedans[3])
self.assertFalse(vasprun.is_hubbard)
self.assertEqual(
vasprun.potcar_symbols,
[
"PAW_PBE Li 17Jan2003",
"PAW_PBE Fe 06Sep2000",
"PAW_PBE Fe 06Sep2000",
"PAW_PBE P 17Jan2003",
"PAW_PBE O 08Apr2002",
],
)
self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
self.assertIsNotNone(vasprun.actual_kpoints, "Actual kpoints cannot be read")
self.assertIsNotNone(vasprun.actual_kpoints_weights, "Actual kpoints weights cannot be read")
for atomdoses in vasprun.pdos:
for orbitaldos in atomdoses:
self.assertIsNotNone(orbitaldos, "Partial Dos cannot be read")
# test skipping ionic steps.
vasprun_skip = Vasprun(filepath, 3, parse_potcar_file=False)
self.assertEqual(vasprun_skip.nionic_steps, 29)
self.assertEqual(len(vasprun_skip.ionic_steps), int(vasprun.nionic_steps / 3) + 1)
self.assertEqual(len(vasprun_skip.ionic_steps), len(vasprun_skip.structures))
self.assertEqual(len(vasprun_skip.ionic_steps), int(vasprun.nionic_steps / 3) + 1)
# Check that nionic_steps is preserved no matter what.
self.assertEqual(vasprun_skip.nionic_steps, vasprun.nionic_steps)
self.assertNotAlmostEqual(vasprun_skip.final_energy, vasprun.final_energy)
# Test with ionic_step_offset
vasprun_offset = Vasprun(filepath, 3, 6, parse_potcar_file=False)
self.assertEqual(len(vasprun_offset.ionic_steps), int(len(vasprun.ionic_steps) / 3) - 1)
self.assertEqual(vasprun_offset.structures[0], vasprun_skip.structures[2])
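        # GGA+U run (lifepo4.xml): Hubbard U values and projected eigenvalues should both be parsed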
self.assertTrue(vasprun_ggau.is_hubbard)
self.assertEqual(vasprun_ggau.hubbards["Fe"], 4.3)
self.assertAlmostEqual(vasprun_ggau.projected_eigenvalues[Spin.up][0][0][96][0], 0.0032)
d = vasprun_ggau.as_dict()
self.assertEqual(d["elements"], ["Fe", "Li", "O", "P"])
self.assertEqual(d["nelements"], 4)
def test_unconverged(self):
filepath = self.TEST_FILES_DIR / "vasprun.xml.unconverged"
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
vasprun_unconverged = Vasprun(filepath, parse_potcar_file=False)
# Verify some things
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, UnconvergedVASPWarning))
self.assertTrue(vasprun_unconverged.converged_ionic)
self.assertFalse(vasprun_unconverged.converged_electronic)
self.assertFalse(vasprun_unconverged.converged)
def test_dfpt(self):
filepath = self.TEST_FILES_DIR / "vasprun.xml.dfpt"
vasprun_dfpt = Vasprun(filepath, parse_potcar_file=False)
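        # spot-check components of the two static dielectric tensors (epsilon_static and epsilon_static_wolfe) parsed from a DFPT run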
self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][0], 3.26105533)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][1], -0.00459066)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static[2][2], 3.24330517)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[0][0], 3.33402531)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[0][1], -0.00559998)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[2][2], 3.31237357)
self.assertTrue(vasprun_dfpt.converged)
entry = vasprun_dfpt.get_computed_entry()
entry = MaterialsProjectCompatibility(check_potcar_hash=False).process_entry(entry)
self.assertAlmostEqual(entry.uncorrected_energy + entry.correction, entry.energy)
def test_dfpt_ionic(self):
filepath = self.TEST_FILES_DIR / "vasprun.xml.dfpt.ionic"
vasprun_dfpt_ionic = Vasprun(filepath, parse_potcar_file=False)
self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[0][0], 515.73485838)
self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[0][1], -0.00263523)
self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[2][2], 19.02110169)
def test_dfpt_unconverged(self):
filepath = self.TEST_FILES_DIR / "vasprun.xml.dfpt.unconverged"
vasprun_dfpt_unconv = Vasprun(filepath, parse_potcar_file=False)
self.assertFalse(vasprun_dfpt_unconv.converged_electronic)
self.assertTrue(vasprun_dfpt_unconv.converged_ionic)
self.assertFalse(vasprun_dfpt_unconv.converged)
def test_uniform(self):
vasprun_uniform = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.uniform", parse_potcar_file=False)
self.assertEqual(vasprun_uniform.kpoints.style, Kpoints.supported_modes.Reciprocal)
def test_no_projected(self):
vasprun_no_pdos = Vasprun(self.TEST_FILES_DIR / "Li_no_projected.xml", parse_potcar_file=False)
self.assertIsNotNone(vasprun_no_pdos.complete_dos)
self.assertFalse(vasprun_no_pdos.dos_has_errors)
def test_dielectric(self):
vasprun_diel = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.dielectric", parse_potcar_file=False)
self.assertAlmostEqual(0.4294, vasprun_diel.dielectric[0][10])
self.assertAlmostEqual(19.941, vasprun_diel.dielectric[1][51][0])
self.assertAlmostEqual(19.941, vasprun_diel.dielectric[1][51][1])
self.assertAlmostEqual(19.941, vasprun_diel.dielectric[1][51][2])
self.assertAlmostEqual(0.0, vasprun_diel.dielectric[1][51][3])
self.assertAlmostEqual(34.186, vasprun_diel.dielectric[2][85][0])
self.assertAlmostEqual(34.186, vasprun_diel.dielectric[2][85][1])
self.assertAlmostEqual(34.186, vasprun_diel.dielectric[2][85][2])
self.assertAlmostEqual(0.0, vasprun_diel.dielectric[2][85][3])
def test_dielectric_vasp608(self):
# test reading dielectric constant in vasp 6.0.8
vasprun_diel = Vasprun(
self.TEST_FILES_DIR / "vasprun.xml.dielectric_6.0.8",
parse_potcar_file=False,
)
self.assertAlmostEqual(0.4338, vasprun_diel.dielectric[0][10])
self.assertAlmostEqual(5.267, vasprun_diel.dielectric[1][51][0])
self.assertAlmostEqual(0.4338, vasprun_diel.dielectric_data["density"][0][10])
self.assertAlmostEqual(5.267, vasprun_diel.dielectric_data["density"][1][51][0])
self.assertAlmostEqual(0.4338, vasprun_diel.dielectric_data["velocity"][0][10])
self.assertAlmostEqual(1.0741, vasprun_diel.dielectric_data["velocity"][1][51][0])
self.assertEqual(len(vasprun_diel.other_dielectric), 0)
def test_indirect_vasprun(self):
v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.indirect.gz")
(gap, cbm, vbm, direct) = v.eigenvalue_band_properties
self.assertFalse(direct)
def test_optical_vasprun(self):
vasprun_optical = Vasprun(
self.TEST_FILES_DIR / "vasprun.xml.opticaltransitions",
parse_potcar_file=False,
)
self.assertAlmostEqual(3.084, vasprun_optical.optical_transition[0][0])
self.assertAlmostEqual(3.087, vasprun_optical.optical_transition[3][0])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[0][1])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[1][1])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[7][1])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[19][1])
self.assertAlmostEqual(3.3799999999, vasprun_optical.optical_transition[54][0])
self.assertAlmostEqual(3.381, vasprun_optical.optical_transition[55][0])
self.assertAlmostEqual(3.381, vasprun_optical.optical_transition[56][0])
self.assertAlmostEqual(10554.9860, vasprun_optical.optical_transition[54][1])
self.assertAlmostEqual(0.0, vasprun_optical.optical_transition[55][1])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[56][1])
def test_force_constants(self):
vasprun_fc = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.dfpt.phonon", parse_potcar_file=False)
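        # reference data from vasprun.xml.dfpt.phonon: one 3x3 force-constant block, one normal-mode eigenvector and a slice of the eigenvalues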
fc_ans = [
[-0.00184451, -0.0, -0.0],
[-0.0, -0.00933824, -0.03021279],
[-0.0, -0.03021279, 0.01202547],
]
nm_ans = [
[0.0884346, -0.08837289, -0.24995639],
[-0.0884346, 0.08837289, 0.24995639],
[0.15306645, -0.05105771, -0.14441306],
[-0.15306645, 0.05105771, 0.14441306],
[-0.0884346, 0.08837289, 0.24995639],
[0.0884346, -0.08837289, -0.24995639],
[-0.15306645, 0.05105771, 0.14441306],
[0.15306645, -0.05105771, -0.14441306],
[-0.0884346, 0.08837289, 0.24995639],
[0.0884346, -0.08837289, -0.24995639],
[-0.15306645, 0.05105771, 0.14441306],
[0.15306645, -0.05105771, -0.14441306],
[0.0884346, -0.08837289, -0.24995639],
[-0.0884346, 0.08837289, 0.24995639],
[0.15306645, -0.05105771, -0.14441306],
[-0.15306645, 0.05105771, 0.14441306],
]
nm_eigenval_ans = [
-0.59067079,
-0.59067079,
-0.59067003,
-0.59067003,
-0.59067003,
-0.59067003,
-0.585009,
-0.585009,
-0.58500895,
-0.58500883,
-0.5062956,
-0.5062956,
]
self.assertEqual(vasprun_fc.force_constants.shape, (16, 16, 3, 3))
self.assertTrue(np.allclose(vasprun_fc.force_constants[8, 9], fc_ans))
self.assertEqual(vasprun_fc.normalmode_eigenvals.size, 48)
self.assertTrue(np.allclose(vasprun_fc.normalmode_eigenvals[17:29], nm_eigenval_ans))
self.assertEqual(vasprun_fc.normalmode_eigenvecs.shape, (48, 16, 3))
self.assertTrue(np.allclose(vasprun_fc.normalmode_eigenvecs[33], nm_ans))
def test_Xe(self):
vr = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.xe", parse_potcar_file=False)
self.assertEqual(vr.atomic_symbols, ["Xe"])
def test_invalid_element(self):
self.assertRaises(ValueError, Vasprun, self.TEST_FILES_DIR / "vasprun.xml.wrong_sp")
def test_selective_dynamics(self):
vsd = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.indirect.gz")
np.testing.assert_array_equal(
vsd.final_structure.site_properties.get("selective_dynamics"),
[[True] * 3, [False] * 3],
"Selective dynamics parsing error",
)
def test_as_dict(self):
filepath = self.TEST_FILES_DIR / "vasprun.xml"
vasprun = Vasprun(filepath, parse_potcar_file=False)
# Test that as_dict() is json-serializable
self.assertIsNotNone(json.dumps(vasprun.as_dict()))
self.assertEqual(
vasprun.as_dict()["input"]["potcar_type"],
["PAW_PBE", "PAW_PBE", "PAW_PBE", "PAW_PBE", "PAW_PBE"],
)
self.assertEqual(vasprun.as_dict()["input"]["nkpoints"], 24)
def test_get_band_structure(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
filepath = self.TEST_FILES_DIR / "vasprun_Si_bands.xml"
vasprun = Vasprun(filepath, parse_projected_eigen=True, parse_potcar_file=False)
bs = vasprun.get_band_structure(kpoints_filename=self.TEST_FILES_DIR / "KPOINTS_Si_bands")
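            # CBM/VBM energies, k-point indices and band indices for the Si line-mode band structure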
cbm = bs.get_cbm()
vbm = bs.get_vbm()
self.assertEqual(cbm["kpoint_index"], [13], "wrong cbm kpoint index")
            self.assertAlmostEqual(cbm["energy"], 6.2301, msg="wrong cbm energy")
self.assertEqual(cbm["band_index"], {Spin.up: [4], Spin.down: [4]}, "wrong cbm bands")
self.assertEqual(vbm["kpoint_index"], [0, 63, 64])
            self.assertAlmostEqual(vbm["energy"], 5.6158, msg="wrong vbm energy")
self.assertEqual(
vbm["band_index"],
{Spin.up: [1, 2, 3], Spin.down: [1, 2, 3]},
"wrong vbm bands",
)
self.assertEqual(vbm["kpoint"].label, "\\Gamma", "wrong vbm label")
self.assertEqual(cbm["kpoint"].label, None, "wrong cbm label")
projected = bs.get_projection_on_elements()
self.assertAlmostEqual(projected[Spin.up][0][0]["Si"], 0.4238)
projected = bs.get_projections_on_elements_and_orbitals({"Si": ["s"]})
self.assertAlmostEqual(projected[Spin.up][0][0]["Si"]["s"], 0.4238)
# Test compressed files case 1: compressed KPOINTS in current dir
with ScratchDir("./"):
copyfile(self.TEST_FILES_DIR / "vasprun_Si_bands.xml", "vasprun.xml")
# Check for error if no KPOINTS file
vasprun = Vasprun("vasprun.xml", parse_projected_eigen=True, parse_potcar_file=False)
with self.assertRaises(VaspParserError):
_ = vasprun.get_band_structure(line_mode=True)
                # Check KPOINTS.gz successfully inferred and used if present
with open(self.TEST_FILES_DIR / "KPOINTS_Si_bands", "rb") as f_in:
with gzip.open("KPOINTS.gz", "wb") as f_out:
copyfileobj(f_in, f_out)
bs_kpts_gzip = vasprun.get_band_structure()
self.assertEqual(bs.efermi, bs_kpts_gzip.efermi)
self.assertEqual(bs.as_dict(), bs_kpts_gzip.as_dict())
# Test compressed files case 2: compressed vasprun in another dir
with ScratchDir("./"):
os.mkdir("deeper")
copyfile(self.TEST_FILES_DIR / "KPOINTS_Si_bands", Path("deeper") / "KPOINTS")
with open(self.TEST_FILES_DIR / "vasprun_Si_bands.xml", "rb") as f_in:
with gzip.open(os.path.join("deeper", "vasprun.xml.gz"), "wb") as f_out:
copyfileobj(f_in, f_out)
vasprun = Vasprun(
os.path.join("deeper", "vasprun.xml.gz"),
parse_projected_eigen=True,
parse_potcar_file=False,
)
bs_vasprun_gzip = vasprun.get_band_structure(line_mode=True)
self.assertEqual(bs.efermi, bs_vasprun_gzip.efermi)
self.assertEqual(bs.as_dict(), bs_vasprun_gzip.as_dict())
# test hybrid band structures
vasprun.actual_kpoints_weights[-1] = 0.0
bs = vasprun.get_band_structure(kpoints_filename=self.TEST_FILES_DIR / "KPOINTS_Si_bands")
cbm = bs.get_cbm()
vbm = bs.get_vbm()
self.assertEqual(cbm["kpoint_index"], [0])
self.assertAlmostEqual(cbm["energy"], 6.3676)
self.assertEqual(cbm["kpoint"].label, None)
self.assertEqual(vbm["kpoint_index"], [0])
self.assertAlmostEqual(vbm["energy"], 2.8218)
self.assertEqual(vbm["kpoint"].label, None)
# test self-consistent band structure calculation for non-hybrid functionals
vasprun = Vasprun(
self.TEST_FILES_DIR / "vasprun.xml.forcehybridlikecalc",
parse_projected_eigen=True,
parse_potcar_file=False,
)
bs = vasprun.get_band_structure(
kpoints_filename=self.TEST_FILES_DIR / "KPOINTS.forcehybridlikecalc",
force_hybrid_mode=True,
line_mode=True,
)
dict_to_test = bs.get_band_gap()
self.assertTrue(dict_to_test["direct"])
self.assertAlmostEqual(dict_to_test["energy"], 6.007899999999999)
self.assertEqual(dict_to_test["transition"], "\\Gamma-\\Gamma")
self.assertEqual(bs.get_branch(0)[0]["start_index"], 0)
self.assertEqual(bs.get_branch(0)[0]["end_index"], 0)
def test_projected_magnetisation(self):
filepath = self.TEST_FILES_DIR / "vasprun.lvel.Si2H.xml"
vasprun = Vasprun(filepath, parse_projected_eigen=True)
        self.assertIsNotNone(vasprun.projected_magnetisation)
self.assertEqual(vasprun.projected_magnetisation.shape, (76, 240, 4, 9, 3))
self.assertAlmostEqual(vasprun.projected_magnetisation[0, 0, 0, 0, 0], -0.0712)
def test_smart_efermi(self):
# branch 1 - E_fermi does not cross a band
vrun = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.LiF")
smart_fermi = vrun.calculate_efermi()
self.assertAlmostEqual(smart_fermi, vrun.efermi, places=4)
eigen_gap = vrun.eigenvalue_band_properties[0]
bs_gap = vrun.get_band_structure(efermi=smart_fermi).get_band_gap()["energy"]
self.assertAlmostEqual(bs_gap, eigen_gap, places=3)
# branch 2 - E_fermi crosses a band but bandgap=0
vrun = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.Al")
smart_fermi = vrun.calculate_efermi()
self.assertAlmostEqual(smart_fermi, vrun.efermi, places=4)
eigen_gap = vrun.eigenvalue_band_properties[0]
bs_gap = vrun.get_band_structure(efermi=smart_fermi).get_band_gap()["energy"]
self.assertAlmostEqual(bs_gap, eigen_gap, places=3)
# branch 3 - E_fermi crosses a band in an insulator
vrun = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.LiH_bad_efermi")
smart_fermi = vrun.calculate_efermi()
self.assertNotAlmostEqual(smart_fermi, vrun.efermi, places=4)
eigen_gap = vrun.eigenvalue_band_properties[0]
bs_gap = vrun.get_band_structure(efermi="smart").get_band_gap()["energy"]
self.assertAlmostEqual(bs_gap, eigen_gap, places=3)
self.assertNotAlmostEqual(vrun.get_band_structure(efermi=None).get_band_gap()["energy"], eigen_gap, places=3)
self.assertNotEqual(bs_gap, 0)
def test_sc_step_overflow(self):
filepath = self.TEST_FILES_DIR / "vasprun.xml.sc_overflow"
# with warnings.catch_warnings(record=True) as w:
# warnings.simplefilter("always")
# vasprun = Vasprun(filepath)
# self.assertEqual(len(w), 3)
vasprun = Vasprun(filepath)
estep = vasprun.ionic_steps[0]["electronic_steps"][29]
self.assertTrue(np.isnan(estep["e_wo_entrp"]))
def test_update_potcar(self):
filepath = self.TEST_FILES_DIR / "vasprun.xml"
potcar_path = self.TEST_FILES_DIR / "POTCAR.LiFePO4.gz"
potcar_path2 = self.TEST_FILES_DIR / "POTCAR2.LiFePO4.gz"
vasprun = Vasprun(filepath, parse_potcar_file=False)
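        # before a POTCAR is supplied, only the TITEL strings are known and every hash is None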
self.assertEqual(
vasprun.potcar_spec,
[
{"titel": "PAW_PBE Li 17Jan2003", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE P 17Jan2003", "hash": None},
{"titel": "PAW_PBE O 08Apr2002", "hash": None},
],
)
vasprun.update_potcar_spec(potcar_path)
self.assertEqual(
vasprun.potcar_spec,
[
{
"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8",
},
{
"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115",
},
{
"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115",
},
{
"titel": "PAW_PBE P 17Jan2003",
"hash": "7dc3393307131ae67785a0cdacb61d5f",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
)
vasprun2 = Vasprun(filepath, parse_potcar_file=False)
self.assertRaises(ValueError, vasprun2.update_potcar_spec, potcar_path2)
vasprun = Vasprun(filepath, parse_potcar_file=potcar_path)
self.assertEqual(
vasprun.potcar_spec,
[
{
"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8",
},
{
"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115",
},
{
"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115",
},
{
"titel": "PAW_PBE P 17Jan2003",
"hash": "7dc3393307131ae67785a0cdacb61d5f",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
)
self.assertRaises(ValueError, Vasprun, filepath, parse_potcar_file=potcar_path2)
def test_search_for_potcar(self):
filepath = self.TEST_FILES_DIR / "vasprun.xml"
vasprun = Vasprun(filepath, parse_potcar_file=True)
self.assertEqual(
vasprun.potcar_spec,
[
{
"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8",
},
{
"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115",
},
{
"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115",
},
{
"titel": "PAW_PBE P 17Jan2003",
"hash": "7dc3393307131ae67785a0cdacb61d5f",
},
{
"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982",
},
],
)
def test_potcar_not_found(self):
filepath = self.TEST_FILES_DIR / "vasprun.xml"
# Ensure no potcar is found and nothing is updated
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
vasprun = Vasprun(filepath, parse_potcar_file=".")
self.assertEqual(len(w), 2)
self.assertEqual(
vasprun.potcar_spec,
[
{"titel": "PAW_PBE Li 17Jan2003", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE P 17Jan2003", "hash": None},
{"titel": "PAW_PBE O 08Apr2002", "hash": None},
],
)
def test_parsing_chemical_shift_calculations(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
filepath = self.TEST_FILES_DIR / "nmr" / "cs" / "basic" / "vasprun.xml.chemical_shift.scstep"
vasprun = Vasprun(filepath)
nestep = len(vasprun.ionic_steps[-1]["electronic_steps"])
self.assertEqual(nestep, 10)
self.assertTrue(vasprun.converged)
def test_parsing_efg_calcs(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
filepath = self.TEST_FILES_DIR / "nmr" / "efg" / "AlPO4" / "vasprun.xml"
vasprun = Vasprun(filepath)
nestep = len(vasprun.ionic_steps[-1]["electronic_steps"])
self.assertEqual(nestep, 18)
self.assertTrue(vasprun.converged)
def test_charged_structure(self):
vpath = self.TEST_FILES_DIR / "vasprun.charged.xml"
potcar_path = self.TEST_FILES_DIR / "POT_GGA_PAW_PBE" / "POTCAR.Si.gz"
vasprun = Vasprun(vpath, parse_potcar_file=False)
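        # the net charge is inferred by comparing NELECT with the electron count implied by the POTCAR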
vasprun.update_charge_from_potcar(potcar_path)
self.assertEqual(vasprun.parameters.get("NELECT", 8), 9)
self.assertEqual(vasprun.structures[0].charge, 1)
vpath = self.TEST_FILES_DIR / "vasprun.split.charged.xml"
potcar_path = self.TEST_FILES_DIR / "POTCAR.split.charged.gz"
vasprun = Vasprun(vpath, parse_potcar_file=False)
vasprun.update_charge_from_potcar(potcar_path)
self.assertEqual(vasprun.parameters.get("NELECT", 0), 7)
self.assertEqual(vasprun.structures[-1].charge, 1)
def test_kpointset_electronvelocities(self):
vpath = self.TEST_FILES_DIR / "vasprun.lvel.Si2H.xml"
vasprun = Vasprun(vpath, parse_potcar_file=False)
self.assertEqual(vasprun.eigenvalues[Spin.up].shape[0], len(vasprun.actual_kpoints))
class OutcarTest(PymatgenTest):
_multiprocess_shared_ = True
def test_init(self):
for f in ["OUTCAR", "OUTCAR.gz"]:
filepath = self.TEST_FILES_DIR / f
outcar = Outcar(filepath)
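            # reference per-site magnetization and charge, decomposed by orbital, as tabulated in the OUTCAR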
expected_mag = (
{"d": 0.0, "p": 0.003, "s": 0.002, "tot": 0.005},
{"d": 0.798, "p": 0.008, "s": 0.007, "tot": 0.813},
{"d": 0.798, "p": 0.008, "s": 0.007, "tot": 0.813},
{"d": 0.0, "p": -0.117, "s": 0.005, "tot": -0.112},
{"d": 0.0, "p": -0.165, "s": 0.004, "tot": -0.162},
{"d": 0.0, "p": -0.117, "s": 0.005, "tot": -0.112},
{"d": 0.0, "p": -0.165, "s": 0.004, "tot": -0.162},
)
expected_chg = (
{"p": 0.154, "s": 0.078, "d": 0.0, "tot": 0.232},
{"p": 0.707, "s": 0.463, "d": 8.316, "tot": 9.486},
{"p": 0.707, "s": 0.463, "d": 8.316, "tot": 9.486},
{"p": 3.388, "s": 1.576, "d": 0.0, "tot": 4.964},
{"p": 3.365, "s": 1.582, "d": 0.0, "tot": 4.947},
{"p": 3.388, "s": 1.576, "d": 0.0, "tot": 4.964},
{"p": 3.365, "s": 1.582, "d": 0.0, "tot": 4.947},
)
            self.assertEqual(
                outcar.magnetization,
                expected_mag,
                "Wrong magnetization read from Outcar",
            )
            self.assertEqual(outcar.charge, expected_chg, "Wrong charge read from Outcar")
self.assertFalse(outcar.is_stopped)
self.assertEqual(
outcar.run_stats,
{
"System time (sec)": 0.938,
"Total CPU time used (sec)": 545.142,
"Elapsed time (sec)": 546.709,
"Maximum memory used (kb)": 0.0,
"Average memory used (kb)": 0.0,
"User time (sec)": 544.204,
"cores": "8",
},
)
self.assertAlmostEqual(outcar.efermi, 2.0112)
self.assertAlmostEqual(outcar.nelect, 44.9999991)
self.assertAlmostEqual(outcar.total_mag, 0.9999998)
self.assertIsNotNone(outcar.as_dict())
self.assertFalse(outcar.lepsilon)
            toten = sum(outcar.final_energy_contribs.values())
self.assertAlmostEqual(toten, outcar.final_energy, 6)
def test_stopped_old(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.stopped"
outcar = Outcar(filepath)
self.assertTrue(outcar.is_stopped)
for f in ["OUTCAR.lepsilon_old_born", "OUTCAR.lepsilon_old_born.gz"]:
filepath = self.TEST_FILES_DIR / f
outcar = Outcar(filepath)
self.assertTrue(outcar.lepsilon)
self.assertAlmostEqual(outcar.dielectric_tensor[0][0], 3.716432)
self.assertAlmostEqual(outcar.dielectric_tensor[0][1], -0.20464)
self.assertAlmostEqual(outcar.dielectric_tensor[1][2], -0.20464)
self.assertAlmostEqual(outcar.dielectric_ionic_tensor[0][0], 0.001419)
self.assertAlmostEqual(outcar.dielectric_ionic_tensor[0][2], 0.001419)
self.assertAlmostEqual(outcar.dielectric_ionic_tensor[2][2], 0.001419)
self.assertAlmostEqual(outcar.piezo_tensor[0][0], 0.52799)
self.assertAlmostEqual(outcar.piezo_tensor[1][3], 0.35998)
self.assertAlmostEqual(outcar.piezo_tensor[2][5], 0.35997)
self.assertAlmostEqual(outcar.piezo_ionic_tensor[0][0], 0.05868)
self.assertAlmostEqual(outcar.piezo_ionic_tensor[1][3], 0.06241)
self.assertAlmostEqual(outcar.piezo_ionic_tensor[2][5], 0.06242)
self.assertAlmostEqual(outcar.born[0][1][2], -0.385)
self.assertAlmostEqual(outcar.born[1][2][0], 0.36465)
self.assertAlmostEqual(outcar.internal_strain_tensor[0][0][0], -572.5437, places=4)
self.assertAlmostEqual(outcar.internal_strain_tensor[0][1][0], 683.2985, places=4)
self.assertAlmostEqual(outcar.internal_strain_tensor[0][1][3], 73.07059, places=4)
self.assertAlmostEqual(outcar.internal_strain_tensor[1][0][0], 570.98927, places=4)
self.assertAlmostEqual(outcar.internal_strain_tensor[1][1][0], -683.68519, places=4)
self.assertAlmostEqual(outcar.internal_strain_tensor[1][2][2], 570.98927, places=4)
def test_stopped(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.stopped"
outcar = Outcar(filepath)
self.assertTrue(outcar.is_stopped)
for f in ["OUTCAR.lepsilon", "OUTCAR.lepsilon.gz"]:
filepath = self.TEST_FILES_DIR / f
outcar = Outcar(filepath)
self.assertTrue(outcar.lepsilon)
self.assertAlmostEqual(outcar.dielectric_tensor[0][0], 3.716432)
self.assertAlmostEqual(outcar.dielectric_tensor[0][1], -0.20464)
self.assertAlmostEqual(outcar.dielectric_tensor[1][2], -0.20464)
self.assertAlmostEqual(outcar.dielectric_ionic_tensor[0][0], 0.001419)
self.assertAlmostEqual(outcar.dielectric_ionic_tensor[0][2], 0.001419)
self.assertAlmostEqual(outcar.dielectric_ionic_tensor[2][2], 0.001419)
self.assertAlmostEqual(outcar.piezo_tensor[0][0], 0.52799)
self.assertAlmostEqual(outcar.piezo_tensor[1][3], 0.35998)
self.assertAlmostEqual(outcar.piezo_tensor[2][5], 0.35997)
self.assertAlmostEqual(outcar.piezo_ionic_tensor[0][0], 0.05868)
self.assertAlmostEqual(outcar.piezo_ionic_tensor[1][3], 0.06241)
self.assertAlmostEqual(outcar.piezo_ionic_tensor[2][5], 0.06242)
self.assertAlmostEqual(outcar.born[0][1][2], -0.385)
self.assertAlmostEqual(outcar.born[1][2][0], 0.36465)
self.assertAlmostEqual(outcar.internal_strain_tensor[0][0][0], -572.5437, places=4)
self.assertAlmostEqual(outcar.internal_strain_tensor[0][1][0], 683.2985, places=4)
self.assertAlmostEqual(outcar.internal_strain_tensor[0][1][3], 73.07059, places=4)
self.assertAlmostEqual(outcar.internal_strain_tensor[1][0][0], 570.98927, places=4)
self.assertAlmostEqual(outcar.internal_strain_tensor[1][1][0], -683.68519, places=4)
self.assertAlmostEqual(outcar.internal_strain_tensor[1][2][2], 570.98927, places=4)
def test_soc(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.NiO_SOC.gz"
outcar = Outcar(filepath)
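        # noncollinear (SOC) run: each site's magnetization is a vector Magmom rather than a scalar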
expected_mag = (
{
"s": Magmom([0.0, 0.0, -0.001]),
"p": Magmom([0.0, 0.0, -0.003]),
"d": Magmom([0.0, 0.0, 1.674]),
"tot": Magmom([0.0, 0.0, 1.671]),
},
{
"s": Magmom([0.0, 0.0, 0.001]),
"p": Magmom([0.0, 0.0, 0.003]),
"d": Magmom([0.0, 0.0, -1.674]),
"tot": Magmom([0.0, 0.0, -1.671]),
},
{
"s": Magmom([0.0, 0.0, 0.0]),
"p": Magmom([0.0, 0.0, 0.0]),
"d": Magmom([0.0, 0.0, 0.0]),
"tot": Magmom([0.0, 0.0, 0.0]),
},
{
"s": Magmom([0.0, 0.0, 0.0]),
"p": Magmom([0.0, 0.0, 0.0]),
"d": Magmom([0.0, 0.0, 0.0]),
"tot": Magmom([0.0, 0.0, 0.0]),
},
)
# test note: Magmom class uses np.allclose() when testing for equality
# so fine to use assertEqual here
self.assertEqual(
outcar.magnetization,
expected_mag,
"Wrong vector magnetization read from Outcar for SOC calculation",
)
def test_polarization(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.BaTiO3.polar"
outcar = Outcar(filepath)
        self.assertTrue(outcar.spin)
        self.assertFalse(outcar.noncollinear)
self.assertAlmostEqual(outcar.p_ion[0], 0.0)
self.assertAlmostEqual(outcar.p_ion[1], 0.0)
self.assertAlmostEqual(outcar.p_ion[2], -5.56684)
self.assertAlmostEqual(outcar.p_sp1[0], 2.00068)
self.assertAlmostEqual(outcar.p_sp2[0], -2.00044)
self.assertAlmostEqual(outcar.p_elec[0], 0.00024)
self.assertAlmostEqual(outcar.p_elec[1], 0.00019)
self.assertAlmostEqual(outcar.p_elec[2], 3.61674)
def test_pseudo_zval(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.BaTiO3.polar"
outcar = Outcar(filepath)
self.assertDictEqual({"Ba": 10.00, "Ti": 10.00, "O": 6.00}, outcar.zval_dict)
filepath = self.TEST_FILES_DIR / "OUTCAR.LaSnNO2.polar"
outcar = Outcar(filepath)
self.assertDictEqual({"La": 11.0, "N": 5.0, "O": 6.0, "Sn": 14.0}, outcar.zval_dict)
def test_dielectric(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.dielectric"
outcar = Outcar(filepath)
outcar.read_corrections()
self.assertAlmostEqual(outcar.data["dipol_quadrupol_correction"], 0.03565)
self.assertAlmostEqual(outcar.final_energy, -797.46760559)
def test_freq_dielectric(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.LOPTICS"
outcar = Outcar(filepath)
outcar.read_freq_dielectric()
self.assertAlmostEqual(outcar.dielectric_energies[0], 0)
self.assertAlmostEqual(outcar.dielectric_energies[-1], 39.826101)
self.assertAlmostEqual(outcar.dielectric_tensor_function[0][0, 0], 8.96938800)
self.assertAlmostEqual(
outcar.dielectric_tensor_function[-1][0, 0],
7.36167000e-01 + 1.53800000e-03j,
)
self.assertEqual(len(outcar.dielectric_energies), len(outcar.dielectric_tensor_function))
np.testing.assert_array_equal(
outcar.dielectric_tensor_function[0],
outcar.dielectric_tensor_function[0].transpose(),
)
plasma_freq = outcar.plasma_frequencies
self.assertArrayAlmostEqual(plasma_freq["intraband"], np.zeros((3, 3)))
self.assertArrayAlmostEqual(
plasma_freq["interband"],
[
[367.49, 63.939, 11.976],
[63.939, 381.155, -24.461],
[11.976, -24.461, 297.844],
],
)
def test_freq_dielectric_vasp544(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.LOPTICS.vasp544"
outcar = Outcar(filepath)
outcar.read_freq_dielectric()
self.assertAlmostEqual(outcar.dielectric_energies[0], 0)
self.assertAlmostEqual(outcar.dielectric_energies[-1], 39.63964)
self.assertAlmostEqual(outcar.dielectric_tensor_function[0][0, 0], 12.769435 + 0j)
self.assertAlmostEqual(outcar.dielectric_tensor_function[-1][0, 0], 0.828615 + 0.016594j)
self.assertEqual(len(outcar.dielectric_energies), len(outcar.dielectric_tensor_function))
np.testing.assert_array_equal(
outcar.dielectric_tensor_function[0],
outcar.dielectric_tensor_function[0].transpose(),
)
def test_parse_sci_notation(self):
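        # fixed-width OUTCAR columns can run together (e.g. "0.15467E+02-0.30654E-16"), so the parser
        # must pick out individual scientific-notation tokens rather than splitting on whitespace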
invalid_pattern = "23535.35 35235.34 325325.3"
valid_pattern1 = " 0.00000E+00 0.00000E+00 0.00000E+00 0.00000E+00 0.00000E+00 0.00000E+00 0.00000E+00"
valid_pattern2 = " 0.62963E+00 0.15467E+02 0.15467E+02 0.15467E+02-0.30654E-16-0.91612E-16 0.52388E-16"
self.assertEqual(Outcar._parse_sci_notation(invalid_pattern), [])
self.assertEqual(Outcar._parse_sci_notation(valid_pattern1), [0, 0, 0, 0, 0, 0, 0])
self.assertEqual(
Outcar._parse_sci_notation(valid_pattern2),
[
0.62963,
0.15467e02,
0.15467e02,
0.15467e02,
-0.30654e-16,
-0.91612e-16,
0.52388e-16,
],
)
def test_read_elastic_tensor(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.total_tensor.Li2O.gz"
outcar = Outcar(filepath)
outcar.read_elastic_tensor()
self.assertAlmostEqual(outcar.data["elastic_tensor"][0][0], 1986.3391)
self.assertAlmostEqual(outcar.data["elastic_tensor"][0][1], 187.8324)
self.assertAlmostEqual(outcar.data["elastic_tensor"][3][3], 586.3034)
def test_read_piezo_tensor(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.lepsilon.gz"
outcar = Outcar(filepath)
outcar.read_piezo_tensor()
self.assertAlmostEqual(outcar.data["piezo_tensor"][0][0], 0.52799)
self.assertAlmostEqual(outcar.data["piezo_tensor"][1][3], 0.35998)
self.assertAlmostEqual(outcar.data["piezo_tensor"][2][5], 0.35997)
def test_core_state_eigen(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.CL"
cl = Outcar(filepath).read_core_state_eigen()
self.assertAlmostEqual(cl[6]["2s"][-1], -174.4779)
filepath = self.TEST_FILES_DIR / "OUTCAR.icorelevel"
outcar = Outcar(filepath)
cl = outcar.read_core_state_eigen()
self.assertAlmostEqual(cl[4]["3d"][-1], -31.4522)
# test serialization
outcar.as_dict()
def test_avg_core_poten(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.lepsilon"
cp = Outcar(filepath).read_avg_core_poten()
self.assertAlmostEqual(cp[-1][1], -90.0487)
filepath = self.TEST_FILES_DIR / "OUTCAR"
cp = Outcar(filepath).read_avg_core_poten()
self.assertAlmostEqual(cp[0][6], -73.1068)
filepath = self.TEST_FILES_DIR / "OUTCAR.bad_core_poten.gz"
cp = Outcar(filepath).read_avg_core_poten()
self.assertAlmostEqual(cp[0][1], -101.5055)
def test_single_atom(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.Al"
outcar = Outcar(filepath)
expected_mag = ({"p": 0.0, "s": 0.0, "d": 0.0, "tot": 0.0},)
expected_chg = ({"p": 0.343, "s": 0.425, "d": 0.0, "tot": 0.768},)
self.assertAlmostEqual(outcar.magnetization, expected_mag)
self.assertAlmostEqual(outcar.charge, expected_chg)
self.assertFalse(outcar.is_stopped)
self.assertEqual(
outcar.run_stats,
{
"System time (sec)": 0.592,
"Total CPU time used (sec)": 50.194,
"Elapsed time (sec)": 52.337,
"Maximum memory used (kb)": 62900.0,
"Average memory used (kb)": 0.0,
"User time (sec)": 49.602,
"cores": "32",
},
)
self.assertAlmostEqual(outcar.efermi, 8.0942)
self.assertAlmostEqual(outcar.nelect, 3)
self.assertAlmostEqual(outcar.total_mag, 8.2e-06)
self.assertIsNotNone(outcar.as_dict())
def test_chemical_shielding(self):
filename = self.TEST_FILES_DIR / "nmr" / "cs" / "core.diff" / "hydromagnesite" / "OUTCAR"
outcar = Outcar(filename)
expected_chemical_shielding = [
[191.9974, 69.5232, 0.6342],
[195.0808, 68.183, 0.833],
[192.0389, 69.5762, 0.6329],
[195.0844, 68.1756, 0.8336],
[192.005, 69.5289, 0.6339],
[195.0913, 68.1859, 0.833],
[192.0237, 69.565, 0.6333],
[195.0788, 68.1733, 0.8337],
]
self.assertAlmostEqual(
len(outcar.data["chemical_shielding"]["valence_only"][20:28]),
len(expected_chemical_shielding),
)
self.assertArrayAlmostEqual(
outcar.data["chemical_shielding"]["valence_and_core"][20:28],
expected_chemical_shielding,
decimal=5,
)
def test_chemical_shielding_with_different_core_contribution(self):
filename = self.TEST_FILES_DIR / "nmr" / "cs" / "core.diff" / "core.diff.chemical.shifts.OUTCAR"
outcar = Outcar(filename)
c_vo = outcar.data["chemical_shielding"]["valence_only"][7]
for x1, x2 in zip(list(c_vo), [198.7009, 73.7484, 1.0000]):
self.assertAlmostEqual(x1, x2)
c_vc = outcar.data["chemical_shielding"]["valence_and_core"][7]
for x1, x2 in zip(list(c_vc), [-1.9406, 73.7484, 1.0000]):
self.assertAlmostEqual(x1, x2)
def test_cs_raw_tensors(self):
filename = self.TEST_FILES_DIR / "nmr" / "cs" / "core.diff" / "core.diff.chemical.shifts.OUTCAR"
outcar = Outcar(filename)
unsym_tensors = outcar.data["unsym_cs_tensor"]
self.assertEqual(
unsym_tensors[0],
[
[-145.814605, -4.263425, 0.000301],
[4.263434, -145.812238, -8.7e-05],
[0.000136, -0.000189, -142.794068],
],
)
self.assertEqual(
unsym_tensors[29],
[
[287.789318, -53.799325, 30.900024],
[-53.799571, 225.668117, -17.839598],
[3.801103, -2.195218, 88.896756],
],
)
def test_cs_g0_contribution(self):
filename = self.TEST_FILES_DIR / "nmr" / "cs" / "core.diff" / "core.diff.chemical.shifts.OUTCAR"
outcar = Outcar(filename)
g0_contrib = outcar.data["cs_g0_contribution"]
self.assertEqual(
g0_contrib,
[
[-8.773535, 9e-06, 1e-06],
[1.7e-05, -8.773536, -0.0792],
[-6e-06, -0.008328, -9.320237],
],
)
def test_cs_core_contribution(self):
filename = self.TEST_FILES_DIR / "nmr" / "cs" / "core.diff" / "core.diff.chemical.shifts.OUTCAR"
outcar = Outcar(filename)
core_contrib = outcar.data["cs_core_contribution"]
self.assertEqual(core_contrib, {"Mg": -412.8248405, "C": -200.5098812, "O": -271.0766979})
def test_nmr_efg(self):
filename = self.TEST_FILES_DIR / "nmr" / "efg" / "AlPO4" / "OUTCAR"
outcar = Outcar(filename)
expected_efg = [
{"eta": 0.465, "nuclear_quadrupole_moment": 146.6, "cq": -5.573},
{"eta": 0.465, "nuclear_quadrupole_moment": 146.6, "cq": -5.573},
{"eta": 0.137, "nuclear_quadrupole_moment": 146.6, "cq": 6.327},
{"eta": 0.137, "nuclear_quadrupole_moment": 146.6, "cq": 6.327},
{"eta": 0.112, "nuclear_quadrupole_moment": 146.6, "cq": -7.453},
{"eta": 0.112, "nuclear_quadrupole_moment": 146.6, "cq": -7.453},
{"eta": 0.42, "nuclear_quadrupole_moment": 146.6, "cq": -5.58},
{"eta": 0.42, "nuclear_quadrupole_moment": 146.6, "cq": -5.58},
]
self.assertEqual(len(outcar.data["efg"][2:10]), len(expected_efg))
for e1, e2 in zip(outcar.data["efg"][2:10], expected_efg):
for k in e1.keys():
self.assertAlmostEqual(e1[k], e2[k], places=5)
        expected_tensors = [
[[11.11, 1.371, 2.652], [1.371, 3.635, -3.572], [2.652, -3.572, -14.746]],
[[11.11, -1.371, 2.652], [-1.371, 3.635, 3.572], [2.652, 3.572, -14.746]],
[[-3.098, 6.511, 7.732], [6.511, 1.419, 11.445], [7.732, 11.445, 1.678]],
[
[-3.098, -6.511, 7.732],
[-6.511, 1.419, -11.445],
[7.732, -11.445, 1.678],
],
[
[2.344, -10.775, -7.006],
[-10.775, -7.152, -11.309],
[-7.006, -11.309, 4.808],
],
[
[2.344, 10.775, -7.006],
[10.775, -7.152, 11.309],
[-7.006, 11.309, 4.808],
],
[[2.404, -0.588, -6.83], [-0.588, 10.435, 3.159], [-6.83, 3.159, -12.839]],
[[2.404, 0.588, -6.83], [0.588, 10.435, -3.159], [-6.83, -3.159, -12.839]],
]
        self.assertEqual(len(outcar.data["unsym_efg_tensor"][2:10]), len(expected_tensors))
        for e1, e2 in zip(outcar.data["unsym_efg_tensor"][2:10], expected_tensors):
self.assertArrayAlmostEqual(e1, e2)
def test_read_fermi_contact_shift(self):
filepath = self.TEST_FILES_DIR / "OUTCAR_fc"
outcar = Outcar(filepath)
outcar.read_fermi_contact_shift()
self.assertAlmostEqual(outcar.data["fermi_contact_shift"]["fch"][0][0], -0.002)
self.assertAlmostEqual(outcar.data["fermi_contact_shift"]["th"][0][0], -0.052)
self.assertAlmostEqual(outcar.data["fermi_contact_shift"]["dh"][0][0], 0.0)
def test_drift(self):
outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR")
self.assertEqual(len(outcar.drift), 5)
self.assertAlmostEqual(np.sum(outcar.drift), 0)
outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR.CL")
self.assertEqual(len(outcar.drift), 79)
self.assertAlmostEqual(np.sum(outcar.drift), 0.448010)
def test_electrostatic_potential(self):
outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR")
self.assertEqual(outcar.ngf, [54, 30, 54])
self.assertTrue(np.allclose(outcar.sampling_radii, [0.9748, 0.9791, 0.7215]))
self.assertTrue(
np.allclose(
outcar.electrostatic_potential,
[-26.0704, -45.5046, -45.5046, -72.9539, -73.0621, -72.9539, -73.0621],
)
)
def test_mag_electrostatic_error(self):
outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR.electrostaticerror.gz")
self.assertEqual(
outcar.electrostatic_potential,
[
-21.1667,
-19.6865,
-22.3983,
-22.3307,
-20.5213,
-20.9292,
-21.5063,
-21.3554,
-21.74,
-21.7018,
-20.3422,
-20.6128,
-21.4405,
-21.0022,
-21.975,
-21.915,
-21.0156,
-21.9027,
-22.3712,
-21.5816,
-21.8535,
-20.5061,
-22.2474,
-22.1904,
-22.2203,
-20.1727,
-21.1068,
-20.1669,
-22.1272,
-21.3446,
-82.4717,
-83.035,
-81.8289,
-82.5957,
-81.7813,
-82.5011,
-82.6098,
-82.2885,
-81.606,
-99.1621,
-99.3146,
-99.1742,
-99.4728,
-100.2139,
-99.852,
-99.3575,
-99.4135,
-98.9092,
-99.8867,
-99.3707,
-99.0794,
-98.8376,
-99.3656,
-98.6474,
-99.3264,
-98.844,
-99.074,
-98.9354,
-99.1643,
-99.2412,
-68.7667,
-68.2528,
-66.7326,
-67.7113,
-69.2228,
-67.014,
-69.1456,
-67.3151,
-68.2625,
-67.6156,
-69.8112,
-68.9266,
-67.8286,
-69.3289,
-68.7017,
-67.2834,
-68.4665,
-68.0188,
-67.7083,
-69.7195,
-67.4078,
-67.9646,
-68.584,
-69.2387,
-69.7822,
-67.0701,
-67.8236,
-68.2468,
-68.6533,
-68.3218,
-67.5923,
-69.1266,
-68.4615,
-68.302,
-67.999,
-68.6709,
-68.9973,
-67.4147,
-68.4463,
-68.0899,
-67.665,
-69.6705,
-68.6433,
-68.4288,
-66.9027,
-67.3211,
-68.604,
-69.1299,
-67.5565,
-69.0845,
-67.4289,
-66.6864,
-67.6484,
-67.9783,
-67.7661,
-66.9797,
-67.8007,
-68.3194,
-69.3671,
-67.2708,
],
)
def test_onsite_density_matrix(self):
outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR.LinearResponseU.gz")
matrices = outcar.data["onsite_density_matrices"]
self.assertEqual(matrices[0][Spin.up][0][0], 1.0227)
self.assertEqual(len(matrices[0][Spin.up]), 5)
self.assertEqual(len(matrices[0][Spin.up][0]), 5)
self.assertTrue("onsite_density_matrices" in outcar.as_dict())
outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR_merged_numbers")
matrices = outcar.data["onsite_density_matrices"]
self.assertEqual(matrices[0][Spin.up][0][-1], 0.0)
self.assertEqual(len(matrices[0][Spin.up]), 7)
self.assertEqual(len(matrices[0][Spin.up][0]), 7)
self.assertTrue("onsite_density_matrices" in outcar.as_dict())
outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR_merged_numbers2")
self.assertTrue("onsite_density_matrices" in outcar.as_dict())
def test_nplwvs(self):
outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR")
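        # plane-wave counts: one overall value (nplwv) plus one count per k-point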
self.assertEqual(outcar.data["nplwv"], [[34560]])
self.assertEqual(
outcar.data["nplwvs_at_kpoints"],
[
1719,
1714,
1722,
1728,
1722,
1726,
1722,
1720,
1717,
1724,
1715,
1724,
1726,
1724,
1728,
1715,
1722,
1715,
1726,
1730,
1730,
1715,
1716,
1729,
1727,
1723,
1721,
1712,
1723,
1719,
1717,
1717,
1724,
1719,
1719,
1727,
1726,
1730,
1719,
1720,
1718,
1717,
1722,
1719,
1709,
1714,
1724,
1726,
1718,
1713,
1720,
1713,
1711,
1713,
1715,
1717,
1728,
1726,
1712,
1722,
1714,
1713,
1717,
1714,
1714,
1717,
1712,
1710,
1721,
1722,
1724,
1720,
1726,
1719,
1722,
1714,
],
)
outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR.CL")
self.assertEqual(outcar.data["nplwv"], [[None]])
self.assertEqual(outcar.data["nplwvs_at_kpoints"], [85687])
def test_vasp620_format(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.vasp.6.2.0"
outcar = Outcar(filepath)
        self.assertIsNone(outcar.run_stats["Average memory used (kb)"])
class BSVasprunTest(PymatgenTest):
_multiprocess_shared_ = True
def test_get_band_structure(self):
filepath = self.TEST_FILES_DIR / "vasprun_Si_bands.xml"
vasprun = BSVasprun(filepath, parse_potcar_file=False)
bs = vasprun.get_band_structure(kpoints_filename=self.TEST_FILES_DIR / "KPOINTS_Si_bands")
cbm = bs.get_cbm()
vbm = bs.get_vbm()
self.assertEqual(cbm["kpoint_index"], [13], "wrong cbm kpoint index")
        self.assertAlmostEqual(cbm["energy"], 6.2301, msg="wrong cbm energy")
self.assertEqual(cbm["band_index"], {Spin.up: [4], Spin.down: [4]}, "wrong cbm bands")
self.assertEqual(vbm["kpoint_index"], [0, 63, 64])
        self.assertAlmostEqual(vbm["energy"], 5.6158, msg="wrong vbm energy")
self.assertEqual(
vbm["band_index"],
{Spin.up: [1, 2, 3], Spin.down: [1, 2, 3]},
"wrong vbm bands",
)
self.assertEqual(vbm["kpoint"].label, "\\Gamma", "wrong vbm label")
self.assertEqual(cbm["kpoint"].label, None, "wrong cbm label")
d = vasprun.as_dict()
self.assertIn("eigenvalues", d["output"])
class OszicarTest(PymatgenTest):
def test_init(self):
filepath = self.TEST_FILES_DIR / "OSZICAR"
oszicar = Oszicar(filepath)
self.assertEqual(len(oszicar.electronic_steps), len(oszicar.ionic_steps))
self.assertEqual(len(oszicar.all_energies), 60)
self.assertAlmostEqual(oszicar.final_energy, -526.63928)
class LocpotTest(PymatgenTest):
def test_init(self):
filepath = self.TEST_FILES_DIR / "LOCPOT"
locpot = Locpot.from_file(filepath)
self.assertAlmostEqual(-217.05226954, sum(locpot.get_average_along_axis(0)))
self.assertAlmostEqual(locpot.get_axis_grid(0)[-1], 2.87629, 2)
self.assertAlmostEqual(locpot.get_axis_grid(1)[-1], 2.87629, 2)
self.assertAlmostEqual(locpot.get_axis_grid(2)[-1], 2.87629, 2)
class ChgcarTest(PymatgenTest):
@classmethod
def setUpClass(cls):
filepath = cls.TEST_FILES_DIR / "CHGCAR.nospin"
cls.chgcar_no_spin = Chgcar.from_file(filepath)
filepath = cls.TEST_FILES_DIR / "CHGCAR.spin"
cls.chgcar_spin = Chgcar.from_file(filepath)
filepath = cls.TEST_FILES_DIR / "CHGCAR.Fe3O4"
cls.chgcar_fe3o4 = Chgcar.from_file(filepath)
filepath = cls.TEST_FILES_DIR / "CHGCAR.NiO_SOC.gz"
cls.chgcar_NiO_SOC = Chgcar.from_file(filepath)
def test_init(self):
self.assertAlmostEqual(self.chgcar_no_spin.get_integrated_diff(0, 2)[0, 1], 0)
self.assertAlmostEqual(self.chgcar_spin.get_integrated_diff(0, 1)[0, 1], -0.0043896932237534022)
# test sum
chgcar = self.chgcar_spin + self.chgcar_spin
self.assertAlmostEqual(chgcar.get_integrated_diff(0, 1)[0, 1], -0.0043896932237534022 * 2)
chgcar = self.chgcar_spin - self.chgcar_spin
self.assertAlmostEqual(chgcar.get_integrated_diff(0, 1)[0, 1], 0)
ans = [1.56472768, 3.25985108, 3.49205728, 3.66275028, 3.8045896, 5.10813352]
myans = self.chgcar_fe3o4.get_integrated_diff(0, 3, 6)
self.assertTrue(np.allclose(myans[:, 1], ans))
def test_write(self):
self.chgcar_spin.write_file("CHGCAR_pmg")
with open("CHGCAR_pmg") as f:
for i, line in enumerate(f):
if i == 22130:
self.assertEqual("augmentation occupancies 1 15\n", line)
if i == 44255:
self.assertEqual("augmentation occupancies 1 15\n", line)
os.remove("CHGCAR_pmg")
def test_soc_chgcar(self):
self.assertEqual(
set(self.chgcar_NiO_SOC.data.keys()),
{"total", "diff_x", "diff_y", "diff_z", "diff"},
)
self.assertTrue(self.chgcar_NiO_SOC.is_soc)
self.assertEqual(
self.chgcar_NiO_SOC.data["diff"].shape,
self.chgcar_NiO_SOC.data["diff_y"].shape,
)
# check our construction of chg.data['diff'] makes sense
# this has been checked visually too and seems reasonable
self.assertEqual(
abs(self.chgcar_NiO_SOC.data["diff"][0][0][0]),
np.linalg.norm(
[
self.chgcar_NiO_SOC.data["diff_x"][0][0][0],
self.chgcar_NiO_SOC.data["diff_y"][0][0][0],
self.chgcar_NiO_SOC.data["diff_z"][0][0][0],
]
),
)
# and that the net magnetization is about zero
# note: we get ~ 0.08 here, seems a little high compared to
# vasp output, but might be due to chgcar limitations?
self.assertAlmostEqual(self.chgcar_NiO_SOC.net_magnetization, 0.0, places=0)
self.chgcar_NiO_SOC.write_file("CHGCAR_pmg_soc")
chg_from_file = Chgcar.from_file("CHGCAR_pmg_soc")
self.assertTrue(chg_from_file.is_soc)
os.remove("CHGCAR_pmg_soc")
def test_hdf5(self):
chgcar = Chgcar.from_file(self.TEST_FILES_DIR / "CHGCAR.NiO_SOC.gz")
chgcar.to_hdf5("chgcar_test.hdf5")
import h5py
with h5py.File("chgcar_test.hdf5", "r") as f:
self.assertArrayAlmostEqual(np.array(f["vdata"]["total"]), chgcar.data["total"])
self.assertArrayAlmostEqual(np.array(f["vdata"]["diff"]), chgcar.data["diff"])
self.assertArrayAlmostEqual(np.array(f["lattice"]), chgcar.structure.lattice.matrix)
self.assertArrayAlmostEqual(np.array(f["fcoords"]), chgcar.structure.frac_coords)
for z in f["Z"]:
self.assertIn(z, [Element.Ni.Z, Element.O.Z])
for sp in f["species"]:
self.assertIn(sp, ["Ni", "O"])
chgcar2 = Chgcar.from_hdf5("chgcar_test.hdf5")
self.assertArrayAlmostEqual(chgcar2.data["total"], chgcar.data["total"])
os.remove("chgcar_test.hdf5")
def test_spin_data(self):
d = self.chgcar_spin.spin_data
for k, v in d.items():
self.assertEqual(v.shape, (48, 48, 48))
def test_add(self):
chgcar_sum = self.chgcar_spin + self.chgcar_spin
self.assertArrayAlmostEqual(chgcar_sum.data["total"], self.chgcar_spin.data["total"] * 2)
chgcar_copy = self.chgcar_spin.copy()
chgcar_copy.structure = self.get_structure("Li2O")
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
chgcar_sum = chgcar_copy + self.chgcar_spin
# Verify some things
assert len(w) == 1
assert "Structures are different. Make sure you know what you are doing..." in str(w[-1].message)
self.assertRaises(ValueError, self.chgcar_spin.__add__, self.chgcar_fe3o4)
self.assertRaises(ValueError, self.chgcar_spin.__add__, self.chgcar_no_spin)
def test_as_dict_and_from_dict(self):
d = self.chgcar_NiO_SOC.as_dict()
chgcar_from_dict = Chgcar.from_dict(d)
self.assertArrayAlmostEqual(self.chgcar_NiO_SOC.data["total"], chgcar_from_dict.data["total"])
self.assertArrayAlmostEqual(
self.chgcar_NiO_SOC.structure.lattice.matrix,
chgcar_from_dict.structure.lattice.matrix,
)
class ElfcarTest(PymatgenTest):
def test_init(self):
elfcar = Elfcar.from_file(self.TEST_FILES_DIR / "ELFCAR.gz")
self.assertAlmostEqual(0.19076207645194002, np.mean(elfcar.data["total"]))
self.assertAlmostEqual(0.19076046677910055, np.mean(elfcar.data["diff"]))
reconstituted = Elfcar.from_dict(elfcar.as_dict())
self.assertEqual(elfcar.data, reconstituted.data)
self.assertEqual(elfcar.poscar.structure, reconstituted.poscar.structure)
def test_alpha(self):
elfcar = Elfcar.from_file(self.TEST_FILES_DIR / "ELFCAR.gz")
alpha = elfcar.get_alpha()
self.assertAlmostEqual(2.936678808979031, np.median(alpha.data["total"]))
def test_interpolation(self):
elfcar = Elfcar.from_file(self.TEST_FILES_DIR / "ELFCAR.gz")
self.assertAlmostEqual(0.0918471, elfcar.value_at(0.4, 0.5, 0.6))
self.assertEqual(100, len(elfcar.linear_slice([0.0, 0.0, 0.0], [1.0, 1.0, 1.0])))
class ProcarTest(PymatgenTest):
_multiprocess_shared_ = True
def test_init(self):
filepath = self.TEST_FILES_DIR / "PROCAR.simple"
p = Procar(filepath)
self.assertAlmostEqual(p.get_occupation(0, "d")[Spin.up], 0)
self.assertAlmostEqual(p.get_occupation(0, "s")[Spin.up], 0.35381249999999997)
self.assertAlmostEqual(p.get_occupation(0, "p")[Spin.up], 1.19540625)
self.assertRaises(ValueError, p.get_occupation, 1, "m")
self.assertEqual(p.nbands, 10)
self.assertEqual(p.nkpoints, 10)
self.assertEqual(p.nions, 3)
lat = Lattice.cubic(3.0)
s = Structure(
lat,
["Li", "Na", "K"],
[[0.0, 0.0, 0.0], [0.25, 0.25, 0.25], [0.75, 0.75, 0.75]],
)
d = p.get_projection_on_elements(s)
self.assertAlmostEqual(d[Spin.up][2][2], {"Na": 0.042, "K": 0.646, "Li": 0.042})
filepath = self.TEST_FILES_DIR / "PROCAR"
p = Procar(filepath)
self.assertAlmostEqual(p.get_occupation(0, "dxy")[Spin.up], 0.96214813853000025)
self.assertAlmostEqual(p.get_occupation(0, "dxy")[Spin.down], 0.85796295426000124)
def test_phase_factors(self):
filepath = self.TEST_FILES_DIR / "PROCAR.phase"
p = Procar(filepath)
self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 0, 0], -0.746 + 0.099j)
self.assertAlmostEqual(p.phase_factors[Spin.down][0, 0, 0, 0], 0.372 - 0.654j)
# Two Li should have same phase factor.
self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 0, 0], p.phase_factors[Spin.up][0, 0, 1, 0])
self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 2, 0], -0.053 + 0.007j)
self.assertAlmostEqual(p.phase_factors[Spin.down][0, 0, 2, 0], 0.027 - 0.047j)
# new style phase factors (VASP 5.4.4+)
filepath = self.TEST_FILES_DIR / "PROCAR.new_format_5.4.4"
p = Procar(filepath)
self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 0, 0], -0.13 + 0.199j)
class XdatcarTest(PymatgenTest):
def test_init(self):
filepath = self.TEST_FILES_DIR / "XDATCAR_4"
x = Xdatcar(filepath)
structures = x.structures
self.assertEqual(len(structures), 4)
for s in structures:
self.assertEqual(s.formula, "Li2 O1")
filepath = self.TEST_FILES_DIR / "XDATCAR_5"
x = Xdatcar(filepath)
structures = x.structures
self.assertEqual(len(structures), 4)
for s in structures:
self.assertEqual(s.formula, "Li2 O1")
x.concatenate(self.TEST_FILES_DIR / "XDATCAR_4")
self.assertEqual(len(x.structures), 8)
self.assertIsNotNone(x.get_string())
filepath = self.TEST_FILES_DIR / "XDATCAR_6"
x = Xdatcar(filepath)
structures = x.structures
self.assertNotEqual(structures[0].lattice, structures[-1].lattice)
class DynmatTest(PymatgenTest):
def test_init(self):
# nosetests pymatgen/io/vasp/tests/test_outputs.py:DynmatTest.test_init
filepath = self.TEST_FILES_DIR / "DYNMAT"
d = Dynmat(filepath)
self.assertEqual(d.nspecs, 2)
self.assertEqual(d.natoms, 6)
self.assertEqual(d.ndisps, 3)
self.assertTrue(np.allclose(d.masses, [63.546, 196.966]))
self.assertTrue(4 in d.data)
self.assertTrue(2 in d.data[4])
self.assertTrue(np.allclose(d.data[4][2]["dispvec"], [0.0, 0.05, 0.0]))
self.assertTrue(np.allclose(d.data[4][2]["dynmat"][3], [0.055046, -0.298080, 0.0]))
# TODO: test get_phonon_frequencies once cross-checked
class WavecarTest(PymatgenTest):
_multiprocess_shared_ = True
def setUp(self):
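        # simple 10 Angstrom cubic cell; reciprocal lattice vectors are b_i = 2*pi*(a_j x a_k)/V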
a = np.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]])
self.vol = np.dot(a[0, :], np.cross(a[1, :], a[2, :]))
b = np.array(
[
np.cross(a[1, :], a[2, :]),
np.cross(a[2, :], a[0, :]),
np.cross(a[0, :], a[1, :]),
]
)
self.b = 2 * np.pi * b / self.vol
self.a = a
self.w = Wavecar(self.TEST_FILES_DIR / "WAVECAR.N2")
self.wH2 = Wavecar(self.TEST_FILES_DIR / "WAVECAR.H2_low_symm")
self.wH2_gamma = Wavecar(self.TEST_FILES_DIR / "WAVECAR.H2_low_symm.gamma")
self.w_ncl = Wavecar(self.TEST_FILES_DIR / "WAVECAR.H2.ncl")
def test_standard(self):
w = self.w
a = np.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]])
vol = np.dot(a[0, :], np.cross(a[1, :], a[2, :]))
b = np.array(
[
np.cross(a[1, :], a[2, :]),
np.cross(a[2, :], a[0, :]),
np.cross(a[0, :], a[1, :]),
]
)
b = 2 * np.pi * b / vol
self.assertEqual(w.filename, self.TEST_FILES_DIR / "WAVECAR.N2")
self.assertAlmostEqual(w.efermi, -5.7232, places=4)
self.assertEqual(w.encut, 25)
self.assertEqual(w.nb, 9)
self.assertEqual(w.nk, 1)
self.assertTrue(np.allclose(w.a, a))
self.assertTrue(np.allclose(w.b, b))
self.assertAlmostEqual(w.vol, vol)
self.assertEqual(len(w.kpoints), w.nk)
self.assertEqual(len(w.coeffs), w.nk)
self.assertEqual(len(w.coeffs[0]), w.nb)
self.assertEqual(len(w.band_energy), w.nk)
self.assertEqual(w.band_energy[0].shape, (w.nb, 3))
self.assertLessEqual(len(w.Gpoints[0]), 257)
for k in range(w.nk):
for b in range(w.nb):
self.assertEqual(len(w.coeffs[k][b]), len(w.Gpoints[k]))
with self.assertRaises(ValueError):
Wavecar(self.TEST_FILES_DIR / "WAVECAR.N2.malformed")
with self.assertRaises(ValueError):
Wavecar(self.TEST_FILES_DIR / "WAVECAR.N2", vasp_type="poop")
with self.assertRaises(ValueError):
Wavecar(self.TEST_FILES_DIR / "WAVECAR.N2", vasp_type="g")
with self.assertRaises(ValueError):
Wavecar(self.TEST_FILES_DIR / "WAVECAR.N2", vasp_type="n")
import sys
from io import StringIO
saved_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
Wavecar(self.TEST_FILES_DIR / "WAVECAR.N2", verbose=True)
self.assertNotEqual(out.getvalue().strip(), "")
finally:
sys.stdout = saved_stdout
def test_n2_45210(self):
w = Wavecar(self.TEST_FILES_DIR / "WAVECAR.N2.45210")
self.assertEqual(w.filename, self.TEST_FILES_DIR / "WAVECAR.N2.45210")
self.assertAlmostEqual(w.efermi, -5.7232, places=4)
self.assertEqual(w.encut, 25)
self.assertEqual(w.nb, 9)
self.assertEqual(w.nk, 1)
self.assertTrue(np.allclose(w.a, self.a))
self.assertTrue(np.allclose(w.b, self.b))
self.assertAlmostEqual(w.vol, self.vol)
self.assertEqual(len(w.kpoints), w.nk)
self.assertEqual(len(w.coeffs), w.nk)
self.assertEqual(len(w.coeffs[0]), w.nb)
self.assertEqual(len(w.band_energy), w.nk)
self.assertEqual(w.band_energy[0].shape, (w.nb, 3))
self.assertLessEqual(len(w.Gpoints[0]), 257)
def test_n2_spin(self):
w = Wavecar(self.TEST_FILES_DIR / "WAVECAR.N2.spin")
self.assertEqual(len(w.coeffs), 2)
self.assertEqual(len(w.band_energy), 2)
self.assertEqual(len(w.kpoints), w.nk)
self.assertEqual(len(w.Gpoints), w.nk)
self.assertEqual(len(w.coeffs[0][0]), w.nb)
self.assertEqual(len(w.band_energy[0]), w.nk)
temp_ggp = Wavecar._generate_G_points
try:
Wavecar._generate_G_points = lambda x, y, gamma: []
with self.assertRaises(ValueError):
Wavecar(self.TEST_FILES_DIR / "WAVECAR.N2")
finally:
Wavecar._generate_G_points = temp_ggp
def test__generate_nbmax(self):
self.w._generate_nbmax()
self.assertEqual(self.w._nbmax.tolist(), [5, 5, 5])
def test__generate_G_points(self):
for k in range(self.w.nk):
kp = self.w.kpoints[k]
self.assertLessEqual(len(self.w._generate_G_points(kp)), 257)
def test_evaluate_wavefunc(self):
self.w.Gpoints.append(np.array([0, 0, 0]))
self.w.kpoints.append(np.array([0, 0, 0]))
self.w.coeffs.append([[1 + 1j]])
self.assertAlmostEqual(
self.w.evaluate_wavefunc(-1, -1, [0, 0, 0]),
(1 + 1j) / np.sqrt(self.vol),
places=4,
)
self.assertAlmostEqual(
self.w.evaluate_wavefunc(0, 0, [0, 0, 0]),
np.sum(self.w.coeffs[0][0]) / np.sqrt(self.vol),
places=4,
)
w = Wavecar(self.TEST_FILES_DIR / "WAVECAR.N2.spin")
w.Gpoints.append(np.array([0, 0, 0]))
w.kpoints.append(np.array([0, 0, 0]))
w.coeffs[0].append([[1 + 1j]])
self.assertAlmostEqual(
w.evaluate_wavefunc(-1, -1, [0, 0, 0]),
(1 + 1j) / np.sqrt(self.vol),
places=4,
)
def test_fft_mesh_basic(self):
mesh = self.w.fft_mesh(0, 5)
ind = np.argmax(np.abs(mesh))
self.assertEqual(np.unravel_index(ind, mesh.shape), (14, 1, 1))
self.assertEqual(mesh[tuple((self.w.ng / 2).astype(np.int_))], 0j)
mesh = self.w.fft_mesh(0, 5, shift=False)
ind = np.argmax(np.abs(mesh))
self.assertEqual(np.unravel_index(ind, mesh.shape), (6, 8, 8))
self.assertEqual(mesh[0, 0, 0], 0j)
def test_fft_mesh_advanced(self):
ik = 0
ib = 0
mesh = self.wH2.fft_mesh(ik, ib)
mesh_gamma = self.wH2_gamma.fft_mesh(ik, ib)
mesh_ncl = self.w_ncl.fft_mesh(ik, ib)
# check equality of plane-wave coefficients
ind_max = np.unravel_index(np.argmax(np.abs(mesh)), mesh.shape)
phase = mesh[ind_max] / mesh_gamma[ind_max]
self.assertLessEqual(np.max(np.abs(mesh - phase * mesh_gamma)), 1.0e-6)
# transform to real space for further checking
mesh = np.fft.ifftn(mesh)
mesh_gamma = np.fft.ifftn(mesh_gamma)
mesh_ncl = np.fft.ifftn(mesh_ncl)
# check equality in real space for regular vs. gamma only
ind_max = np.unravel_index(np.argmax(np.abs(mesh)), mesh.shape)
phase = mesh[ind_max] / mesh_gamma[ind_max]
self.assertLessEqual(np.max(np.abs(mesh - phase * mesh_gamma)), 1.0e-6)
# spot check some points in real space
p1 = (
int(mesh.shape[0] / 2),
int(mesh.shape[1] / 2) - 1,
int(mesh.shape[2] / 2) - 2,
)
p2 = (p1[0] + 1, p1[1], p1[2])
c = np.array([[5, 0, 0], [0, 4, 0], [0, 0, 6]]) # this needs to match POSCAR, which we don't have
r1 = np.dot(np.array(p1) / mesh.shape, c)
r2 = np.dot(np.array(p2) / mesh.shape, c)
# check equality of FFT and slow FT for regular mesh (ratio, to account for normalization)
v1 = self.wH2.evaluate_wavefunc(ik, ib, r1)
v2 = self.wH2.evaluate_wavefunc(ik, ib, r2)
self.assertAlmostEqual(np.abs(mesh[p1]) / np.abs(mesh[p2]), np.abs(v1) / np.abs(v2), places=6)
# spot check one value that we happen to know from reference run
self.assertAlmostEqual(v1, -0.01947068011502887 + 0.23340228099620275j, places=8)
# check equality of FFT and slow FT for gamma-only mesh (ratio again)
v1_gamma = self.wH2_gamma.evaluate_wavefunc(ik, ib, r1)
v2_gamma = self.wH2_gamma.evaluate_wavefunc(ik, ib, r2)
self.assertAlmostEqual(
np.abs(mesh_gamma[p1]) / np.abs(mesh_gamma[p2]),
np.abs(v1_gamma) / np.abs(v2_gamma),
places=6,
)
# check equality of FFT and slow FT for ncl mesh (ratio again)
v1_ncl = self.w_ncl.evaluate_wavefunc(ik, ib, r1)
v2_ncl = self.w_ncl.evaluate_wavefunc(ik, ib, r2)
self.assertAlmostEqual(
np.abs(mesh_ncl[p1]) / np.abs(mesh_ncl[p2]),
np.abs(v1_ncl) / np.abs(v2_ncl),
places=6,
)
def test_get_parchg(self):
poscar = Poscar.from_file(self.TEST_FILES_DIR / "POSCAR")
w = self.w
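        # spin-restricted WAVECAR: PARCHG holds only "total" data on a grid doubled along each axis,
        # and without the phase the density is strictly positive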
c = w.get_parchg(poscar, 0, 0, spin=0, phase=False)
self.assertTrue("total" in c.data)
self.assertTrue("diff" not in c.data)
self.assertEqual(np.prod(c.data["total"].shape), np.prod(w.ng * 2))
self.assertTrue(np.all(c.data["total"] > 0.0))
c = w.get_parchg(poscar, 0, 0, spin=0, phase=True)
self.assertTrue("total" in c.data)
self.assertTrue("diff" not in c.data)
self.assertEqual(np.prod(c.data["total"].shape), np.prod(w.ng * 2))
self.assertFalse(np.all(c.data["total"] > 0.0))
w = Wavecar(self.TEST_FILES_DIR / "WAVECAR.N2.spin")
c = w.get_parchg(poscar, 0, 0, phase=False, scale=1)
self.assertTrue("total" in c.data)
self.assertTrue("diff" in c.data)
self.assertEqual(np.prod(c.data["total"].shape), np.prod(w.ng))
self.assertTrue(np.all(c.data["total"] > 0.0))
self.assertFalse(np.all(c.data["diff"] > 0.0))
c = w.get_parchg(poscar, 0, 0, spin=0, phase=False)
self.assertTrue("total" in c.data)
self.assertTrue("diff" not in c.data)
self.assertEqual(np.prod(c.data["total"].shape), np.prod(w.ng * 2))
self.assertTrue(np.all(c.data["total"] > 0.0))
c = w.get_parchg(poscar, 0, 0, spin=0, phase=True)
self.assertTrue("total" in c.data)
self.assertTrue("diff" not in c.data)
self.assertEqual(np.prod(c.data["total"].shape), np.prod(w.ng * 2))
self.assertFalse(np.all(c.data["total"] > 0.0))
w = self.w_ncl
w.coeffs.append([np.ones((2, 100))])
c = w.get_parchg(poscar, -1, 0, phase=False, spinor=None)
self.assertTrue("total" in c.data)
self.assertTrue("diff" not in c.data)
self.assertEqual(np.prod(c.data["total"].shape), np.prod(w.ng * 2))
self.assertFalse(np.all(c.data["total"] > 0.0))
c = w.get_parchg(poscar, -1, 0, phase=True, spinor=0)
self.assertTrue("total" in c.data)
self.assertTrue("diff" not in c.data)
self.assertEqual(np.prod(c.data["total"].shape), np.prod(w.ng * 2))
self.assertFalse(np.all(c.data["total"] > 0.0))
w.coeffs[-1] = [np.zeros((2, 100))]
c = w.get_parchg(poscar, -1, 0, phase=False, spinor=1)
self.assertTrue("total" in c.data)
self.assertTrue("diff" not in c.data)
self.assertEqual(np.prod(c.data["total"].shape), np.prod(w.ng * 2))
self.assertTrue(np.allclose(c.data["total"], 0.0))
def test_write_unks(self):
unk_std = Unk.from_file(self.TEST_FILES_DIR / "UNK.N2.std")
unk_ncl = Unk.from_file(self.TEST_FILES_DIR / "UNK.H2.ncl")
with self.assertRaises(ValueError):
self.w.write_unks(self.TEST_FILES_DIR / "UNK.N2.std")
# different grids
with ScratchDir("."):
self.w.write_unks("./unk_dir")
self.assertEqual(len(list(Path("./unk_dir").glob("UNK*"))), 1)
unk = Unk.from_file("./unk_dir/UNK00001.1")
self.assertNotEqual(unk, unk_std)
# correct grid
self.w.ng = np.array([12, 12, 12])
with ScratchDir("."):
self.w.write_unks(".")
unk = Unk.from_file("UNK00001.1")
self.assertEqual(unk, unk_std)
# ncl test
with ScratchDir("."):
self.w_ncl.write_unks(".")
unk = Unk.from_file("UNK00001.NC")
self.assertEqual(unk, unk_ncl)
class EigenvalTest(PymatgenTest):
_multiprocess_shared_ = True
def test_init(self):
eig = Eigenval(self.TEST_FILES_DIR / "EIGENVAL.gz")
self.assertEqual(eig.ispin, 1)
self.assertEqual(eig.nkpt, len(eig.kpoints))
self.assertEqual(eig.nkpt, len(eig.kpoints_weights))
self.assertEqual(eig.nkpt, eig.eigenvalues[Spin.up].shape[0])
self.assertEqual(eig.nelect, 16)
self.assertEqual(eig.nbands, eig.eigenvalues[Spin.up].shape[1])
self.assertTrue(np.max(eig.eigenvalues[Spin.up]) > 0)
self.assertTrue(np.min(eig.eigenvalues[Spin.up]) < 0)
def test_ispin2(self):
eig = Eigenval(self.TEST_FILES_DIR / "EIGENVAL.ispin2.gz")
self.assertEqual(eig.ispin, 2)
self.assertEqual(eig.nkpt, eig.eigenvalues[Spin.up].shape[0])
self.assertEqual(eig.nbands, eig.eigenvalues[Spin.up].shape[1])
self.assertEqual(eig.nkpt, eig.eigenvalues[Spin.down].shape[0])
self.assertEqual(eig.nbands, eig.eigenvalues[Spin.down].shape[1])
def test_eigenvalue_band_properties(self):
eig = Eigenval(self.TEST_FILES_DIR / "EIGENVAL.gz")
props = eig.eigenvalue_band_properties
self.assertAlmostEqual(props[0], 6.4153, places=4)
self.assertAlmostEqual(props[1], 7.5587, places=4)
self.assertAlmostEqual(props[2], 1.1434, places=4)
self.assertEqual(props[3], False)
class WavederTest(PymatgenTest):
_multiprocess_shared_ = True
def setUp(self):
wder = Waveder(self.TEST_FILES_DIR / "WAVEDER", gamma_only=True)
self.assertEqual(wder.nbands, 36)
self.assertEqual(wder.nkpoints, 56)
self.assertEqual(wder.nelect, 8)
band_i = 0
band_j = 0
kp_index = 0
spin_index = 0
cart_dir_index = 0
cder = wder.get_orbital_derivative_between_states(band_i, band_j, kp_index, spin_index, cart_dir_index)
self.assertAlmostEqual(cder, -1.33639226092e-103, places=114)
def test_consistency(self):
wder = Waveder(self.TEST_FILES_DIR / "WAVEDER.Si")
wderf = np.loadtxt(self.TEST_FILES_DIR / "WAVEDERF.Si", skiprows=1)
with open(self.TEST_FILES_DIR / "WAVEDERF.Si", "r") as f:
first_line = [int(a) for a in f.readline().split()]
self.assertEqual(wder.nkpoints, first_line[1])
self.assertEqual(wder.nbands, first_line[2])
for i in range(10):
self.assertAlmostEqual(
first=wder.get_orbital_derivative_between_states(0, i, 0, 0, 0).real,
second=wderf[i, 6],
places=10,
)
self.assertAlmostEqual(wder.cder_data[0, i, 0, 0, 0].real, wderf[i, 6], places=10)
self.assertAlmostEqual(wder.cder_data[0, i, 0, 0, 0].imag, wderf[i, 7], places=10)
self.assertAlmostEqual(wder.cder_data[0, i, 0, 0, 1].real, wderf[i, 8], places=10)
self.assertAlmostEqual(wder.cder_data[0, i, 0, 0, 1].imag, wderf[i, 9], places=10)
self.assertAlmostEqual(wder.cder_data[0, i, 0, 0, 2].real, wderf[i, 10], places=10)
self.assertAlmostEqual(wder.cder_data[0, i, 0, 0, 2].imag, wderf[i, 11], places=10)
if __name__ == "__main__":
unittest.main()
|
richardtran415/pymatgen
|
pymatgen/io/vasp/tests/test_outputs.py
|
Python
|
mit
| 87,130
|
[
"VASP",
"Wannier90",
"pymatgen"
] |
ac23ee560be84b7b750fb5083ff7312ec22f4399dc447433efb4e8792aef3de1
|
# Copyright 1999 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# Patched by Brad Chapman.
# Chris Wroe added modifications for work in myGrid
"""
This module provides code to work with the WWW version of BLAST
provided by the NCBI.
http://www.ncbi.nlm.nih.gov/BLAST/
Classes:
BlastParser Parses output from WWW blast.
_Scanner Scans output from NCBI's BLAST WWW server.
Functions:
qblast Do a BLAST search using the QBLAST API.
"""
import re
try:
import cStringIO as StringIO
except ImportError:
import StringIO
from Bio.ParserSupport import *
class BlastParser(AbstractParser):
"""Parses WWW BLAST data into a Record.Blast object.
"""
def __init__(self):
"""__init__(self)"""
import NCBIStandalone
self._scanner = _Scanner()
self._consumer = SGMLStrippingConsumer(NCBIStandalone._BlastConsumer())
def parse(self, handle):
"""parse(self, handle)"""
self._scanner.feed(handle, self._consumer)
return self._consumer.data
class _Scanner:
"""Scan BLAST output from NCBI's web server at:
http://www.ncbi.nlm.nih.gov/BLAST/
Tested with BLAST v2.0.10
Methods:
feed Feed data into the scanner.
"""
def feed(self, handle, consumer):
"""S.feed(handle, consumer)
Feed in a BLAST report for scanning. handle is a file-like
object that contains the BLAST report. consumer is a Consumer
object that will receive events as the report is scanned.
"""
from Bio import File
# This stuff appears in 2.0.12.
# <p><!--
# QBlastInfoBegin
# Status=READY
# QBlastInfoEnd
# --><p>
# <HTML>
# <HEAD>
# <TITLE>BLAST Search Results </TITLE>
# </HEAD>
# <BODY BGCOLOR="#FFFFFF" LINK="#0000FF" VLINK="#660099" ALINK="#660099
# <A HREF="http://www.ncbi.nlm.nih.gov/BLAST/blast_form.map"> <IMG SRC=
# <BR><BR><PRE>
# BLAST Formatted information
#
# </BODY>
# </HTML>
# </BODY>
# </HTML>
if isinstance(handle, File.UndoHandle):
uhandle = handle
else:
uhandle = File.UndoHandle(handle)
# Read HTML formatting up to the "BLAST" version line.
read_and_call_until(uhandle, consumer.noevent,
has_re=re.compile(r'<b>.?BLAST'))
self._scan_header(uhandle, consumer)
self._scan_rounds(uhandle, consumer)
self._scan_database_report(uhandle, consumer)
self._scan_parameters(uhandle, consumer)
# Read HTML footer information.
while uhandle.peekline():
read_and_call(uhandle, consumer.noevent)
def _scan_header(self, uhandle, consumer):
# <b>BLASTP 2.0.10 [Aug-26-1999]</b>
#
#
# <b><a href="http://www.ncbi.nlm.nih.gov/htbin-
# post/Entrez/query?uid=9254694&form=6&db=m&Dopt=r">Reference</a>:</b>
# Altschul, Stephen F., Thomas L. Madden, Alejandro A. Schäffer,
# Jinghui Zhang, Zheng Zhang, Webb Miller, and David J. Lipman (1997),
# "Gapped BLAST and PSI-BLAST: a new generation of protein database sea
# programs", Nucleic Acids Res. 25:3389-3402.
# <p>
# <b>Query=</b> gi|120291|sp|P21297|FLBT_CAUCR FLBT PROTEIN.
# (141 letters)
#
# <b>Database:</b> Non-redundant SwissProt sequences
# 82,258 sequences; 29,652,561 total letters
#
# <p> <p>If you have any problems or questions with the results of this
# If there are hits, and Graphical Overview was selected:
# <FORM NAME="BLASTFORM">
# </PRE>
# <CENTER>
# <H3><a href="/BLAST/newoptions.html#graphical-overview"> Distribution
# <input name=defline size=80 value="Mouse-over to show defline and sco
# </CENTER>
# <map name=img_map>
# <area shape=rect coords=69,101,476,106 href="#120291" ONMOUSEOVER='do
# <area shape=rect coords=156,108,305,113 href="#3024946" ONMOUSEOVER='
# </map>
# <CENTER>
# <IMG WIDTH=529 HEIGHT=115 USEMAP=#img_map BORDER=1 SRC="nph-getgif.cg
# <HR>
# <PRE> XXX
consumer.start_header()
# Read the "BLAST" version line and the following blanks.
read_and_call(uhandle, consumer.version, contains='BLAST')
read_and_call_while(uhandle, consumer.noevent, blank=1)
# Read the reference lines and the '<p>' line.
# TBLASTN 2.2.6 has a blank line instead of a "<p>".
while 1:
line = uhandle.readline()
if line[:3] == '<p>' or not line.strip():
consumer.noevent(line)
break
consumer.reference(line)
# Read the RID line, for version 2.0.12 (2.0.11?) and above.
attempt_read_and_call(uhandle, consumer.noevent, start='RID')
# Brad Chapman noticed a '<p>' line in BLASTN 2.1.1; this line
# seems to have disappeared again.
# attempt_read_and_call(uhandle, consumer.noevent, start='<p>')
attempt_read_and_call(uhandle, consumer.noevent)
        # Apparently, there's some variation in whether the Query or
        # Database section comes first.  Usually the Query does, but
        # Brad noticed a case where the Database came first.
if uhandle.peekline().find("Query=") >= 0:
self._scan_query_info(uhandle, consumer)
self._scan_database_info(uhandle, consumer)
else:
self._scan_database_info(uhandle, consumer)
self._scan_query_info(uhandle, consumer)
read_and_call_while(uhandle, consumer.noevent, blank=1)
consumer.end_header()
def _scan_blastform(self, uhandle, consumer):
if attempt_read_and_call(uhandle, consumer.noevent,
contains="BLASTFORM"):
while 1:
line = uhandle.peekline()
if is_blank_line(line):
break
elif "Query=" in line:
break
consumer.noevent(uhandle.readline())
def _scan_database_info(self, uhandle, consumer):
attempt_read_and_call(uhandle, consumer.noevent, start='<p>')
read_and_call(uhandle, consumer.database_info, contains='Database')
        # Sagar Damle reported that databases can consist of multiple lines.
        # Trickily enough, the second line can also contain the word
        # 'sequences', so match 'sequences;' (with a semicolon) instead.
read_and_call_until(uhandle, consumer.database_info,
contains='sequences;')
read_and_call(uhandle, consumer.database_info, contains='sequences;')
read_and_call(uhandle, consumer.noevent, blank=1)
attempt_read_and_call(uhandle, consumer.noevent,
contains='problems or questions')
self._scan_blastform(uhandle, consumer)
attempt_read_and_call(uhandle, consumer.noevent, blank=1)
if attempt_read_and_call(uhandle, consumer.noevent,
start="<table border=0 width=600"):
read_and_call_until(uhandle, consumer.noevent,
contains="</table>")
consumer.noevent(uhandle.readline())
read_and_call(uhandle, consumer.noevent, blank=1)
attempt_read_and_call(uhandle, consumer.noevent, start="<p>")
if attempt_read_and_call(uhandle, consumer.noevent,
contains="Taxonomy reports"):
read_and_call(uhandle, consumer.noevent, start="<BR>")
attempt_read_and_call(uhandle, consumer.noevent, start="<PRE>")
# </PRE>
# <!-- Progress msg from the server 500 7-->
# <!-- Progress msg from the server 1000 15-->
# <!-- Progress msg from the server 1500 21-->
# ...
# <PRE><HR><BR><b>Query=</b> test
# (60 letters)
if attempt_read_and_call(uhandle, consumer.noevent, start="</PRE>"):
read_and_call_until(uhandle, consumer.noevent, start="<PRE>")
while 1:
line = uhandle.peekline()
if not line[:5] == "<PRE>" or line.find("Query=") >= 0:
break
read_and_call(uhandle, consumer.noevent, start="<PRE>")
read_and_call_while(uhandle, consumer.noevent, blank=1)
def _scan_query_info(self, uhandle, consumer):
# Read the Query lines and the following blank line.
read_and_call(uhandle, consumer.query_info, contains='Query=')
read_and_call_until(uhandle, consumer.query_info, blank=1)
read_and_call_while(uhandle, consumer.noevent, blank=1)
if attempt_read_and_call(uhandle, consumer.noevent, start="<PRE>"):
read_and_call_while(uhandle, consumer.noevent, blank=1)
self._scan_blastform(uhandle, consumer)
def _scan_rounds(self, uhandle, consumer):
self._scan_descriptions(uhandle, consumer)
self._scan_alignments(uhandle, consumer)
def _scan_descriptions(self, uhandle, consumer):
consumer.start_descriptions()
# Three things can happen here:
# 1. line contains 'Score E'
# 2. line contains "No significant similarity"
# 3. no descriptions
if not attempt_read_and_call(
uhandle, consumer.description_header,
has_re=re.compile(r"Score {4,5}E")):
# Either case 2 or 3. Look for "No hits found".
attempt_read_and_call(uhandle, consumer.no_hits,
contains='No significant similarity')
read_and_call_while(uhandle, consumer.noevent, blank=1)
consumer.end_descriptions()
# Stop processing.
return
# Sequences producing significant alignments:
#
# <a href="http://www.ncbi.nlm.nih.gov:80/entrez/query.fcgi?cmd=Ret
# <a href="http://www.ncbi.nlm.nih.gov:80/entrez/query.fcgi?cmd=Ret
#
# Read the score header lines and a blank line.
read_and_call(uhandle, consumer.description_header,
start='Sequences producing')
read_and_call(uhandle, consumer.noevent, blank=1)
# Read the descriptions
# The description contains at least an <a href> into the alignments.
        # What if no alignments are chosen?
read_and_call_while(uhandle, consumer.description,
blank=0, contains='<a')
# two choices here, either blank lines or a </PRE>
if not attempt_read_and_call(uhandle, consumer.noevent,
contains='</PRE>'):
read_and_call_while(uhandle, consumer.noevent, blank=1)
consumer.end_descriptions()
def _scan_alignments(self, uhandle, consumer):
# Check to see whether I'm at an alignment or database report.
# Possibilities:
# 1) BLASTP 2.0.14, pairwise alignment
# <CENTER><b><FONT color="green">Alignments</FONT></b></CENTER>
# <PRE>
# ><a name = 121837></a><a href="http://www.ncbi.nlm.nih.gov:80/entre
# 2) BLASTP 2.0.10, pairwise alignment
# <PRE>
# <a name = 120291> </a><a href="http://www.ncbi.nlm.nih.gov:80/entre
# 3) BLASTP 2.0.10, master-slave
# <PRE>
# blast_tmp 1 MFQQIGAVQAKSGTDEPAHPCEKFPPERKCEAVFWKPLPRHEAREILLAARK
# 4) BLASTP 2.0.10, 2.0.14, database
# <PRE>
# Database: Non-redundant SwissProt sequences
# 5) BLASTX 2.2.4, pairwise alignment
# <CENTER><b><FONT color="green">Alignments</FONT></b></CENTER>
# </form>
# <script src="blastResult.js"></script><table border="0"><tr><td><FO
# <PRE>
# 6) Qblast 2.2.10, database (no 'Database' line)
# <PRE>
# Lambda K H
# Get the first two lines and examine them.
line1 = safe_readline(uhandle)
line2 = safe_readline(uhandle)
uhandle.saveline(line2)
uhandle.saveline(line1)
is_pairwise = is_masterslave = 0
if 'Alignments' in line2:
is_pairwise = 1
elif line2.startswith(' Database'):
pass
elif line2.startswith('Lambda K H'):
pass
elif line2.startswith('blast_tmp'):
is_masterslave = 1
elif line1.startswith('<PRE>'):
is_pairwise = 1
else:
raise ValueError, "Cannot resolve location at lines:\n%s\n%s" % (line1, line2)
if is_pairwise:
self._scan_pairwise_alignments(uhandle, consumer)
elif is_masterslave:
self._scan_masterslave_alignment(uhandle, consumer)
def _scan_pairwise_alignments(self, uhandle, consumer):
while 1:
read_and_call_until(uhandle, consumer.noevent, start='<PRE>')
# The first line is <PRE>. Check the second line to see if
# I'm still at an alignment.
line1 = safe_readline(uhandle)
line2 = safe_readline(uhandle)
uhandle.saveline(line2)
uhandle.saveline(line1)
# Lambda is for Q-blast results, which do not have a Database line
if line1.find('Database') >= 0 or line2.find("Database") >= 0 \
or line2.find('Lambda K H') >= 0:
break
# Occasionally, there's a bug where the alignment_header and
# hsp_header are skipped, leaving only the hsp_alignment.
# Detect this and handle it accordingly.
if line2[:6] == 'Query:':
self._scan_abbreviated_pairwise_alignment(uhandle, consumer)
else:
self._scan_one_pairwise_alignment(uhandle, consumer)
def _scan_abbreviated_pairwise_alignment(self, uhandle, consumer):
# Sometimes all header information is skipped, leaving
# only the raw alignments. I believe this is a bug because
# without the header information, you lose vital information such
# as score, target sequence id, etc.
# Format:
# <PRE>
# hsp_alignment
consumer.start_alignment()
consumer.start_hsp()
read_and_call(uhandle, consumer.noevent, start='<PRE>')
self._scan_hsp_alignment(uhandle, consumer)
consumer.end_hsp()
consumer.end_alignment()
def _scan_one_pairwise_alignment(self, uhandle, consumer):
# Alignment format:
# <CENTER><b><FONT color="green">Alignments</FONT></b></CENTER>
# (BLAST 2.0.14)
# <PRE>
# alignment_header
# hsp_header
# hsp_alignment
# [...]
# The hsp_header and hsp_alignment blocks can be repeated.
consumer.start_alignment()
read_and_call(uhandle, consumer.noevent, start='<PRE>')
self._scan_alignment_header(uhandle, consumer)
# Scan a bunch of score/alignment's.
while 1:
# An HSP header starts with ' Score'.
# However, if the HSP header is not the first one in the
# alignment, there will be a '<PRE>' line first. Therefore,
# I will need to check either of the first two lines to
# see if I'm at an HSP header.
line1 = safe_readline(uhandle)
line2 = safe_readline(uhandle)
line3 = safe_readline(uhandle)
uhandle.saveline(line3)
uhandle.saveline(line2)
uhandle.saveline(line1)
# There can be <a> links in front of 'Score'
rea = re.compile(r"</?a[^>]*>")
line1 = rea.sub("", line1)
line2 = rea.sub("", line2)
line3 = rea.sub("", line3)
if line1[:6] != ' Score' and line2[:6] != ' Score' and \
line3[:6] != ' Score':
break
self._scan_hsp(uhandle, consumer)
consumer.end_alignment()
def _scan_alignment_header(self, uhandle, consumer):
# <a name = 120291> </a><a href="http://www.ncbi.nlm.nih.gov:80/entrez/
# Length = 141
#
while 1:
line = safe_readline(uhandle)
if line.lstrip().startswith('Length ='):
consumer.length(line)
break
elif is_blank_line(line):
# Check to make sure I haven't missed the Length line
raise ValueError, "I missed the Length in an alignment header"
consumer.title(line)
if not attempt_read_and_call(uhandle, consumer.noevent,
start=' '):
read_and_call(uhandle, consumer.noevent, blank=1)
def _scan_hsp(self, uhandle, consumer):
consumer.start_hsp()
self._scan_hsp_header(uhandle, consumer)
self._scan_hsp_alignment(uhandle, consumer)
consumer.end_hsp()
def _scan_hsp_header(self, uhandle, consumer):
# If the HSP is not the first one within an alignment, includes:
# <PRE>
# Score = 22.7 bits (47), Expect = 2.5
# Identities = 10/36 (27%), Positives = 18/36 (49%)
# Strand = Plus / Plus
# Frame = +3
#
attempt_read_and_call(uhandle, consumer.noevent, start='<PRE>')
attempt_read_and_call(uhandle, consumer.noevent, blank=1)
read_and_call(uhandle, consumer.score,
has_re=re.compile(r'^ (<a[^>]*></a>)*Score'))
read_and_call(uhandle, consumer.identities, start=' Identities')
# BLASTN
        attempt_read_and_call(uhandle, consumer.strand, start=' Strand')
        # BLASTX, TBLASTN, TBLASTX
        attempt_read_and_call(uhandle, consumer.frame, start=' Frame')
read_and_call(uhandle, consumer.noevent, blank=1)
def _scan_hsp_alignment(self, uhandle, consumer):
# Query: 11 GRGVSACA-------TCDGFFYRNQKVAVIGGGNTAVEEALYLSNIASEVHLIHRRDGF
# GRGVS+ TC Y + + V GGG+ + EE L + I R+
# Sbjct: 12 GRGVSSVVRRCIHKPTCKE--YAVKIIDVTGGGSFSAEEVQELREATLKEVDILRKVSG
#
# Query: 64 AEKILIKR 71
# I +K
# Sbjct: 70 PNIIQLKD 77
# </PRE>
#
#
while 1:
# Blastn adds an extra line filled with spaces before Query
attempt_read_and_call(uhandle, consumer.noevent, start=' ')
read_and_call(uhandle, consumer.query, start='Query')
read_and_call(uhandle, consumer.align, start=' ')
read_and_call(uhandle, consumer.sbjct, start='Sbjct')
if not attempt_read_and_call(uhandle, consumer.noevent, blank=1):
break
read_and_call(uhandle, consumer.noevent, start='</PRE>')
read_and_call_while(uhandle, consumer.noevent, blank=1)
def _scan_masterslave_alignment(self, uhandle, consumer):
consumer.start_alignment()
read_and_call(uhandle, consumer.noevent, start='<PRE>')
while 1:
line = safe_readline(uhandle)
if is_blank_line(line):
consumer.noevent(line)
elif line[:6] == '</PRE>':
consumer.noevent(line)
break
else:
consumer.multalign(line)
read_and_call_while(uhandle, consumer.noevent, blank=1)
consumer.end_alignment()
def _scan_database_report(self, uhandle, consumer):
# <PRE>
# Database: Non-redundant SwissProt sequences
# Posted date: Dec 18, 1999 8:26 PM
# Number of letters in database: 29,652,561
# Number of sequences in database: 82,258
#
# Lambda K H
# 0.317 0.133 0.395
#
# Gapped
# Lambda K H
# 0.270 0.0470 0.230
#
# qblast (BLASTN 2.2.10) does not give the Database: bits before the Lambda
# information, so that needs to be skipped
consumer.start_database_report()
        # TBLASTN 2.2.6
# <PRE> Database: /tmp/affyA.fasta
line = uhandle.peekline()
# only look for database information if we aren't already at the
# Lambda bits
if line.find("Database") < 0:
read_and_call(uhandle, consumer.noevent, start='<PRE>')
line2 = uhandle.peekline()
if line2.find("Lambda K H") < 0:
read_and_call(uhandle, consumer.database, contains=' Database')
read_and_call_until(uhandle, consumer.database, contains="Posted")
read_and_call(uhandle, consumer.posted_date, start=' Posted')
read_and_call(uhandle, consumer.num_letters_in_database,
start=' Number of letters')
read_and_call(uhandle, consumer.num_sequences_in_database,
start=' Number of sequences')
read_and_call(uhandle, consumer.noevent, start=' ')
read_and_call(uhandle, consumer.noevent, start='Lambda')
read_and_call(uhandle, consumer.ka_params)
read_and_call(uhandle, consumer.noevent, blank=1)
# not BLASTP
attempt_read_and_call(uhandle, consumer.gapped, start='Gapped')
# not TBLASTX
if attempt_read_and_call(uhandle, consumer.noevent, start='Lambda'):
read_and_call(uhandle, consumer.ka_params_gap)
read_and_call_while(uhandle, consumer.noevent, blank=1)
consumer.end_database_report()
def _scan_parameters(self, uhandle, consumer):
# Matrix: BLOSUM62
# Number of Hits to DB: 1st pass: 41542626, 2nd pass: 9765
# Number of Sequences: 1st pass: 89405, 2nd pass: 84
# Number of extensions: 1st pass: 500847, 2nd pass: 6747
# Number of successful extensions: 1st pass: 14, 2nd pass: 49
# Number of sequences better than 10.0: 20
# length of query: 205
# length of database: 10,955,950
# effective HSP length: 46
# effective length of query: 158
# effective length of database: 6,843,320
# effective search space: 1081244560
# effective search space used: 1081244560
# frameshift window, decay const: 50, 0.5
# T: 13
# A: 40
# X1: 16 ( 7.3 bits)
# X2: 0 ( 0.0 bits)
# S1: 41 (21.7 bits)
# S2: 52 (26.7 bits)
#
# </PRE>
# 6/3/2001, </PRE> is gone, replaced by </form>
consumer.start_parameters()
# qblast doesn't have Matrix line
attempt_read_and_call(uhandle, consumer.matrix, start='Matrix')
# not TBLASTX
attempt_read_and_call(uhandle, consumer.gap_penalties, start='Gap')
# in qblast the Number of Hits and Number of Sequences lines are
# reversed
if attempt_read_and_call(uhandle, consumer.num_hits,
start='Number of Hits'):
read_and_call(uhandle, consumer.num_sequences,
start='Number of Sequences')
else:
read_and_call(uhandle, consumer.num_sequences,
start='Number of Sequences')
read_and_call(uhandle, consumer.num_hits,
start='Number of Hits')
read_and_call(uhandle, consumer.num_extends,
start='Number of extensions')
read_and_call(uhandle, consumer.num_good_extends,
start='Number of successful')
read_and_call(uhandle, consumer.num_seqs_better_e,
start='Number of sequences')
# not BLASTN, TBLASTX
if attempt_read_and_call(uhandle, consumer.hsps_no_gap,
start="Number of HSP's better"):
# for qblast order of HSP info is changed
if attempt_read_and_call(uhandle, consumer.hsps_prelim_gapped,
start="Number of HSP's successfully"):
read_and_call(uhandle, consumer.hsps_prelim_gap_attempted,
start="Number of HSP's that")
read_and_call(uhandle, consumer.hsps_gapped,
start="Number of HSP's gapped")
else:
read_and_call(uhandle, consumer.no_event,
start="Number of HSP's gapped")
read_and_call(uhandle, consumer.no_event,
start="Number of HSP's successfully")
read_and_call(uhandle, consumer.no_event,
start="Number of extra gapped")
# QBlast has different capitalization on the Length info:
if attempt_read_and_call(uhandle, consumer.query_length,
start='Length of query'):
read_and_call(uhandle, consumer.database_length,
start='Length of database')
read_and_call(uhandle, consumer.no_event,
start='Length adjustment')
attempt_read_and_call(uhandle, consumer.effective_query_length,
start='Effective length of query')
read_and_call(uhandle, consumer.effective_database_length,
start='Effective length of database')
attempt_read_and_call(uhandle, consumer.effective_search_space,
start='Effective search space:')
attempt_read_and_call(uhandle, consumer.effective_search_space_used,
start='Effective search space used')
else:
attempt_read_and_call(uhandle, consumer.query_length,
start='length of query')
read_and_call(uhandle, consumer.database_length,
start='length of database')
read_and_call(uhandle, consumer.effective_hsp_length,
start='effective HSP')
attempt_read_and_call(uhandle, consumer.effective_query_length,
start='effective length of query')
read_and_call(uhandle, consumer.effective_database_length,
start='effective length of database')
attempt_read_and_call(uhandle, consumer.effective_search_space,
start='effective search space:')
attempt_read_and_call(uhandle, consumer.effective_search_space_used,
start='effective search space used')
# BLASTX, TBLASTN, TBLASTX
attempt_read_and_call(uhandle, consumer.frameshift, start='frameshift')
attempt_read_and_call(uhandle, consumer.threshold, start='T')
read_and_call(uhandle, consumer.window_size, start='A')
read_and_call(uhandle, consumer.dropoff_1st_pass, start='X1')
read_and_call(uhandle, consumer.gap_x_dropoff, start='X2')
# not BLASTN, TBLASTX
attempt_read_and_call(uhandle, consumer.gap_x_dropoff_final,
start='X3')
read_and_call(uhandle, consumer.gap_trigger, start='S1')
attempt_read_and_call(uhandle, consumer.blast_cutoff, start='S2')
attempt_read_and_call(uhandle, consumer.noevent, blank=1)
attempt_read_and_call(uhandle, consumer.noevent, start="</PRE>")
attempt_read_and_call(uhandle, consumer.noevent, start="</form>")
consumer.end_parameters()
def qblast(program, database, sequence,
auto_format=None,composition_based_statistics=None,
db_genetic_code=None,endpoints=None,entrez_query='(none)',
expect=10.0,filter=None,gapcosts=None,genetic_code=None,
hitlist_size=50,i_thresh=None,layout=None,lcase_mask=None,
matrix_name=None,nucl_penalty=None,nucl_reward=None,
other_advanced=None,perc_ident=None,phi_pattern=None,
query_file=None,query_believe_defline=None,query_from=None,
query_to=None,searchsp_eff=None,service=None,threshold=None,
ungapped_alignment=None,word_size=None,
alignments=500,alignment_view=None,descriptions=500,
entrez_links_new_window=None,expect_low=None,expect_high=None,
format_entrez_query=None,format_object=None,format_type='XML',
ncbi_gi=None,results_file=None,show_overview=None
):
"""Do a BLAST search using the QBLAST server at NCBI.
Supports all parameters of the qblast API for Put and Get.
Some useful parameters:
program BLASTP or BLASTN
database Which database to search against.
sequence The sequence to search.
ncbi_gi TRUE/FALSE whether to give 'gi' identifier.
descriptions Number of descriptions to show. Def 500.
alignments Number of alignments to show. Def 500.
expect An expect value cutoff. Def 10.0.
matrix_name Specify an alt. matrix (PAM30, PAM70, BLOSUM80, BLOSUM45).
filter "none" turns off filtering. Default no filtering
format_type "HTML", "Text", "ASN.1", or "XML". Def. "XML".
entrez_query Entrez query to limit Blast search
hitlist_size Number of hits to return. Default 50
This function does no checking of the validity of the parameters
and passes the values to the server as is. More help is available at:
http://www.ncbi.nlm.nih.gov/BLAST/blast_overview.html
"""
import urllib, urllib2
from Bio.WWW import RequestLimiter
assert program == 'blastn' or program == 'blastp'
# Format the "Put" command, which sends search requests to qblast.
# Parameters taken from http://www.ncbi.nlm.nih.gov/BLAST/Doc/node5.html on 9 July 2007
parameters = [
('AUTO_FORMAT',auto_format),
('COMPOSITION_BASED_STATISTICS',composition_based_statistics),
('DATABASE',database),
('DB_GENETIC_CODE',db_genetic_code),
('ENDPOINTS',endpoints),
('ENTREZ_QUERY',entrez_query),
('EXPECT',expect),
('FILTER',filter),
('GAPCOSTS',gapcosts),
('GENETIC_CODE',genetic_code),
('HITLIST_SIZE',hitlist_size),
('I_THRESH',i_thresh),
('LAYOUT',layout),
('LCASE_MASK',lcase_mask),
('MATRIX_NAME',matrix_name),
('NUCL_PENALTY',nucl_penalty),
('NUCL_REWARD',nucl_reward),
('OTHER_ADVANCED',other_advanced),
('PERC_IDENT',perc_ident),
('PHI_PATTERN',phi_pattern),
('PROGRAM',program),
('QUERY',sequence),
('QUERY_FILE',query_file),
('QUERY_BELIEVE_DEFLINE',query_believe_defline),
('QUERY_FROM',query_from),
('QUERY_TO',query_to),
('SEARCHSP_EFF',searchsp_eff),
('SERVICE',service),
('THRESHOLD',threshold),
('UNGAPPED_ALIGNMENT',ungapped_alignment),
('WORD_SIZE',word_size),
('CMD', 'Put'),
]
query = [x for x in parameters if x[1] is not None]
message = urllib.urlencode(query)
# Send off the initial query to qblast.
request = urllib2.Request("http://www.ncbi.nlm.nih.gov/blast/Blast.cgi",
message,
{"User-Agent":"BiopythonClient"})
handle = urllib2.urlopen(request)
# Format the "Get" command, which gets the formatted results from qblast
# Parameters taken from http://www.ncbi.nlm.nih.gov/BLAST/Doc/node6.html on 9 July 2007
rid, rtoe = _parse_qblast_ref_page(handle)
parameters = [
('ALIGNMENTS',alignments),
('ALIGNMENT_VIEW',alignment_view),
('DESCRIPTIONS',descriptions),
('ENTREZ_LINKS_NEW_WINDOW',entrez_links_new_window),
('EXPECT_LOW',expect_low),
('EXPECT_HIGH',expect_high),
('FORMAT_ENTREZ_QUERY',format_entrez_query),
('FORMAT_OBJECT',format_object),
('FORMAT_TYPE',format_type),
('NCBI_GI',ncbi_gi),
('RID',rid),
('RESULTS_FILE',results_file),
('SERVICE',service),
('SHOW_OVERVIEW',show_overview),
('CMD', 'Get'),
]
query = [x for x in parameters if x[1] is not None]
message = urllib.urlencode(query)
# Poll NCBI until the results are ready.
limiter = RequestLimiter(3)
while 1:
limiter.wait()
request = urllib2.Request("http://www.ncbi.nlm.nih.gov/blast/Blast.cgi",
message,
{"User-Agent":"BiopythonClient"})
handle = urllib2.urlopen(request)
results = handle.read()
# XML results don't have the Status tag when finished
if results.find("Status=") < 0:
break
i = results.index("Status=")
j = results.index("\n", i)
status = results[i+len("Status="):j].strip()
if status.upper() == "READY":
break
return StringIO.StringIO(results)
def _parse_qblast_ref_page(handle):
"""Return tuple of RID, RTOE."""
s = handle.read()
i = s.find("RID =")
j = s.find("\n", i)
rid = s[i+len("RID ="):j].strip()
i = s.find("RTOE =")
j = s.find("\n", i)
rtoe = s[i+len("RTOE ="):j].strip()
return rid, int(rtoe)
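# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how qblast and BlastParser defined above could
# be combined. The query sequence is hypothetical, format_type='HTML' is used
# because BlastParser expects the WWW (HTML) report, and a live connection to
# NCBI would be required for this to actually run.
def _example_qblast_usage():
    query = "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"  # hypothetical protein sequence
    result_handle = qblast('blastp', 'nr', query,
                           hitlist_size=10, format_type='HTML')
    record = BlastParser().parse(result_handle)
    return record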
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/Blast/NCBIWWW.py
|
Python
|
apache-2.0
| 33,660
|
[
"BLAST",
"Biopython"
] |
a9ca876fbbef7a938d72b20a25d7ddd2c56dda4124e4252243e987df51827e92
|
# coding: utf-8
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu
import nipype.interfaces.fsl as fsl
import os
def create_dmri_preprocessing(name='dMRI_preprocessing', use_fieldmap=True, fieldmap_registration=False):
"""Creates a workflow that chains the necessary pipelines to
correct for motion, eddy currents, and, if selected, susceptibility
artifacts in EPI dMRI sequences.
.. warning::
        IMPORTANT NOTICE: this workflow rotates the b-vectors, so please be advised
        that not all DICOM converters ensure consistency between the resulting
        NIfTI orientation and the b-matrix table (e.g. dcm2nii checks it).
Example
-------
>>> nipype_dmri_preprocess = create_dmri_preprocessing('nipype_dmri_prep')
>>> nipype_dmri_preprocess.inputs.inputnode.in_file = 'diffusion.nii'
>>> nipype_dmri_preprocess.inputs.inputnode.in_bvec = 'diffusion.bvec'
>>> nipype_dmri_preprocess.inputs.inputnode.ref_num = 0
>>> nipype_dmri_preprocess.inputs.inputnode.fieldmap_mag = 'magnitude.nii'
>>> nipype_dmri_preprocess.inputs.inputnode.fieldmap_pha = 'phase.nii'
>>> nipype_dmri_preprocess.inputs.inputnode.te_diff = 2.46
>>> nipype_dmri_preprocess.inputs.inputnode.epi_echospacing = 0.77
>>> nipype_dmri_preprocess.inputs.inputnode.epi_rev_encoding = False
>>> nipype_dmri_preprocess.inputs.inputnode.pi_accel_factor = True
>>> nipype_dmri_preprocess.run() # doctest: +SKIP
Inputs::
inputnode.in_file - The diffusion data
inputnode.in_bvec - The b-matrix file, in FSL format and consistent with the in_file orientation
inputnode.ref_num - The reference volume (a b=0 volume in dMRI)
inputnode.fieldmap_mag - The magnitude of the fieldmap
inputnode.fieldmap_pha - The phase difference of the fieldmap
inputnode.te_diff - TE increment used (in msec.) on the fieldmap acquisition (generally 2.46ms for 3T scanners)
inputnode.epi_echospacing - The EPI EchoSpacing parameter (in msec.)
inputnode.epi_rev_encoding - True if reverse encoding was used (generally False)
inputnode.pi_accel_factor - Parallel imaging factor (aka GRAPPA acceleration factor)
inputnode.vsm_sigma - Sigma (in mm.) of the gaussian kernel used for in-slice smoothing of the deformation field (voxel shift map, vsm)
Outputs::
outputnode.dmri_corrected
outputnode.bvec_rotated
Optional arguments::
use_fieldmap - True if there are fieldmap files that should be used (default True)
fieldmap_registration - True if registration to fieldmap should be performed (default False)
"""
pipeline = pe.Workflow(name=name)
inputnode = pe.Node(niu.IdentityInterface(
fields=['in_file', 'in_bvec', 'ref_num', 'fieldmap_mag',
'fieldmap_pha', 'te_diff', 'epi_echospacing',
'epi_rev_encoding', 'pi_accel_factor', 'vsm_sigma']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(
fields=['dmri_corrected', 'bvec_rotated']),
name='outputnode')
motion = create_motion_correct_pipeline()
eddy = create_eddy_correct_pipeline()
    if use_fieldmap:  # we have a fieldmap, so let's use it (yay!)
susceptibility = create_epidewarp_pipeline(
fieldmap_registration=fieldmap_registration)
pipeline.connect([
(inputnode, motion, [('in_file', 'inputnode.in_file'),
('in_bvec', 'inputnode.in_bvec'),
('ref_num', 'inputnode.ref_num')]),
(inputnode, eddy, [('ref_num', 'inputnode.ref_num')]),
(motion, eddy, [('outputnode.motion_corrected', 'inputnode.in_file')]),
(eddy, susceptibility, [('outputnode.eddy_corrected', 'inputnode.in_file')]),
(inputnode, susceptibility, [('ref_num', 'inputnode.ref_num'),
('fieldmap_mag', 'inputnode.fieldmap_mag'),
('fieldmap_pha', 'inputnode.fieldmap_pha'),
('te_diff', 'inputnode.te_diff'),
('epi_echospacing', 'inputnode.epi_echospacing'),
('epi_rev_encoding', 'inputnode.epi_rev_encoding'),
('pi_accel_factor', 'inputnode.pi_accel_factor'),
('vsm_sigma', 'inputnode.vsm_sigma')]),
(motion, outputnode, [('outputnode.out_bvec', 'bvec_rotated')]),
(susceptibility, outputnode, [('outputnode.epi_corrected', 'dmri_corrected')])
])
else: # we don't have a fieldmap, so we just carry on without it :(
pipeline.connect([
(inputnode, motion, [('in_file', 'inputnode.in_file'),
('in_bvec', 'inputnode.in_bvec'),
('ref_num', 'inputnode.ref_num')]),
(inputnode, eddy, [('ref_num', 'inputnode.ref_num')]),
(motion, eddy, [('outputnode.motion_corrected', 'inputnode.in_file')]),
(motion, outputnode, [('outputnode.out_bvec', 'bvec_rotated')]),
(eddy, outputnode, [('outputnode.eddy_corrected', 'dmri_corrected')])
])
return pipeline
def create_motion_correct_pipeline(name='motion_correct'):
"""Creates a pipeline that corrects for motion artifact in dMRI sequences.
It takes a series of diffusion weighted images and rigidly co-registers
them to one reference image. Finally, the b-matrix is rotated accordingly
(Leemans et al. 2009 - http://www.ncbi.nlm.nih.gov/pubmed/19319973),
making use of the rotation matrix obtained by FLIRT.
.. warning::
        IMPORTANT NOTICE: this workflow rotates the b-vectors, so please be advised
        that not all DICOM converters ensure consistency between the resulting
        NIfTI orientation and the b-matrix table (e.g. dcm2nii checks it).
Example
-------
>>> nipype_motioncorrect = create_motion_correct_pipeline('nipype_motioncorrect')
>>> nipype_motioncorrect.inputs.inputnode.in_file = 'diffusion.nii'
>>> nipype_motioncorrect.inputs.inputnode.in_bvec = 'diffusion.bvec'
>>> nipype_motioncorrect.inputs.inputnode.ref_num = 0
>>> nipype_motioncorrect.run() # doctest: +SKIP
Inputs::
inputnode.in_file
inputnode.ref_num
inputnode.in_bvec
Outputs::
outputnode.motion_corrected
outputnode.out_bvec
"""
inputnode = pe.Node(
niu.IdentityInterface(
fields=['in_file', 'ref_num', 'in_bvec']),
name='inputnode')
pipeline = pe.Workflow(name=name)
split = pe.Node(fsl.Split(dimension='t'), name='split')
pick_ref = pe.Node(niu.Select(), name='pick_ref')
coregistration = pe.MapNode(fsl.FLIRT(no_search=True, interp='spline',
padding_size=1, dof=6), name='coregistration', iterfield=['in_file'])
rotate_bvecs = pe.Node(niu.Function(input_names=['in_bvec', 'in_matrix'], output_names=[
'out_file'], function=_rotate_bvecs), name='rotate_b_matrix')
merge = pe.Node(fsl.Merge(dimension='t'), name='merge')
outputnode = pe.Node(
niu.IdentityInterface(
fields=['motion_corrected', 'out_bvec']),
name='outputnode')
pipeline.connect([
(inputnode, split, [('in_file', 'in_file')])
,(split, pick_ref, [('out_files', 'inlist')])
,(inputnode, pick_ref, [('ref_num', 'index')])
,(split, coregistration, [('out_files', 'in_file')])
,(inputnode, rotate_bvecs, [('in_bvec', 'in_bvec')])
,(coregistration, rotate_bvecs, [('out_matrix_file', 'in_matrix')])
,(pick_ref, coregistration, [('out', 'reference')])
,(coregistration, merge, [('out_file', 'in_files')])
,(merge, outputnode, [('merged_file', 'motion_corrected')])
,(rotate_bvecs, outputnode, [('out_file', 'out_bvec')])
])
return pipeline
def create_eddy_correct_pipeline(name='eddy_correct'):
"""Creates a pipeline that replaces eddy_correct script in FSL. It takes a
series of diffusion weighted images and linearly co-registers them to one
reference image. No rotation of the B-matrix is performed, so this pipeline
should be executed after the motion correction pipeline.
Example
-------
>>> nipype_eddycorrect = create_eddy_correct_pipeline('nipype_eddycorrect')
>>> nipype_eddycorrect.inputs.inputnode.in_file = 'diffusion.nii'
>>> nipype_eddycorrect.inputs.inputnode.ref_num = 0
>>> nipype_eddycorrect.run() # doctest: +SKIP
Inputs::
inputnode.in_file
inputnode.ref_num
Outputs::
outputnode.eddy_corrected
"""
inputnode = pe.Node(
niu.IdentityInterface(fields=['in_file', 'ref_num']),
name='inputnode')
pipeline = pe.Workflow(name=name)
split = pe.Node(fsl.Split(dimension='t'), name='split')
pick_ref = pe.Node(niu.Select(), name='pick_ref')
coregistration = pe.MapNode(fsl.FLIRT(no_search=True, padding_size=1,
dof=12, interp='spline'), name='coregistration', iterfield=['in_file'])
merge = pe.Node(fsl.Merge(dimension='t'), name='merge')
outputnode = pe.Node(
niu.IdentityInterface(fields=['eddy_corrected']),
name='outputnode')
pipeline.connect([
(inputnode, split, [('in_file', 'in_file')])
,(split, pick_ref, [('out_files', 'inlist')])
,(inputnode, pick_ref, [('ref_num', 'index')])
,(split, coregistration, [('out_files', 'in_file')])
,(pick_ref, coregistration, [('out', 'reference')])
,(coregistration, merge, [('out_file', 'in_files')])
,(merge, outputnode, [('merged_file', 'eddy_corrected')])
])
return pipeline
def fieldmap_correction(name='fieldmap_correction', nocheck=False):
"""
Fieldmap-based retrospective correction of EPI images for the susceptibility distortion
artifact (Jezzard et al., 1995). Fieldmap images are assumed to be already registered
to EPI data, and a brain mask is required.
    Replaces the former workflow, which is still available as create_epidewarp_pipeline(). The
    difference with respect to the epidewarp pipeline is that this workflow uses the
    fsl_prepare_fieldmap tool available as of FSL 5.0.
Example
-------
>>> nipype_epicorrect = fieldmap_correction('nipype_epidewarp')
>>> nipype_epicorrect.inputs.inputnode.in_file = 'diffusion.nii'
>>> nipype_epicorrect.inputs.inputnode.in_mask = 'brainmask.nii'
>>> nipype_epicorrect.inputs.inputnode.fieldmap_pha = 'phase.nii'
>>> nipype_epicorrect.inputs.inputnode.fieldmap_mag = 'magnitude.nii'
>>> nipype_epicorrect.inputs.inputnode.te_diff = 2.46
>>> nipype_epicorrect.inputs.inputnode.epi_echospacing = 0.77
>>> nipype_epicorrect.inputs.inputnode.encoding_direction = 'y'
>>> nipype_epicorrect.run() # doctest: +SKIP
Inputs::
inputnode.in_file - The volume acquired with EPI sequence
inputnode.in_mask - A brain mask
inputnode.fieldmap_pha - The phase difference map from the fieldmapping, registered to in_file
        inputnode.fieldmap_mag - The magnitude maps (usually 4D, one magnitude per GRE scan)
                                 from the fieldmapping, registered to in_file
        inputnode.te_diff - Echo time difference (in msec.) between the two TEs of the
                            fieldmap acquisition (usually a GRE sequence).
inputnode.epi_echospacing - The effective echo spacing (aka dwell time) in msec. of the EPI sequence. If
EPI was acquired with parallel imaging, then the effective echo spacing is
eff_es = es / acc_factor.
inputnode.encoding_direction - The phase encoding direction in EPI acquisition (default y)
inputnode.vsm_sigma - Sigma value of the gaussian smoothing filter applied to the vsm (voxel shift map)
Outputs::
outputnode.epi_corrected
outputnode.out_vsm
"""
inputnode = pe.Node(niu.IdentityInterface(
fields=['in_file',
'in_mask',
'fieldmap_pha',
'fieldmap_mag',
'te_diff',
'epi_echospacing',
'vsm_sigma',
'encoding_direction'
]), name='inputnode'
)
pipeline = pe.Workflow(name=name)
# Keep first frame from magnitude
select_mag = pe.Node(fsl.utils.ExtractROI(
t_size=1, t_min=0), name='select_magnitude')
    # Mask magnitude (required by PrepareFieldmap)
mask_mag = pe.Node( fsl.maths.ApplyMask(), name='mask_magnitude' )
# Run fsl_prepare_fieldmap
fslprep = pe.Node( fsl.PrepareFieldmap(), name='prepare_fieldmap' )
if nocheck:
fslprep.inputs.nocheck = True
# Use FUGUE to generate the voxel shift map (vsm)
vsm = pe.Node(fsl.FUGUE(save_shift=True), name='generate_vsm')
# VSM demean is not anymore present in the epi_reg script
#vsm_mean = pe.Node(niu.Function(input_names=['in_file', 'mask_file', 'in_unwarped'], output_names=[
# 'out_file'], function=_vsm_remove_mean), name='vsm_mean_shift')
# fugue_epi
dwi_split = pe.Node(niu.Function(input_names=[
'in_file'], output_names=['out_files'], function=_split_dwi), name='dwi_split')
# 'fugue -i %s -u %s --loadshift=%s --mask=%s' % ( vol_name, out_vol_name, vsm_name, mask_name )
dwi_applyxfm = pe.MapNode(fsl.FUGUE(
icorr=True, save_shift=False), iterfield=['in_file'], name='dwi_fugue')
# Merge back all volumes
dwi_merge = pe.Node(fsl.utils.Merge(
dimension='t'), name='dwi_merge')
outputnode = pe.Node(
niu.IdentityInterface(fields=['epi_corrected','out_vsm']),
name='outputnode')
pipeline.connect([
(inputnode, select_mag, [('fieldmap_mag', 'in_file')])
,(inputnode, fslprep, [('fieldmap_pha', 'in_phase'),('te_diff', 'delta_TE') ])
,(inputnode, mask_mag, [('in_mask', 'mask_file' )])
,(select_mag, mask_mag, [('roi_file', 'in_file')])
,(mask_mag, fslprep, [('out_file', 'in_magnitude')])
,(fslprep, vsm, [('out_fieldmap', 'phasemap_file')])
,(inputnode, vsm, [('fieldmap_mag', 'in_file'),
('encoding_direction','unwarp_direction'),
(('te_diff', _ms2sec), 'asym_se_time'),
('vsm_sigma', 'smooth2d'),
(('epi_echospacing', _ms2sec), 'dwell_time')])
,(mask_mag, vsm, [('out_file', 'mask_file')])
,(inputnode, dwi_split, [('in_file', 'in_file')])
,(dwi_split, dwi_applyxfm, [('out_files', 'in_file')])
,(mask_mag, dwi_applyxfm, [('out_file', 'mask_file')])
,(vsm, dwi_applyxfm, [('shift_out_file', 'shift_in_file')])
,(inputnode, dwi_applyxfm, [('encoding_direction','unwarp_direction')])
,(dwi_applyxfm, dwi_merge, [('unwarped_file', 'in_files')])
,(dwi_merge, outputnode, [('merged_file', 'epi_corrected')])
,(vsm, outputnode, [('shift_out_file','out_vsm') ])
])
return pipeline
def topup_correction( name='topup_correction' ):
"""
    Corrects for susceptibility distortion of EPI images when an additional dataset with
    reversed phase encoding has been acquired.
Example
-------
>>> nipype_epicorrect = topup_correction('nipype_topup')
>>> nipype_epicorrect.inputs.inputnode.in_file_dir = 'epi.nii'
>>> nipype_epicorrect.inputs.inputnode.in_file_rev = 'epi_rev.nii'
>>> nipype_epicorrect.inputs.inputnode.encoding_direction = ['y', 'y-']
>>> nipype_epicorrect.inputs.inputnode.ref_num = 0
>>> nipype_epicorrect.run() # doctest: +SKIP
Inputs::
inputnode.in_file_dir - EPI volume acquired in 'forward' phase encoding
inputnode.in_file_rev - EPI volume acquired in 'reversed' phase encoding
inputnode.encoding_direction - Direction encoding of in_file_dir
        inputnode.ref_num - Index of the reference volume (usually the b=0 volume)
Outputs::
outputnode.epi_corrected
"""
pipeline = pe.Workflow(name=name)
inputnode = pe.Node(niu.IdentityInterface(
fields=['in_file_dir',
'in_file_rev',
'encoding_direction',
'readout_times',
'ref_num'
]), name='inputnode'
)
outputnode = pe.Node( niu.IdentityInterface(
fields=['out_fieldcoef',
'out_movpar',
'out_enc_file',
'epi_corrected'
]), name='outputnode'
)
b0_dir = pe.Node( fsl.ExtractROI( t_size=1 ), name='b0_1' )
b0_rev = pe.Node( fsl.ExtractROI( t_size=1 ), name='b0_2' )
combin = pe.Node( niu.Merge(2), name='merge' )
combin2 = pe.Node( niu.Merge(2), name='merge2' )
merged = pe.Node( fsl.Merge( dimension='t' ), name='b0_comb' )
topup = pe.Node( fsl.TOPUP(), name='topup' )
applytopup = pe.Node( fsl.ApplyTOPUP(in_index=[1,2] ), name='applytopup' )
pipeline.connect([
(inputnode, b0_dir, [('in_file_dir','in_file'),('ref_num','t_min')] )
,(inputnode, b0_rev, [('in_file_rev','in_file'),('ref_num','t_min')] )
,(inputnode, combin2, [('in_file_dir','in1'),('in_file_rev','in2') ] )
,(b0_dir, combin, [('roi_file','in1')] )
,(b0_rev, combin, [('roi_file','in2')] )
,(combin, merged, [('out', 'in_files')] )
,(merged, topup, [('merged_file','in_file')])
,(inputnode, topup, [('encoding_direction','encoding_direction'),('readout_times','readout_times') ])
,(topup, applytopup, [('out_fieldcoef','in_topup_fieldcoef'),('out_movpar','in_topup_movpar'),
('out_enc_file','encoding_file')])
,(combin2, applytopup, [('out','in_files')] )
,(topup, outputnode, [('out_fieldcoef','out_fieldcoef'),('out_movpar','out_movpar'),
('out_enc_file','out_enc_file') ])
,(applytopup,outputnode, [('out_corrected','epi_corrected')])
])
return pipeline
def create_epidewarp_pipeline(name='epidewarp', fieldmap_registration=False):
""" Replaces the epidewarp.fsl script (http://www.nmr.mgh.harvard.edu/~greve/fbirn/b0/epidewarp.fsl)
for susceptibility distortion correction of dMRI & fMRI acquired with EPI sequences and the fieldmap
information (Jezzard et al., 1995) using FSL's FUGUE. The registration to the (warped) fieldmap
(strictly following the original script) is available using fieldmap_registration=True.
Example
-------
>>> nipype_epicorrect = create_epidewarp_pipeline('nipype_epidewarp', fieldmap_registration=False)
>>> nipype_epicorrect.inputs.inputnode.in_file = 'diffusion.nii'
>>> nipype_epicorrect.inputs.inputnode.fieldmap_mag = 'magnitude.nii'
>>> nipype_epicorrect.inputs.inputnode.fieldmap_pha = 'phase.nii'
>>> nipype_epicorrect.inputs.inputnode.te_diff = 2.46
>>> nipype_epicorrect.inputs.inputnode.epi_echospacing = 0.77
>>> nipype_epicorrect.inputs.inputnode.epi_rev_encoding = False
>>> nipype_epicorrect.inputs.inputnode.ref_num = 0
>>> nipype_epicorrect.inputs.inputnode.pi_accel_factor = 1.0
>>> nipype_epicorrect.run() # doctest: +SKIP
Inputs::
inputnode.in_file - The volume acquired with EPI sequence
inputnode.fieldmap_mag - The magnitude of the fieldmap
inputnode.fieldmap_pha - The phase difference of the fieldmap
        inputnode.te_diff - Echo time difference between the two TEs, in ms.
inputnode.epi_echospacing - The echo spacing (aka dwell time) in the EPI sequence
inputnode.epi_ph_encoding_dir - The phase encoding direction in EPI acquisition (default y)
inputnode.epi_rev_encoding - True if it is acquired with reverse encoding
inputnode.pi_accel_factor - Acceleration factor used for EPI parallel imaging (GRAPPA)
inputnode.vsm_sigma - Sigma value of the gaussian smoothing filter applied to the vsm (voxel shift map)
inputnode.ref_num - The reference volume (B=0 in dMRI or a central frame in fMRI)
Outputs::
outputnode.epi_corrected
Optional arguments::
fieldmap_registration - True if registration to fieldmap should be done (default False)
"""
inputnode = pe.Node(niu.IdentityInterface(fields=['in_file',
'fieldmap_mag',
'fieldmap_pha',
'te_diff',
'epi_echospacing',
'epi_ph_encoding_dir',
'epi_rev_encoding',
'pi_accel_factor',
'vsm_sigma',
'ref_num',
'unwarp_direction'
]), name='inputnode')
pipeline = pe.Workflow(name=name)
# Keep first frame from magnitude
select_mag = pe.Node(fsl.utils.ExtractROI(
t_size=1, t_min=0), name='select_magnitude')
# mask_brain
mask_mag = pe.Node(fsl.BET(mask=True), name='mask_magnitude')
mask_mag_dil = pe.Node(niu.Function(input_names=[
'in_file'], output_names=['out_file'], function=_dilate_mask), name='mask_dilate')
# Compute dwell time
dwell_time = pe.Node(niu.Function(input_names=['dwell_time', 'pi_factor', 'is_reverse_encoding'], output_names=[
'dwell_time'], function=_compute_dwelltime), name='dwell_time')
# Normalize phase diff to be [-pi, pi)
norm_pha = pe.Node(niu.Function(input_names=['in_file'], output_names=[
'out_file'], function=_prepare_phasediff), name='normalize_phasediff')
# Execute FSL PRELUDE: prelude -p %s -a %s -o %s -f -v -m %s
prelude = pe.Node(fsl.PRELUDE(
process3d=True), name='phase_unwrap')
fill_phase = pe.Node(niu.Function(input_names=['in_file'], output_names=[
'out_file'], function=_fill_phase), name='fill_phasediff')
    # To ensure the vsm has the same dimensions as the magnitude image: the input only
    # affects the output dimensions, its content has no effect on the vsm. The de-warped
    # magnitude volume is meaningless and is thrown away.
# fugue -i %s -u %s -p %s --dwell=%s --asym=%s --mask=%s --saveshift=%s %
# ( mag_name, magdw_name, ph_name, esp, tediff, mask_name, vsmmag_name)
vsm = pe.Node(fsl.FUGUE(save_shift=True), name='generate_vsm')
vsm_mean = pe.Node(niu.Function(input_names=['in_file', 'mask_file', 'in_unwarped'], output_names=[
'out_file'], function=_vsm_remove_mean), name='vsm_mean_shift')
# fugue_epi
dwi_split = pe.Node(niu.Function(input_names=[
'in_file'], output_names=['out_files'], function=_split_dwi), name='dwi_split')
# 'fugue -i %s -u %s --loadshift=%s --mask=%s' % ( vol_name, out_vol_name, vsm_name, mask_name )
dwi_applyxfm = pe.MapNode(fsl.FUGUE(
icorr=True, save_shift=False), iterfield=['in_file'], name='dwi_fugue')
# Merge back all volumes
dwi_merge = pe.Node(fsl.utils.Merge(
dimension='t'), name='dwi_merge')
outputnode = pe.Node(
niu.IdentityInterface(fields=['epi_corrected']),
name='outputnode')
pipeline.connect([
(inputnode, dwell_time, [('epi_echospacing', 'dwell_time'), ('pi_accel_factor', 'pi_factor'), ('epi_rev_encoding', 'is_reverse_encoding')])
,(inputnode, select_mag, [('fieldmap_mag', 'in_file')])
,(inputnode, norm_pha, [('fieldmap_pha', 'in_file')])
,(select_mag, mask_mag, [('roi_file', 'in_file')])
,(mask_mag, mask_mag_dil, [('mask_file', 'in_file')])
,(select_mag, prelude, [('roi_file', 'magnitude_file')])
,(norm_pha, prelude, [('out_file', 'phase_file')])
,(mask_mag_dil, prelude, [('out_file', 'mask_file')])
,(prelude, fill_phase, [('unwrapped_phase_file', 'in_file')])
,(inputnode, vsm, [('fieldmap_mag', 'in_file')])
,(fill_phase, vsm, [('out_file', 'phasemap_file')])
,(inputnode, vsm, [(('te_diff', _ms2sec), 'asym_se_time'), ('vsm_sigma', 'smooth2d')])
,(dwell_time, vsm, [(('dwell_time', _ms2sec), 'dwell_time')])
,(mask_mag_dil, vsm, [('out_file', 'mask_file')])
,(mask_mag_dil, vsm_mean, [('out_file', 'mask_file')])
,(vsm, vsm_mean, [('unwarped_file', 'in_unwarped'), ('shift_out_file', 'in_file')])
,(inputnode, dwi_split, [('in_file', 'in_file')])
,(dwi_split, dwi_applyxfm, [('out_files', 'in_file')])
,(dwi_applyxfm, dwi_merge, [('unwarped_file', 'in_files')])
,(dwi_merge, outputnode, [('merged_file', 'epi_corrected')])
])
if fieldmap_registration:
""" Register magfw to example epi. There are some parameters here that may need to be tweaked. Should probably strip the mag
Pre-condition: forward warp the mag in order to reg with func. What does mask do here?
"""
# Select reference volume from EPI (B0 in dMRI and a middle frame in
# fMRI)
select_epi = pe.Node(fsl.utils.ExtractROI(
t_size=1), name='select_epi')
# fugue -i %s -w %s --loadshift=%s --mask=%s % ( mag_name, magfw_name,
# vsmmag_name, mask_name ), log ) # Forward Map
vsm_fwd = pe.Node(fsl.FUGUE(
save_warped=True), name='vsm_fwd')
vsm_reg = pe.Node(fsl.FLIRT(bins=256, cost='corratio', dof=6, interp='spline', searchr_x=[
-10, 10], searchr_y=[-10, 10], searchr_z=[-10, 10]), name='vsm_registration')
# 'flirt -in %s -ref %s -out %s -init %s -applyxfm' % ( vsmmag_name, ref_epi, vsmmag_name, magfw_mat_out )
vsm_applyxfm = pe.Node(fsl.ApplyXfm(
interp='spline'), name='vsm_apply_xfm')
# 'flirt -in %s -ref %s -out %s -init %s -applyxfm' % ( mask_name, ref_epi, mask_name, magfw_mat_out )
msk_applyxfm = pe.Node(fsl.ApplyXfm(
interp='nearestneighbour'), name='msk_apply_xfm')
pipeline.connect([
(inputnode, select_epi, [('in_file', 'in_file'), ('ref_num', 't_min')])
,(select_epi, vsm_reg, [('roi_file', 'reference')])
,(vsm, vsm_fwd, [('shift_out_file', 'shift_in_file')])
,(mask_mag_dil, vsm_fwd, [('out_file', 'mask_file')])
,(inputnode, vsm_fwd, [('fieldmap_mag', 'in_file')])
,(vsm_fwd, vsm_reg, [('warped_file', 'in_file')])
,(vsm_reg, msk_applyxfm, [('out_matrix_file', 'in_matrix_file')])
,(select_epi, msk_applyxfm, [('roi_file', 'reference')])
,(mask_mag_dil, msk_applyxfm, [('out_file', 'in_file')])
,(vsm_reg, vsm_applyxfm, [('out_matrix_file', 'in_matrix_file')])
,(select_epi, vsm_applyxfm, [('roi_file', 'reference')])
,(vsm_mean, vsm_applyxfm, [('out_file', 'in_file')])
,(msk_applyxfm, dwi_applyxfm, [('out_file', 'mask_file')])
,(vsm_applyxfm, dwi_applyxfm, [('out_file', 'shift_in_file')])
])
else:
pipeline.connect([
(mask_mag_dil, dwi_applyxfm, [('out_file', 'mask_file')])
,( vsm_mean, dwi_applyxfm, [('out_file', 'shift_in_file')])
])
return pipeline
def _rotate_bvecs(in_bvec, in_matrix):
import os
import numpy as np
name, fext = os.path.splitext(os.path.basename(in_bvec))
if fext == '.gz':
name, _ = os.path.splitext(name)
out_file = os.path.abspath('./%s_rotated.bvec' % name)
bvecs = np.loadtxt(in_bvec)
new_bvecs = np.zeros(shape=bvecs.T.shape) #pre-initialise array, 3 col format
for i, vol_matrix in enumerate(in_matrix[0::]): #start index at 0
bvec = np.matrix(bvecs[:, i])
rot = np.matrix(np.loadtxt(vol_matrix)[0:3, 0:3])
new_bvecs[i] = (np.array(rot * bvec.T).T)[0] #fill each volume with x,y,z as we go along
np.savetxt(out_file, np.array(new_bvecs).T, fmt='%0.15f')
return out_file
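# Illustrative sketch (not part of the original workflow): the core operation
# inside _rotate_bvecs is applying, for each volume, the 3x3 rotation block of
# the FLIRT affine to that volume's b-vector. The values below are made up.
def _example_rotate_single_bvec():
    import numpy as np
    rot = np.eye(3)                    # identity rotation: b-vector unchanged
    bvec = np.array([0.0, 0.0, 1.0])   # hypothetical unit b-vector
    rotated = np.dot(rot, bvec)
    # a proper rotation preserves the norm of the b-vector
    assert np.allclose(np.linalg.norm(rotated), np.linalg.norm(bvec))
    return rotated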
def _cat_logs(in_files):
import shutil
import os
name, fext = os.path.splitext(os.path.basename(in_files[0]))
if fext == '.gz':
name, _ = os.path.splitext(name)
out_file = os.path.abspath('./%s_ecclog.log' % name)
out_str = ''
with open(out_file, 'wb') as totallog:
for i, fname in enumerate(in_files):
totallog.write('\n\npreprocessing %d\n' % i)
with open(fname) as inlog:
for line in inlog:
totallog.write(line)
return out_file
def _compute_dwelltime(dwell_time=0.68, pi_factor=1.0, is_reverse_encoding=False):
dwell_time *= (1.0/pi_factor)
if is_reverse_encoding:
dwell_time *= -1.0
return dwell_time
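# Illustrative note (hypothetical values): with an acquired echo spacing of
# 0.77 ms and a parallel-imaging factor of 2, _compute_dwelltime returns
# 0.77 / 2 = 0.385 ms; if reverse encoding was used the sign is flipped
# (-0.385 ms), which reverses the direction of the computed voxel shifts.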
def _effective_echospacing( dwell_time, pi_factor=1.0 ):
dwelltime = 1.0e-3 * dwell_time * ( 1.0/pi_factor )
return dwelltime
def _prepare_phasediff(in_file):
import nibabel as nib
import os
import numpy as np
img = nib.load(in_file)
max_diff = np.max(img.get_data().reshape(-1))
min_diff = np.min(img.get_data().reshape(-1))
A = (2.0 * np.pi)/(max_diff-min_diff)
B = np.pi - (A * max_diff)
diff_norm = img.get_data() * A + B
name, fext = os.path.splitext(os.path.basename(in_file))
if fext == '.gz':
name, _ = os.path.splitext(name)
out_file = os.path.abspath('./%s_2pi.nii.gz' % name)
nib.save(nib.Nifti1Image(
diff_norm, img.get_affine(), img.get_header()), out_file)
return out_file
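# Illustrative sketch (not part of the original workflow): a scalar version of
# the rescaling performed by _prepare_phasediff, using hypothetical raw phase
# bounds; the image minimum maps to -pi and the maximum to +pi.
def _example_phase_rescale(value, min_diff=-4096.0, max_diff=4095.0):
    import numpy as np
    A = (2.0 * np.pi) / (max_diff - min_diff)
    B = np.pi - (A * max_diff)
    return value * A + B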
def _dilate_mask(in_file, iterations=4):
import nibabel as nib
import scipy.ndimage as ndimage
import os
img = nib.load(in_file)
img._data = ndimage.binary_dilation(img.get_data(), iterations=iterations)
name, fext = os.path.splitext(os.path.basename(in_file))
if fext == '.gz':
name, _ = os.path.splitext(name)
out_file = os.path.abspath('./%s_dil.nii.gz' % name)
nib.save(img, out_file)
return out_file
def _fill_phase(in_file):
import nibabel as nib
import os
import numpy as np
img = nib.load(in_file)
dumb_img = nib.Nifti1Image(np.zeros(
img.get_shape()), img.get_affine(), img.get_header())
out_nii = nib.funcs.concat_images((img, dumb_img))
name, fext = os.path.splitext(os.path.basename(in_file))
if fext == '.gz':
name, _ = os.path.splitext(name)
out_file = os.path.abspath('./%s_fill.nii.gz' % name)
nib.save(out_nii, out_file)
return out_file
def _vsm_remove_mean(in_file, mask_file, in_unwarped):
import nibabel as nib
import os
import numpy as np
import numpy.ma as ma
img = nib.load(in_file)
msk = nib.load(mask_file).get_data()
img_data = img.get_data()
img_data[msk == 0] = 0
vsmmag_masked = ma.masked_values(img_data.reshape(-1), 0.0)
vsmmag_masked = vsmmag_masked - vsmmag_masked.mean()
img._data = vsmmag_masked.reshape(img.get_shape())
name, fext = os.path.splitext(os.path.basename(in_file))
if fext == '.gz':
name, _ = os.path.splitext(name)
out_file = os.path.abspath('./%s_demeaned.nii.gz' % name)
nib.save(img, out_file)
return out_file
def _ms2sec(val):
    return val * 1e-3
def _split_dwi(in_file):
import nibabel as nib
import os
out_files = []
frames = nib.funcs.four_to_three(nib.load(in_file))
name, fext = os.path.splitext(os.path.basename(in_file))
if fext == '.gz':
name, _ = os.path.splitext(name)
for i, frame in enumerate(frames):
out_file = os.path.abspath('./%s_%03d.nii.gz' % (name, i))
nib.save(frame, out_file)
out_files.append(out_file)
return out_files
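# Example (illustrative): _split_dwi('dwi.nii.gz') writes one 3D volume per
# time point as dwi_000.nii.gz, dwi_001.nii.gz, ... and returns their paths.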
|
dmordom/nipype
|
nipype/workflows/dmri/fsl/epi.py
|
Python
|
bsd-3-clause
| 34,579
|
[
"Gaussian"
] |
d913b471ce26bc4463fb7ec3d820fbd411dd0276a07388c74e2606fdc9ecf0aa
|
# coding=utf-8
""" Python AST visitor that converts an expression to a RACO equivalent """
import sys
import ast
from raco.expression import NamedAttributeRef, UnnamedAttributeRef, \
StringLiteral, NumericLiteral, BooleanLiteral, \
EQ, NEQ, LT, LTEQ, GT, GTEQ, AND, OR, NOT, \
PLUS, MINUS, DIVIDE, IDIVIDE, MOD, TIMES, NEG, CAST, PYUDF
from raco.types import STRING_TYPE, LONG_TYPE, DOUBLE_TYPE, BOOLEAN_TYPE
from raco.expression.function import WORKERID, RANDOM, \
ABS, CEIL, COS, FLOOR, LOG, SIN, SQRT, TAN, MD5, LEN, POW, \
LESSER, GREATER, SUBSTR
from raco.python.exceptions import PythonUnrecognizedTokenException, \
PythonOutOfRangeException, PythonUnsupportedOperationException, \
PythonArgumentException
comparator_map = {
ast.Eq: EQ,
ast.NotEq: NEQ,
ast.Lt: LT,
ast.LtE: LTEQ,
ast.Gt: GT,
ast.GtE: GTEQ
}
propositional_map = {
ast.And: AND,
ast.Or: OR
}
unary_operators = {
ast.Not: NOT,
ast.USub: NEG
}
binary_operators = {
ast.Add: PLUS,
ast.Sub: MINUS,
ast.Div: DIVIDE,
ast.FloorDiv: IDIVIDE,
ast.Mod: MOD,
ast.Mult: TIMES
}
literal_map = {
'True': BooleanLiteral(True),
'False': BooleanLiteral(False),
}
nary_map = {
# Arity, name: function
(1, 'str'): lambda args: CAST(STRING_TYPE, args[0]),
(1, 'int'): lambda args: MOD(CAST(LONG_TYPE, args[0]),
NumericLiteral(sys.maxint)),
(1, 'long'): lambda args: CAST(LONG_TYPE, args[0]),
(1, 'float'): lambda args: CAST(DOUBLE_TYPE, args[0]),
(1, 'bool'): lambda args: CAST(BOOLEAN_TYPE, args[0]),
(0, 'workerid'): lambda args: WORKERID(),
(0, 'random'): lambda args: RANDOM(),
(1, 'fabs'): lambda args: ABS(CAST(DOUBLE_TYPE, args[0])),
(1, 'abs'): lambda args: ABS(args[0]),
(1, 'ceil'): lambda args: CEIL(args[0]),
(1, 'cos'): lambda args: COS(args[0]),
(1, 'floor'): lambda args: FLOOR(args[0]),
(1, 'log'): lambda args: LOG(args[0]),
(1, 'sin'): lambda args: SIN(args[0]),
(1, 'sqrt'): lambda args: SQRT(args[0]),
(1, 'tan'): lambda args: TAN(args[0]),
(1, 'md5'): lambda args: MD5(args[0]),
(1, 'len'): lambda args: LEN(args[0]),
(2, 'pow'): lambda args: POW(args[0], args[1]),
(2, 'min'): lambda args: LESSER(args[0], args[1]),
(2, 'max'): lambda args: GREATER(args[0], args[1]),
}
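# Example (illustrative): a call node for pow(a, b) has arity 2 and resolves
# through the key (2, 'pow') to POW(a, b); int(x) resolves through (1, 'int'),
# i.e. a CAST to LONG_TYPE taken modulo sys.maxint.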
zero = ast.Num(n=0)
class ExpressionVisitor(ast.NodeVisitor):
""" Visitor that converts an AST to a RACO expression """
def __init__(self, schema, udfs):
self.schema = schema
self.names = None
self.udfs = udfs
def visit_arguments(self, node):
""" Visitor for function arguments """
self.names = [n.id for n in node.args]
def visit_UnaryOp(self, node):
""" Visitor for a unary operator """
if type(node.op) not in unary_operators:
raise PythonUnrecognizedTokenException(node.op,
node.lineno,
node.col_offset)
return unary_operators[type(node.op)](self.visit(node.operand))
def visit_BinOp(self, node):
""" Visitor for binary operations """
if type(node.op) not in binary_operators:
raise PythonUnrecognizedTokenException(node.op,
node.lineno,
node.col_offset)
return binary_operators[type(node.op)](
self.visit(node.left), self.visit(node.right))
def visit_BoolOp(self, node):
""" Visitor for boolean operations """
assert (len(node.values) >= 2)
op = propositional_map[type(node.op)]
return reduce(lambda c, e: op(c, self.visit(e)),
# Fold over any other clauses
node.values[2:],
# Always at least two clauses
op(*map(self.visit, node.values[:2])))
def visit_Compare(self, node):
""" Visitor for comparison operations """
if len(node.ops) == 1 and \
type(node.ops[0]) in comparator_map.keys() and \
len(node.comparators) == 1:
left = self.visit(node.left)
right = self.visit(node.comparators[0])
return comparator_map[type(node.ops[0])](left, right)
else:
raise PythonUnrecognizedTokenException(node.ops[0],
node.lineno,
node.col_offset)
def visit_Attribute(self, node):
""" Visitor for dotted references """
if not isinstance(node.value, ast.Name):
raise PythonUnsupportedOperationException(
'Unsupported reference',
node.lineno, node.col_offset)
scheme = self.schema[self.names.index(node.value.id)]
if node.attr not in [s[0] for s in scheme]:
raise PythonUnrecognizedTokenException(node.attr,
node.lineno,
node.col_offset)
return NamedAttributeRef(node.attr)
def visit_Subscript(self, node):
""" Visitor for slices """
if isinstance(node.value, ast.Name) and \
isinstance(node.slice, ast.Index) and \
node.value.id in self.names:
return self.visit_AttributeIndex(node)
elif isinstance(node.slice, ast.Slice):
return self.visit_SubstringSlice(node)
elif isinstance(node.slice, ast.Index):
return self.visit_SubstringIndex(node)
def visit_AttributeIndex(self, node):
""" Visitor for subscripts over a tuple """
if node.value.id not in self.names:
raise PythonUnrecognizedTokenException(
node.value.id, node.lineno, node.col_offset)
offset = sum([len(s) for s in
self.schema[:self.names.index(node.value.id)]])
if node.slice.value.n < 0 or \
node.slice.value.n >= \
len(self.schema[self.names.index(node.value.id)]):
raise PythonOutOfRangeException(
node.value.id, node.lineno, node.col_offset)
return UnnamedAttributeRef(node.slice.value.n + offset)
def visit_SubstringIndex(self, node):
""" Visitor for indexing a string """
node.slice = ast.Slice(lower=node.slice.value,
upper=ast.Num(node.slice.value.n + 1),
step=None)
return self.visit_SubstringSlice(node)
def visit_SubstringSlice(self, node):
""" Visitor for slicing a string """
if (node.slice.lower or zero).n < 0 or \
(node.slice.upper or zero).n < 0:
raise PythonUnsupportedOperationException(
'RACO does not support negative indices in slices',
node.lineno, node.col_offset)
elif node.slice.step is not None:
raise PythonUnsupportedOperationException(
'RACO does not support steps in slices',
node.lineno, node.col_offset)
child = self.visit(node.value)
return SUBSTR(
[child,
self.visit(node.slice.lower or ast.Num(0)),
self.visit(node.slice.upper or ast.Num(2 ** 30))])
def visit_Call(self, node):
""" Visitor for calling built-in or UDF functions """
name = node.func.id
arity = len(node.args)
if (arity, name) in nary_map:
return self.visit_Call_builtin(node)
elif name in (udf['name'] for udf in self.udfs):
return self.visit_Call_UDF(node)
else:
raise PythonArgumentException(
'Unrecognized function %s or invalid arguments' % name,
node.lineno, node.col_offset)
def visit_Call_builtin(self, node):
""" Visitor for calling built-in functions """
name = node.func.id
arity = len(node.args)
if (arity, name) not in nary_map:
raise PythonArgumentException(
'Unrecognized function %s or invalid arguments' % name,
node.lineno, node.col_offset)
return nary_map[(arity, name)](map(self.visit, node.args))
def visit_Call_UDF(self, node):
""" Visitor for calling UDF functions """
name = node.func.id
arity = len(node.args)
udf = next((udf for udf in self.udfs
if udf['name'] == name), None)
# Ignore output type when looking up UDF
if udf is None:
raise PythonArgumentException(
'Unrecognized function %s or invalid arguments' % name,
node.lineno, node.col_offset)
output_type = udf['outputType']
source = udf.get('source', None)
return PYUDF(name, output_type,
*map(self.visit, node.args),
source=source)
def visit_Str(self, node):
""" Visitor for string literals """
return StringLiteral(node.s)
def visit_Num(self, node):
""" Visitor for numeric literals """
return NumericLiteral(node.n)
def visit_Name(self, node):
""" Visitor for built-in literals """
if node.id not in literal_map:
raise PythonUnrecognizedTokenException(node.id,
node.lineno,
node.col_offset)
return literal_map[node.id]
def visit_Module(self, node):
""" Visitor for top-level modules """
assert(len(node.body) == 1)
return self.visit(node.body[0])
def visit_Lambda(self, node):
""" Visitor for lambdas """
self.visit(node.args)
return self.visit(node.body)
def visit_FunctionDef(self, node):
""" Visitor for function declaration """
# TODO this is not a strict RACO requirement
if len(node.body) != 1:
raise PythonUnsupportedOperationException(
'Functions must have exactly one statement',
node.lineno, node.col_offset)
elif not isinstance(node.body[0], ast.Return):
raise PythonUnsupportedOperationException(
'Statement in function body must be a return',
node.lineno, node.col_offset)
self.visit(node.args)
return self.visit(node.body[0])
def visit_Return(self, node):
""" Visitor for return statements """
return self.visit(node.value)
def visit_Expr(self, node):
""" Visitor for expressions """
return self.visit(node.value)
def generic_visit(self, node):
""" Visitor for unsupported node types """
raise PythonUnsupportedOperationException(
'Unsupported node ' + str(type(node)),
node.lineno, node.col_offset)
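# Hypothetical usage sketch (not part of the original module). The schema layout
# assumed here -- one list of (attribute_name, type) pairs per lambda argument --
# is inferred from visit_Attribute/visit_AttributeIndex above and may not match
# the real RACO catalog format:
#
#   import ast
#   tree = ast.parse("lambda t: t.salary > 50000")
#   visitor = ExpressionVisitor([[('name', STRING_TYPE), ('salary', LONG_TYPE)]], udfs=[])
#   expr = visitor.visit(tree)  # -> GT(NamedAttributeRef('salary'), NumericLiteral(50000))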
|
uwescience/raco
|
raco/python/util/visitor.py
|
Python
|
bsd-3-clause
| 11,005
|
[
"VisIt"
] |
e43fae8ad2bea1113969cde6f78074ad09a1dc05a0bde7f6b4a19875c75bae6b
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Okinawa Institute of Science and Technology, Japan.
#
# This script runs on STEPS 2.x http://steps.sourceforge.net
#
# H Anwar, I Hepburn, H Nedelescu, W Chen and E De Schutter
# Stochastic calcium mechanisms cause dendritic calcium spike variability
# J Neuroscience 2013
#
# *HybridCaburst_stochCaT.py : A hybrid calcium burst model with stochastic T-type
# calcium channels and everything else deterministic.
#
# Script authors: Haroon Anwar and Iain Hepburn
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# USAGE
#
# $ python HybridCaburst_stochCaT.py *mesh* *root* *iter_n*
#
# *mesh* is the tetrahedral mesh (10um to 160um cylinder)
# *root* is the path to the location for data storage
# *iter_n* (intended to be an integer) is an identifier number for each
# simulation iteration.
#
# E.g: python HybridCaburst_stochCaT.py Cylinder2_dia2um_L10um_outer0_3um_0.3shell_0.3size_19156tets_adaptive.inp ~/stochcasims/ 1
#
#
# OUTPUT
#
# In (root)/data/HybridCaburst_stochCaT/(mesh)/(iter_n+time) directory
# 3 data files will be recorded. Each file contains one row for every
# time-point at which data is recorded, organised into the following columns:
#
# currents.dat
# Time (ms), P-type current, T-type current, BK current, SK current
# (current units are Amps/m^2)
#
# voltage.dat
# Time (ms), voltage at mesh centre (mV)
#
# calcium.dat
# Time (ms), deterministic calcium concentration in submembrane (micromolar),
# stochastic calcium concentration in submembrane (micromolar),
# number of calcium ions in submembrane in deterministic solver,
# number of calcium ions in submembrane in stochastic solver.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
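#
# A minimal post-processing sketch (illustrative only, not part of the model):
# each .dat file is whitespace-separated with one row per recorded time point,
# so it can be loaded directly with numpy, e.g.
#
#   import numpy as np
#   t_ms, i_P, i_T, i_BK, i_SK = np.loadtxt('currents.dat', unpack=True)
#   t_ms, v_mV = np.loadtxt('voltage.dat', unpack=True)
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #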
import steps.interface
import math
import time
from random import *
from steps.model import *
from steps.geom import *
from steps.rng import *
from steps.sim import *
import os
import extra.curr_funcs as cf
from extra.constants import *
import sys
import numpy as np
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
_, meshfile_ab, root, iter_n = sys.argv
if meshfile_ab == 'Cylinder2_dia2um_L160um_outer0_0.3shell_0.3size_279152tets_adaptive.inp':
cyl160=True
else:
cyl160=False
########################### BIOCHEMICAL MODEL ###############################
r = ReactionManager()
mdl_stoch = Model()
mdl_det = Model()
with mdl_stoch:
Ca_stoch = Species.Create(valence=2)
CaT_m0h0, CaT_m0h1, CaT_m1h0, CaT_m1h1, CaT_m2h0, CaT_m2h1 = SubUnitState.Create()
CaTchan = Channel.Create([CaT_m0h0, CaT_m0h1, CaT_m1h0, CaT_m1h1, CaT_m2h0, CaT_m2h1])
vsys_stoch = VolumeSystem.Create()
ssys_stoch = SurfaceSystem.Create()
with ssys_stoch:
with CaTchan[...]:
CaT_m0h0.s <r['CaTm0h0_m1h0']> CaT_m1h0.s <r['CaTm1h0_m2h0']> CaT_m2h0.s <r['CaTm2h0_m2h1']> CaT_m2h1.s
r['CaTm0h0_m1h0'].K = VDepRate(lambda V: 1.0e3 *2.* alpham_cat(V*1.0e3)), VDepRate(lambda V: 1.0e3 *1.* betam_cat(V*1.0e3))
r['CaTm1h0_m2h0'].K = VDepRate(lambda V: 1.0e3 *1.* alpham_cat(V*1.0e3)), VDepRate(lambda V: 1.0e3 *2.* betam_cat(V*1.0e3))
r['CaTm2h0_m2h1'].K = VDepRate(lambda V: 1.0e3 *1.* alphah_cat(V*1.0e3)), VDepRate(lambda V: 1.0e3 *1.* betah_cat(V*1.0e3))
CaT_m1h0.s <r['CaTm1h0_m1h1']> CaT_m1h1.s
r['CaTm1h0_m1h1'].K = VDepRate(lambda V: 1.0e3 *1.* alphah_cat(V*1.0e3)), VDepRate(lambda V: 1.0e3 *1.* betah_cat(V*1.0e3))
CaT_m0h0.s <r['CaTm0h0_m0h1']> CaT_m0h1.s <r['CaTm0h1_m1h1']> CaT_m1h1.s <r['CaTm1h1_m2h1']> CaT_m2h1.s
r['CaTm0h0_m0h1'].K = VDepRate(lambda V: 1.0e3 *1.* alphah_cat(V*1.0e3)), VDepRate(lambda V: 1.0e3 *1.* betah_cat(V*1.0e3))
r['CaTm1h1_m2h1'].K = VDepRate(lambda V: 1.0e3 *1.* alpham_cat(V*1.0e3)), VDepRate(lambda V: 1.0e3 *2.* betam_cat(V*1.0e3))
r['CaTm0h1_m1h1'].K = VDepRate(lambda V: 1.0e3 *2.* alpham_cat(V*1.0e3)), VDepRate(lambda V: 1.0e3 *1.* betam_cat(V*1.0e3))
if cyl160:
OC_CaT = GHKCurr.Create(CaTchan[CaT_m2h1], Ca_stoch, CaT_P, virtual_oconc=Ca_oconc, computeflux=True)
else:
OC_CaT = GHKCurr.Create(CaTchan[CaT_m2h1], Ca_stoch, CaT_P, computeflux=True)
with mdl_det:
Ca_det = Species.Create(valence=2)
iCBsf, iCBsCa, iCBCaf, iCBCaCa, CBsf, CBsCa, CBCaf, CBCaCa, PV, PVMg, PVCa, Mg, Pump, CaPump,\
SK_C1, SK_C2, SK_C3, SK_C4, SK_O1, SK_O2, BK_C0, BK_C1, BK_C2, BK_C3, BK_C4, BK_O0, BK_O1,\
BK_O2, BK_O3, BK_O4, CaP_m0, CaP_m1, CaP_m2, CaP_m3 = Species.Create()
# Vol/surface systems
vsys_det = VolumeSystem.Create()
ssys_det = SurfaceSystem.Create()
with vsys_det:
diff_Ca = Diffusion.Create(Ca_det, DCST)
diff_CBsf = Diffusion.Create(CBsf, DCB)
diff_CBsCa = Diffusion.Create(CBsCa, DCB)
diff_CBCaf = Diffusion.Create(CBCaf, DCB)
diff_CBCaCa = Diffusion.Create(CBCaCa, DCB)
diff_PV = Diffusion.Create(PV, DPV)
diff_PVCa = Diffusion.Create(PVCa, DPV)
diff_PVMg = Diffusion.Create(PVMg, DPV)
(Ca_det + iCBsf <r['iCBsf1_f']> iCBsCa) + Ca_det <r['iCBsCa_f']> iCBCaCa
(Ca_det + iCBsf <r['iCBsf2_f']> iCBCaf) + Ca_det <r['iCBCaf_f']> iCBCaCa
r['iCBsf1_f'].K = iCBsf1_f_kcst, iCBsf1_b_kcst
r['iCBsCa_f'].K = iCBsCa_f_kcst, iCBsCa_b_kcst
r['iCBsf2_f'].K = iCBsf2_f_kcst, iCBsf2_b_kcst
r['iCBCaf_f'].K = iCBCaf_f_kcst, iCBCaf_b_kcst
(CBsf + Ca_det <r['CBsf1_f']> CBsCa) + Ca_det <r['CBsCa_f']> CBCaCa
(CBsf + Ca_det <r['CBsf2_f']> CBCaf) + Ca_det <r['CBCaf_f']> CBCaCa
r['CBsf1_f'].K = CBsf1_f_kcst, CBsf1_b_kcst
r['CBsCa_f'].K = CBsCa_f_kcst, CBsCa_b_kcst
r['CBsf2_f'].K = CBsf2_f_kcst, CBsf2_b_kcst
r['CBCaf_f'].K = CBCaf_f_kcst, CBCaf_b_kcst
Ca_det + PV <r['PVca_f']> PVCa
Mg + PV <r['PVmg_f']> PVMg
r['PVca_f'].K = PVca_f_kcst, PVca_b_kcst
r['PVmg_f'].K = PVmg_f_kcst, PVmg_b_kcst
with ssys_det:
#Pump
Ca_det.i + Pump.s <r['PumpD_f']> CaPump.s >r['PumpD_k']> Pump.s
r['PumpD_f'].K = P_f_kcst, P_b_kcst
r['PumpD_k'].K = P_k_kcst
CaP_m0.s <r['CaPm0m1']> CaP_m1.s <r['CaPm1m2']> CaP_m2.s <r['CaPm2m3']> CaP_m3.s
r['CaPm0m1'].K = 0.0, 0.0
r['CaPm1m2'].K = 0.0, 0.0
r['CaPm2m3'].K = 0.0, 0.0
(((BK_C0.s + Ca_det.i <r['BKCAC0']> BK_C1.s)\
+ Ca_det.i <r['BKCAC1']> BK_C2.s)\
+ Ca_det.i <r['BKCAC2']> BK_C3.s)\
+ Ca_det.i <r['BKCAC3']> BK_C4.s
r['BKCAC0'].K = c_01, c_10
r['BKCAC1'].K = c_12, c_21
r['BKCAC2'].K = c_23, c_32
r['BKCAC3'].K = c_34, c_43
(((BK_O0.s + Ca_det.i <r['BKCAO0']> BK_O1.s)\
+ Ca_det.i <r['BKCAO1']> BK_O2.s)\
+ Ca_det.i <r['BKCAO2']> BK_O3.s)\
+ Ca_det.i <r['BKCAO3']> BK_O4.s
r['BKCAO0'].K = o_01, o_10
r['BKCAO1'].K = o_12, o_21
r['BKCAO2'].K = o_23, o_32
r['BKCAO3'].K = o_34, o_43
BK_C0.s <r['BKC0O0']> BK_O0.s
BK_C1.s <r['BKC1O1']> BK_O1.s
BK_C2.s <r['BKC2O2']> BK_O2.s
BK_C3.s <r['BKC3O3']> BK_O3.s
BK_C4.s <r['BKC4O4']> BK_O4.s
r['BKC0O0'].K = 0, 0
r['BKC1O1'].K = 0, 0
r['BKC2O2'].K = 0, 0
r['BKC3O3'].K = 0, 0
r['BKC4O4'].K = 0, 0
((SK_C1.s + Ca_det.i <r['SKCAC1']> SK_C2.s)\
+ Ca_det.i <r['SKCAC2']> SK_C3.s)\
+ Ca_det.i <r['SKCAC3']> SK_C4.s
r['SKCAC1'].K = dirc2_t, invc1_t
r['SKCAC2'].K = dirc3_t, invc2_t
r['SKCAC3'].K = dirc4_t, invc3_t
SK_C3.s <r['SKC3O1']> SK_O1.s
SK_C4.s <r['SKC4O2']> SK_O2.s
r['SKC3O1'].K = diro1_t, invo1_t
r['SKC4O2'].K = diro2_t, invo2_t
##################################
########### MESH & COMPARTMENTALIZATION #################
##########Import Mesh
mesh_stoch = TetMesh.Load('./meshes/'+meshfile_ab)
mesh_det = TetMesh.Load('./meshes/'+meshfile_ab)
with mesh_stoch as mesh:
# Use mesh_stoch for geometrical operations
rad, zmin, zmax = 1e-6, -200e-6, 200e-6
inner_tets, outer_tets = TetList(), TetList()
for t in mesh.tets:
c = t.center
if zmin <= c.z <= zmax and c.x**2 + c.y**2 <= rad**2:
inner_tets.append(t)
else:
outer_tets.append(t)
print(len(outer_tets), " tets in outer compartment")
print(len(inner_tets), " tets in inner compartment")
# Record voltage from the central tetrahedron
cent_tet = mesh.tets[0.0, 0.0, 0.0]
if cyl160:
# Ensure that we use points a small distance inside the boundary:
minz, maxz = mesh.bbox.min.z, mesh.bbox.max.z
        memb_tris = TriList(tri for tri in mesh_stoch.surface if minz < tri.center.z < maxz)
else:
print('Finding connecting triangles...')
memb_tris = inner_tets.surface & outer_tets.surface
submemb_tets = TetList()
for tri in memb_tris:
submemb_tets += tri.tetNeighbs
submemb_tets = submemb_tets & inner_tets
print(len(submemb_tets))
vol = sum(tet.Vol for tet in submemb_tets)
print('Volume of submembrane region is', vol)
submemb_tris = TriList()
for tet in submemb_tets:
for tri in tet.faces:
if tri in memb_tris:
submemb_tris.append(tri)
break
assert(len(submemb_tris) == len(submemb_tets))
########## Create an intracellular compartment i.e. cytosolic compartment
cyto_stoch = Compartment.Create(inner_tets.indices, vsys_stoch)
########## Create a membrane as a surface mesh
if cyl160:
memb_stoch = Patch.Create(memb_tris, cyto_stoch, None, ssys_stoch)
else:
outer_stoch = Compartment.Create(outer_tets.indices)
memb_stoch = Patch.Create(memb_tris, cyto_stoch, outer_stoch, ssys_stoch)
# For EField calculation
print("Creating membrane..")
membrane = Membrane.Create([memb_stoch])
print("Membrane created.")
print("Area: ", memb_stoch.Area)
with mesh_det:
cyto_det = Compartment.Create(inner_tets.indices, vsys_det)
memb_det = Patch.Create(memb_tris.indices, cyto_det, None, ssys_det)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # SIMULATION # # # # # # # # # # # # # # # # # # # # # #
rng1 = RNG('mt19937', 512, 7)
rng2 = RNG('mt19937', 512, 7)
#Creating two solvers
sim_stoch = Simulation('Tetexact', mdl_stoch, mesh_stoch, rng1, calcMembPot=True)
sim_det = Simulation('TetODE', mdl_det, mesh_det, rng2)
sim_det.setTolerances(1.0e-3, 1.0e-3)
print("Resetting simulation objects..")
sim_stoch.newRun()
sim_det.newRun()
print("Injecting molecules..")
sim_stoch.Temp = TEMPERATURE+273.15
if not cyl160:
sim_stoch.outer_stoch.Ca_stoch.Conc = Ca_oconc
sim_stoch.outer_stoch.Ca_stoch.Clamped = True
sim_det.cyto_det.Ca_det.Conc = Ca_iconc
sim_stoch.cyto_stoch.Ca_stoch.Conc = Ca_iconc
print("Calcium concentration in stochastic simulation is: ", sim_stoch.cyto_stoch.Ca_stoch.Conc)
print("No. of Ca molecules in stochastic simulation is: ", sim_stoch.cyto_stoch.Ca_stoch.Count)
print("Calcium concentration in deterministic simulation is: ", sim_det.cyto_det.Ca_det.Conc)
print("No. of Ca molecules in deterministic simulation is: ", sim_det.cyto_det.Ca_det.Count)
sim_det.cyto_det.Mg.Conc = Mg_conc
surfarea = sim_stoch.memb_stoch.Area
pumpnbs = 6.022141e12*surfarea
sim_det.memb_det.Pump.Count = round(pumpnbs)
sim_det.memb_det.CaPump.Count = 0
sim_det.cyto_det.iCBsf.Conc = iCBsf_conc
sim_det.cyto_det.iCBCaf.Conc = iCBCaf_conc
sim_det.cyto_det.iCBsCa.Conc = iCBsCa_conc
sim_det.cyto_det.iCBCaCa.Conc = iCBCaCa_conc
sim_det.cyto_det.CBsf.Conc = CBsf_conc
sim_det.cyto_det.CBCaf.Conc = CBCaf_conc
sim_det.cyto_det.CBsCa.Conc = CBsCa_conc
sim_det.cyto_det.CBCaCa.Conc = CBCaCa_conc
sim_det.cyto_det.PV.Conc = PV_conc
sim_det.cyto_det.PVCa.Conc = PVCa_conc
sim_det.cyto_det.PVMg.Conc = PVMg_conc
# CaP
sim_det.memb_det.CaP_m0.Count = round(CaP_ro*surfarea*CaP_m0_p)
sim_det.memb_det.CaP_m1.Count = round(CaP_ro*surfarea*CaP_m1_p)
sim_det.memb_det.CaP_m2.Count = round(CaP_ro*surfarea*CaP_m2_p)
sim_det.memb_det.CaP_m3.Count = round(CaP_ro*surfarea*CaP_m3_p)
print("CaP_m0 ", round(CaP_ro*surfarea*CaP_m0_p))
print("CaP_m1 ", round(CaP_ro*surfarea*CaP_m1_p))
print("CaP_m2 ", round(CaP_ro*surfarea*CaP_m2_p))
print("CaP_m3 ", round(CaP_ro*surfarea*CaP_m3_p))
print("Targeted Injection: ", round(CaP_ro*surfarea), "CaP channels")
# CaT
# From cstate: CaT_m2h1 conducting
sim_stoch.memb_stoch.CaTchan[CaT_m0h0].Count = round(CaT_ro*surfarea*CaT_m0h0_p)
sim_stoch.memb_stoch.CaTchan[CaT_m1h0].Count = round(CaT_ro*surfarea*CaT_m1h0_p)
sim_stoch.memb_stoch.CaTchan[CaT_m2h0].Count = round(CaT_ro*surfarea*CaT_m2h0_p)
sim_stoch.memb_stoch.CaTchan[CaT_m0h1].Count = round(CaT_ro*surfarea*CaT_m0h1_p)
sim_stoch.memb_stoch.CaTchan[CaT_m1h1].Count = round(CaT_ro*surfarea*CaT_m1h1_p)
sim_stoch.memb_stoch.CaTchan[CaT_m2h1].Count = round(CaT_ro*surfarea*CaT_m2h1_p)
print("Injected ", CaT_ro*surfarea, "CaT channels")
# BK
sim_det.memb_det.BK_C0.Count = round(BK_ro*surfarea*BK_C0_p)
sim_det.memb_det.BK_C1.Count = round(BK_ro*surfarea*BK_C1_p)
sim_det.memb_det.BK_C2.Count = round(BK_ro*surfarea*BK_C2_p)
sim_det.memb_det.BK_C3.Count = round(BK_ro*surfarea*BK_C3_p)
sim_det.memb_det.BK_C4.Count = round(BK_ro*surfarea*BK_C4_p)
sim_det.memb_det.BK_O0.Count = round(BK_ro*surfarea*BK_O0_p)
sim_det.memb_det.BK_O1.Count = round(BK_ro*surfarea*BK_O1_p)
sim_det.memb_det.BK_O2.Count = round(BK_ro*surfarea*BK_O2_p)
sim_det.memb_det.BK_O3.Count = round(BK_ro*surfarea*BK_O3_p)
sim_det.memb_det.BK_O4.Count = round(BK_ro*surfarea*BK_O4_p)
print("Injected ", BK_ro*surfarea, "BK channels")
# SK
sim_det.memb_det.SK_C1.Count = round(SK_ro*surfarea*SK_C1_p)
sim_det.memb_det.SK_C2.Count = round(SK_ro*surfarea*SK_C2_p)
sim_det.memb_det.SK_C3.Count = round(SK_ro*surfarea*SK_C3_p)
sim_det.memb_det.SK_C4.Count = round(SK_ro*surfarea*SK_C4_p)
sim_det.memb_det.SK_O1.Count = round(SK_ro*surfarea*SK_O1_p)
sim_det.memb_det.SK_O2.Count = round(SK_ro*surfarea*SK_O2_p)
print("Injected ", SK_ro*surfarea, "SK channels")
sim_stoch.EfieldDT = EF_DT
sim_stoch.membrane.Potential = init_pot
sim_stoch.membrane.VolRes = Ra
#cm = 1.5uF/cm2 -> 1.5e-6F/1e-4m2 ->1.5e-2 F/m2
sim_stoch.membrane.Capac = memb_capac
#### Recording #####
dc = time.strftime('%b%d_%H_%M_%S_%Y')
runPath = os.path.join(root, 'data/HybridCaburst_stochCaT/', meshfile_ab, f'{iter_n}__{dc}')
os.makedirs(runPath, exist_ok=True)
rng1.initialize(int(time.time()%1000))
datfile = open(os.path.join(runPath, 'currents.dat'), 'w')
datfile2 = open(os.path.join(runPath, 'voltage.dat'), 'w')
datfile3 = open(os.path.join(runPath, 'calcium.dat'), 'w')
stets = submemb_tets.indices
stris = submemb_tris.indices
for l in range(NTIMEPOINTS):
print("Tpnt: ", l)
#1) RUN STOCHASTIC SIMULATION i.e. compute currents and update stochastic calcium concentration
sim_stoch.run(TIMECONVERTER*l)
#2) READ STOCHASTIC CA and 3) SET DETERMINISTIC CA AND RATE CONSTANTS FOR DETERMINISTIC CHANNELS
sim_det.TETS(stets).Ca_det.Conc = sim_stoch.TETS(stets).Ca_stoch.Conc
    # Assuming V is not constant everywhere in this sim
allPots = sim_stoch.TRIS(stris).V
#3) Set the rate constants and RUN THE DETERMINISTIC SIMULATION
sim_det.TRIS(stris).CaPm0m1['fwd'].K = [1.0e3 *3.* alpha_cap(V*1.0e3)*Qt for V in allPots]
sim_det.TRIS(stris).CaPm1m2['fwd'].K = [1.0e3 *2.* alpha_cap(V*1.0e3)*Qt for V in allPots]
sim_det.TRIS(stris).CaPm2m3['fwd'].K = [1.0e3 *1.* alpha_cap(V*1.0e3)*Qt for V in allPots]
sim_det.TRIS(stris).CaPm2m3['bkw'].K = [1.0e3 *3.* beta_cap(V*1.0e3)*Qt for V in allPots]
sim_det.TRIS(stris).CaPm1m2['bkw'].K = [1.0e3 *2.* beta_cap(V*1.0e3)*Qt for V in allPots]
sim_det.TRIS(stris).CaPm0m1['bkw'].K = [1.0e3 *1.* beta_cap(V*1.0e3)*Qt for V in allPots]
sim_det.TRIS(stris).BKC0O0['fwd'].K = [f_0(V) for V in allPots]
sim_det.TRIS(stris).BKC1O1['fwd'].K = [f_1(V) for V in allPots]
sim_det.TRIS(stris).BKC2O2['fwd'].K = [f_2(V) for V in allPots]
sim_det.TRIS(stris).BKC3O3['fwd'].K = [f_3(V) for V in allPots]
sim_det.TRIS(stris).BKC4O4['fwd'].K = [f_4(V) for V in allPots]
sim_det.TRIS(stris).BKC0O0['bkw'].K = [b_0(V) for V in allPots]
sim_det.TRIS(stris).BKC1O1['bkw'].K = [b_1(V) for V in allPots]
sim_det.TRIS(stris).BKC2O2['bkw'].K = [b_2(V) for V in allPots]
sim_det.TRIS(stris).BKC3O3['bkw'].K = [b_3(V) for V in allPots]
sim_det.TRIS(stris).BKC4O4['bkw'].K = [b_4(V) for V in allPots]
#4) RUN DETERMINISTIC SIMULATION
sim_det.run(TIMECONVERTER*l)
# Now do the communication between the sims
#5)READ DETERMINISTIC CHANNELS & THEN COMPUTE CURRENT USING DETERMINISTIC GHK (could be stochastic)
So = Ca_oconc
# i) For each tet in submembrane, find the corresponding triID
# ii) For each tri, compute GHK current for each channel
# iii) Count the channel states / Spec in open states for each of the triID and compute the total current of that channel
allCa = sim_det.TETS(stets).Ca_det.Conc
currs_CaP = np.array([
nb * cf.getGHKI(CaP_P, V, 2, TEMPERATURE+273.15, Si*1.0e3, So*1.0e3)
for V, Si, nb in zip(allPots, allCa, sim_det.TRIS(stris).CaP_m3.Count)
])
currs_CaT = np.array(sim_stoch.TRIS(stris).OC_CaT.I)
allBK = np.array(sim_det.TRIS(stris).LIST(BK_O0, BK_O1, BK_O2, BK_O3, BK_O4).Count).reshape(len(stris), 5)
currs_BK = np.array([
nb * cf.getOhmI(V, BK_rev, BK_G)
for V, nb in zip(allPots, np.sum(allBK, axis=1))
])
allSK = np.array(sim_det.TRIS(stris).LIST(SK_O1, SK_O2).Count).reshape(len(stris), 2)
currs_SK = np.array([
nb * cf.getOhmI(V, SK_rev, SK_G)
for V, nb in zip(allPots, np.sum(allSK, axis=1))
])
membArea = sim_det.memb_det.Area
currs_L = np.array([
cf.getOhmI(V, L_rev, L_G) * round(L_ro * membArea) * (area / membArea)
for V, area in zip(allPots, sim_stoch.TRIS(stris).Area)
])
tcur_CaP = sum(currs_CaP)
tcur_CaT = sum(currs_CaT)
tcur_BK = sum(currs_BK)
tcur_SK = sum(currs_SK)
tca_count_det = sum(sim_det.TETS(stets).Ca_det.Count)
tca_count_stoch = sum(sim_stoch.TETS(stets).Ca_stoch.Count)
# Update sim stoch
sim_stoch.TETS(stets).Ca_stoch.Count = sim_det.TETS(stets).Ca_det.Count - (currs_CaP) * TIMECONVERTER / (2 * E_CHARGE)
sim_stoch.TRIS(stris).IClamp = currs_BK + currs_CaP + currs_SK + currs_L
datfile.write('%.6g' %(1.0e3*TIMECONVERTER*l) + ' ')
datfile.write('%.6g' %((tcur_CaP*1.0e-1)/surfarea) + ' ')
datfile.write('%.6g' %((tcur_CaT*1.0e-1)/surfarea) + ' ')
datfile.write('%.6g' %((tcur_BK*1.0e-1)/surfarea) + ' ')
datfile.write('%.6g' %((tcur_SK*1.0e-1)/surfarea) + ' ')
datfile.write('\n')
datfile2.write('%.6g' %(1.0e3*TIMECONVERTER*l) + ' ')
datfile2.write('%.6g' %(sim_stoch.TET(cent_tet).V*1.0e3) + ' ')
datfile2.write('\n')
datfile3.write('%.6g' %(1.0e3*TIMECONVERTER*l) + ' ')
datfile3.write('%.6g' %(((tca_count_det/AVOGADRO)/(vol*1.0e3))*1.0e6) + ' ')
datfile3.write('%.6g' %(((tca_count_stoch/AVOGADRO)/(vol*1.0e3))*1.0e6) + ' ')
datfile3.write('%.6g' %tca_count_det + ' ')
datfile3.write('%.6g' %tca_count_stoch + ' ')
datfile3.write('\n')
datfile.close()
datfile2.close()
datfile3.close()
## END
|
CNS-OIST/STEPS_Example
|
publication_models/API_2/Anwar_J_Neurosci_2013/HybridCaburst_stochCaT.py
|
Python
|
gpl-2.0
| 19,678
|
[
"Avogadro"
] |
d6f679d314265fe9aaba8a6f675ce800dc977dcac79855da6759380a22b349c0
|
#-------------------------------------------------------------------------------
# Copyright (c) 2012 Gael Honorez.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Public License v3.0
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/gpl.html
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#-------------------------------------------------------------------------------
from trueSkill.Numerics.GaussianDistribution import *
from math import sqrt
from math import fabs
class TruncatedGaussianCorrectionFunctions :
    # // These functions are from the bottom of page 4 of the TrueSkill paper.
#
# /**
# * The "V" function where the team performance difference is greater than the draw margin.
# *
# * In the reference F# implementation, this is referred to as "the additive
# * correction of a single-sided truncated Gaussian with unit variance."
# *
    # * @param number $drawMargin In the paper, it's referred to as just "ε".
# */
@staticmethod
def vExceedsMarginScaled(teamPerformanceDifference, drawMargin, c) :
return TruncatedGaussianCorrectionFunctions.vExceedsMargin(teamPerformanceDifference/c, drawMargin/c)
@staticmethod
def vExceedsMargin(teamPerformanceDifference, drawMargin) :
denominator = GaussianDistribution.cumulativeTo(teamPerformanceDifference - drawMargin)
if (denominator < 2.222758749e-162) :
return -teamPerformanceDifference + drawMargin
return GaussianDistribution.at(teamPerformanceDifference - drawMargin)/denominator
# /**
# * The "W" function where the team performance difference is greater than the draw margin.
# *
# * In the reference F# implementation, this is referred to as "the multiplicative
# * correction of a single-sided truncated Gaussian with unit variance."
# */
#
@staticmethod
def wExceedsMarginScaled(teamPerformanceDifference, drawMargin, c) :
return TruncatedGaussianCorrectionFunctions.wExceedsMargin(teamPerformanceDifference/c, drawMargin/c)
@staticmethod
def wExceedsMargin(teamPerformanceDifference, drawMargin) :
denominator = GaussianDistribution.cumulativeTo(teamPerformanceDifference - drawMargin)
if (denominator < 2.222758749e-162) :
if (teamPerformanceDifference < 0.0) :
return 1.0
return 0.0
vWin = TruncatedGaussianCorrectionFunctions.vExceedsMargin(teamPerformanceDifference, drawMargin)
return vWin*(vWin + teamPerformanceDifference - drawMargin)
#// the additive correction of a double-sided truncated Gaussian with unit variance
@staticmethod
def vWithinMarginScaled(teamPerformanceDifference, drawMargin, c) :
return TruncatedGaussianCorrectionFunctions.vWithinMargin(teamPerformanceDifference/c, drawMargin/c)
#
@staticmethod
def vWithinMargin(teamPerformanceDifference, drawMargin) :
teamPerformanceDifferenceAbsoluteValue = fabs(teamPerformanceDifference)
denominator = GaussianDistribution.cumulativeTo(drawMargin - teamPerformanceDifferenceAbsoluteValue) - GaussianDistribution.cumulativeTo(-drawMargin - teamPerformanceDifferenceAbsoluteValue)
if (denominator < 2.222758749e-162) :
if (teamPerformanceDifference < 0.0) :
return -teamPerformanceDifference - drawMargin
return -teamPerformanceDifference + drawMargin
numerator = GaussianDistribution.at(-drawMargin - teamPerformanceDifferenceAbsoluteValue) - GaussianDistribution.at(drawMargin - teamPerformanceDifferenceAbsoluteValue)
if (teamPerformanceDifference < 0.0) :
return -numerator/denominator
return numerator/denominator
# // the multiplicative correction of a double-sided truncated Gaussian with unit variance
@staticmethod
def wWithinMarginScaled(teamPerformanceDifference, drawMargin, c) :
return TruncatedGaussianCorrectionFunctions.wWithinMargin(teamPerformanceDifference/c, drawMargin/c)
# // From F#:
@staticmethod
def wWithinMargin(teamPerformanceDifference, drawMargin) :
        teamPerformanceDifferenceAbsoluteValue = fabs(teamPerformanceDifference)
denominator = GaussianDistribution.cumulativeTo(drawMargin - teamPerformanceDifferenceAbsoluteValue) - GaussianDistribution.cumulativeTo(-drawMargin - teamPerformanceDifferenceAbsoluteValue)
if (denominator < 2.222758749e-162) :
            return 1.0
vt = TruncatedGaussianCorrectionFunctions.vWithinMargin(teamPerformanceDifferenceAbsoluteValue, drawMargin)
return vt*vt + ((drawMargin - teamPerformanceDifferenceAbsoluteValue)*GaussianDistribution.at(drawMargin - teamPerformanceDifferenceAbsoluteValue) - (-drawMargin - teamPerformanceDifferenceAbsoluteValue) * GaussianDistribution.at(-drawMargin - teamPerformanceDifferenceAbsoluteValue))/denominator
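# Illustrative self-check (not part of the original port): for a clear win the
# additive correction should be positive and the multiplicative correction
# should lie between 0 and 1; the within-margin variants are printed alongside.
if __name__ == "__main__":
    diff, margin = 1.0, 0.1
    print(TruncatedGaussianCorrectionFunctions.vExceedsMargin(diff, margin))
    print(TruncatedGaussianCorrectionFunctions.wExceedsMargin(diff, margin))
    print(TruncatedGaussianCorrectionFunctions.vWithinMargin(diff, margin))
    print(TruncatedGaussianCorrectionFunctions.wWithinMargin(diff, margin))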
|
IDragonfire/modular-client
|
src/trueSkill/TrueSkill/TruncatedGaussianCorrectionFunctions.py
|
Python
|
gpl-3.0
| 5,615
|
[
"Gaussian"
] |
6b0376ffdb2bcf51e5dc3ad46caf0c7004a5056f134c6c986dc2a21c0a3319ba
|
# -*- coding: utf-8 -*-
"""
This sample demonstrates a simple skill built with the Amazon Alexa Skills Kit.
The Intent Schema, Custom Slots, and Sample Utterances for this skill, as well
as testing instructions are located at http://amzn.to/1LzFrj6
For additional samples, visit the Alexa Skills Kit Getting Started guide at
http://amzn.to/1LGWsLG
"""
from __future__ import print_function
import logging
import time
import json
import uuid
import random
# Setup logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# define slot name
class F:
NumberSlot = 'NumberSlot'
# my guess number is {NumberSlot}
GuessNumberIntent = "GuessNumberIntent"
# what's my guess number
WhatsMyGuessNumberIntent = "WhatsMyGuessNumberIntent"
# restart the game
RestartGuessNumberIntent = "RestartGuessNumberIntent"
randomNumber = 'randomNumber'
guessNumber = 'guessNumber'
# guessTimes = 'guessTimes'
# define constants
BEGIN_NUM = 1
END_NUM = 10
def get_uuid():
return str(uuid.uuid4())
def get_utc_timestamp(seconds=None):
return time.strftime("%Y-%m-%dT%H:%M:%S.00Z", time.gmtime(seconds))
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': "SessionSpeechlet - " + title,
'content': "SessionSpeechlet - " + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
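# Example (illustrative): build_speechlet_response("Welcome", "Hi there", "Say hi", False)
# returns PlainText outputSpeech "Hi there", a Simple card titled
# "SessionSpeechlet - Welcome", a PlainText reprompt "Say hi", and
# shouldEndSession=False.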
def build_response(session_attributes, speechlet_response, directive_dict={}):
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response,
'directive': directive_dict
}
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could
add those here
"""
session_attributes = {
        F.randomNumber: str(random.randint(BEGIN_NUM, END_NUM)),
}
card_title = "Welcome"
speech_output = "Welcome to the guess number game. " \
"Please tell me your guess number by saying, my guess number is one. It should be a number from {} to {}.".format(BEGIN_NUM, END_NUM)
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with this text.
reprompt_text = "Please tell me your guess number by saying, my guess number is one. It should be a number from {} to {}.".format(BEGIN_NUM, END_NUM)
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def handle_session_end_request():
card_title = "Session Ended"
speech_output = "Thank you for playing guess number. Have a nice day! "
# Setting this to true ends the session and exits the skill.
should_end_session = True
return build_response({}, build_speechlet_response(
card_title, speech_output, None, should_end_session))
def guess_number_in_session(intent, session):
""" Sets the number in the session and prepares the speech to reply to the user.
"""
card_title = intent['name']
session_attributes = {}
should_end_session = False
# get random number
randomNumber = "1"
if session.get('attributes', {}) and F.randomNumber in session.get('attributes', {}):
randomNumber = session['attributes'][F.randomNumber]
# set random number back to session attributes
session_attributes[F.randomNumber] = randomNumber
# get guess number
if F.NumberSlot in intent['slots']:
guess_number = intent['slots'][F.NumberSlot]['value']
# set guess number to session attributes
session_attributes[F.guessNumber] = guess_number
if int(guess_number) == int(randomNumber): # guess right
speech_output = "Congratulations! You got it, the number is {}. You can restart the game by saying, restart the game.".format(guess_number)
reprompt_text = "You can restart the game by saying, restart the game."
else: # guess wrong
if int(guess_number) > int(randomNumber): # guess number too big
speech_output = "Sorry! The number should be smaller than {}. Please try another number.".format(guess_number)
else: # guess number too small
speech_output = "Sorry! The number should be bigger than {}. Please try another number.".format(guess_number)
reprompt_text = "Please tell me your guess number by saying, my guess number is one. It should be a number from {} to {}.".format(BEGIN_NUM, END_NUM)
else:
speech_output = "I'm not sure what your guess number is. Please try again."
reprompt_text = "I'm not sure what your guess number is. " \
"Please tell me your guess number by saying, my guess number is one. It should be a number from {} to {}.".format(BEGIN_NUM, END_NUM)
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def get_guess_number_from_session(intent, session):
session_attributes = {}
if session.get('attributes', {}) and F.guessNumber in session.get('attributes', {}):
guessNumber = session['attributes'][F.guessNumber]
speech_output = "Your guess color is {}.".format(guessNumber)
should_end_session = False
else:
speech_output = "I'm not sure what your guess number is. " \
"Please tell me your guess number by saying, my guess number is one. It should be a number from {} to {}.".format(BEGIN_NUM, END_NUM)
should_end_session = False
# Setting reprompt_text to None signifies that we do not want to reprompt
# the user. If the user does not respond or says something that is not
# understood, the session will end.
reprompt_text = None
return build_response(session_attributes, build_speechlet_response(
intent['name'], speech_output, reprompt_text, should_end_session))
# --------------- Events ------------------
def on_session_started(session_started_request, session):
""" Called when the session starts """
print("on_session_started requestId=" + session_started_request['requestId'] + ", sessionId=" + session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they want
"""
print("on_launch requestId=" + launch_request['requestId'] + ", sessionId=" + session['sessionId'])
# Dispatch to your skill's launch
return get_welcome_response()
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
print("on_intent requestId=" + intent_request['requestId'] + ", sessionId=" + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
# Dispatch to your skill's intent handlers
if intent_name == F.GuessNumberIntent:
return guess_number_in_session(intent, session)
elif intent_name == F.WhatsMyGuessNumberIntent:
return get_guess_number_from_session(intent, session)
elif intent_name == F.RestartGuessNumberIntent:
return get_welcome_response()
elif intent_name == "AMAZON.HelpIntent":
return get_welcome_response()
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request()
else:
raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session.
Is not called when the skill returns should_end_session=true
"""
print("on_session_ended requestId=" + session_ended_request['requestId'] + ", sessionId=" + session['sessionId'])
# add cleanup logic here
pass
# --------------- Main handler ------------------
def lambda_handler(request, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the request parameter.
"""
print("request json=\n{}".format(json.dumps(request, indent=4, sort_keys=True)))
if request.get('directive', {}) or request.get('payload', {}): # SMART HOME
logger.info("This is a smart home directive")
try:
version = get_directive_version(request)
if version == "3":
logger.info("Received v3 directive!")
if request["directive"]["header"]["name"] == "Discover":
response = handle_discovery_v3(request)
else:
response = handle_non_discovery_v3(request)
else:
logger.info("Received v2 directive!")
if request["header"]["namespace"] == "Alexa.ConnectedHome.Discovery":
response = handle_discovery_v2()
else:
response = handle_non_discovery_v2(request)
logger.info("response json=\n{}".format(json.dumps(response, indent=4, sort_keys=True)))
return response
except ValueError as error:
logger.error(error)
raise
else:
logger.info("This is a custom alexa skill request")
print("request.session.application.applicationId=" + request['session']['application']['applicationId'])
"""
Uncomment this if statement and populate with your skill's application ID to
prevent someone else from configuring a skill that sends requests to this
function.
"""
# if (request['session']['application']['applicationId'] !=
# "amzn1.echo-sdk-ams.app.[unique-value-here]"):
# raise ValueError("Invalid Application ID")
if request['session']['new']:
on_session_started({'requestId': request['request']['requestId']}, request['session'])
if request['request']['type'] == "LaunchRequest":
return on_launch(request['request'], request['session'])
elif request['request']['type'] == "IntentRequest":
return on_intent(request['request'], request['session'])
elif request['request']['type'] == "SessionEndedRequest":
return on_session_ended(request['request'], request['session'])
# ------------------------------ Smart home module ---------------------------------
SAMPLE_APPLIANCES = [
{
"applianceId": "endpoint-001",
"manufacturerName": "Sample Manufacturer",
"modelName": "Smart Switch",
"version": "1",
"friendlyName": "Switch",
"friendlyDescription": "001 Switch that can only be turned on/off",
"isReachable": True,
"actions": [
"turnOn",
"turnOff"
],
"additionalApplianceDetails": {
"detail1": "For simplicity, this is the only appliance",
"detail2": "that has some values in the additionalApplianceDetails"
}
},
{
"applianceId": "endpoint-002",
"manufacturerName": "Sample Manufacturer",
"modelName": "Smart Light",
"version": "1",
"friendlyName": "Light",
"friendlyDescription": "002 Light that is dimmable and can change color and color temperature",
"isReachable": True,
"actions": [
"turnOn",
"turnOff",
"setPercentage",
"incrementPercentage",
"decrementPercentage",
"setColor",
"setColorTemperature",
"incrementColorTemperature",
"decrementColorTemperature"
],
"additionalApplianceDetails": {}
},
]
def get_directive_version(request):
try:
return request["directive"]["header"]["payloadVersion"]
except:
try:
return request["header"]["payloadVersion"]
except:
return "-1"
# --------------- v2 handlers ------------------
def handle_discovery_v2():
header = {
"namespace": "Alexa.ConnectedHome.Discovery",
"name": "DiscoverAppliancesResponse",
"payloadVersion": "2",
"messageId": get_uuid()
}
payload = {
"discoveredAppliances": SAMPLE_APPLIANCES
}
response = {
"header": header,
"payload": payload
}
return response
def handle_non_discovery_v2(request):
request_name = request["header"]["name"]
if request_name == "TurnOnRequest":
header = {
"namespace": "Alexa.ConnectedHome.Control",
"name": "TurnOnConfirmation",
"payloadVersion": "2",
"messageId": get_uuid()
}
payload = {}
elif request_name == "TurnOffRequest":
header = {
"namespace": "Alexa.ConnectedHome.Control",
"name": "TurnOffConfirmation",
"payloadVersion": "2",
"messageId": get_uuid()
}
# other handlers omitted in this example
payload = {}
response = {
"header": header,
"payload": payload
}
return response
# --------------- v3 handlers ------------------
def handle_discovery_v3(request):
endpoints = []
for appliance in SAMPLE_APPLIANCES:
endpoints.append(get_endpoint_from_v2_appliance(appliance))
response = {
"event": {
"header": {
"namespace": "Alexa.Discovery",
"name": "Discover.Response",
"payloadVersion": "3",
"messageId": get_uuid()
},
"payload": {
"endpoints": endpoints
}
}
}
return response
def handle_non_discovery_v3(request):
request_namespace = request["directive"]["header"]["namespace"]
request_name = request["directive"]["header"]["name"]
if request_namespace == "Alexa.PowerController":
if request_name == "TurnOn":
value = "ON"
else:
value = "OFF"
response = {
"context": {
"properties": [
{
"namespace": "Alexa.PowerController",
"name": "powerState",
"value": value,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 500
}
]
},
"event": {
"header": {
"namespace": "Alexa",
"name": "Response",
"payloadVersion": "3",
"messageId": get_uuid(),
"correlationToken": request["directive"]["header"]["correlationToken"]
},
"endpoint": {
"scope": {
"type": "BearerToken",
"token": "access-token-from-Amazon"
},
"endpointId": request["directive"]["endpoint"]["endpointId"]
},
"payload": {}
}
}
return response
elif request_namespace == "Alexa.Authorization":
if request_name == "AcceptGrant":
response = {
"event": {
"header": {
"namespace": "Alexa.Authorization",
"name": "AcceptGrant.Response",
"payloadVersion": "3",
"messageId": "5f8a426e-01e4-4cc9-8b79-65f8bd0fd8a4"
},
"payload": {}
}
}
return response
# other handlers omitted in this example
# --------------- v3 utility functions (v2 compatibility) ------------------
def get_endpoint_from_v2_appliance(appliance):
endpoint = {
"endpointId": appliance["applianceId"],
"manufacturerName": appliance["manufacturerName"],
"friendlyName": appliance["friendlyName"],
"description": appliance["friendlyDescription"],
"displayCategories": [],
"cookie": appliance["additionalApplianceDetails"],
"capabilities": []
}
endpoint["displayCategories"] = get_display_categories_from_v2_appliance(appliance)
endpoint["capabilities"] = get_capabilities_from_v2_appliance(appliance)
return endpoint
def get_display_categories_from_v2_appliance(appliance):
model_name = appliance["modelName"]
if model_name == "Smart Switch": displayCategories = ["SWITCH"]
elif model_name == "Smart Light": displayCategories = ["LIGHT"]
elif model_name == "Smart White Light": displayCategories = ["LIGHT"]
else: displayCategories = ["OTHER"]
return displayCategories
def get_capabilities_from_v2_appliance(appliance):
model_name = appliance["modelName"]
if model_name == 'Smart Switch':
capabilities = [
{
"type": "AlexaInterface",
"interface": "Alexa.PowerController",
"version": "3",
"properties": {
"supported": [
{ "name": "powerState" }
],
"proactivelyReported": True,
"retrievable": True
}
}
]
elif model_name == "Smart Light":
capabilities = [
{
"type": "AlexaInterface",
"interface": "Alexa.PowerController",
"version": "3",
"properties": {
"supported": [
{ "name": "powerState" }
],
"proactivelyReported": True,
"retrievable": True
}
},
{
"type": "AlexaInterface",
"interface": "Alexa.ColorController",
"version": "3",
"properties": {
"supported": [
{ "name": "color" }
],
"proactivelyReported": True,
"retrievable": True
}
},
{
"type": "AlexaInterface",
"interface": "Alexa.ColorTemperatureController",
"version": "3",
"properties": {
"supported": [
{ "name": "colorTemperatureInKelvin" }
],
"proactivelyReported": True,
"retrievable": True
}
},
{
"type": "AlexaInterface",
"interface": "Alexa.BrightnessController",
"version": "3",
"properties": {
"supported": [
{ "name": "brightness" }
],
"proactivelyReported": True,
"retrievable": True
}
},
{
"type": "AlexaInterface",
"interface": "Alexa.PowerLevelController",
"version": "3",
"properties": {
"supported": [
{ "name": "powerLevel" }
],
"proactivelyReported": True,
"retrievable": True
}
},
{
"type": "AlexaInterface",
"interface": "Alexa.PercentageController",
"version": "3",
"properties": {
"supported": [
{ "name": "percentage" }
],
"proactivelyReported": True,
"retrievable": True
}
}
]
elif model_name == "Smart White Light":
capabilities = [
{
"type": "AlexaInterface",
"interface": "Alexa.PowerController",
"version": "3",
"properties": {
"supported": [
{ "name": "powerState" }
],
"proactivelyReported": True,
"retrievable": True
}
},
{
"type": "AlexaInterface",
"interface": "Alexa.ColorTemperatureController",
"version": "3",
"properties": {
"supported": [
{ "name": "colorTemperatureInKelvin" }
],
"proactivelyReported": True,
"retrievable": True
}
},
{
"type": "AlexaInterface",
"interface": "Alexa.BrightnessController",
"version": "3",
"properties": {
"supported": [
{ "name": "brightness" }
],
"proactivelyReported": True,
"retrievable": True
}
},
{
"type": "AlexaInterface",
"interface": "Alexa.PowerLevelController",
"version": "3",
"properties": {
"supported": [
{ "name": "powerLevel" }
],
"proactivelyReported": True,
"retrievable": True
}
},
{
"type": "AlexaInterface",
"interface": "Alexa.PercentageController",
"version": "3",
"properties": {
"supported": [
{ "name": "percentage" }
],
"proactivelyReported": True,
"retrievable": True
}
}
]
else:
# in this example, just return simple on/off capability
capabilities = [
{
"type": "AlexaInterface",
"interface": "Alexa.PowerController",
"version": "3",
"properties": {
"supported": [
{ "name": "powerState" }
],
"proactivelyReported": True,
"retrievable": True
}
}
]
# additional capabilities that are required for each endpoint
endpoint_health_capability = {
"type": "AlexaInterface",
"interface": "Alexa.EndpointHealth",
"version": "3",
"properties": {
"supported":[
{ "name":"connectivity" }
],
"proactivelyReported": True,
"retrievable": True
}
}
alexa_interface_capability = {
"type": "AlexaInterface",
"interface": "Alexa",
"version": "3"
}
capabilities.append(endpoint_health_capability)
capabilities.append(alexa_interface_capability)
return capabilities
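# Minimal local smoke test (illustrative only, not part of the sample): route a
# hand-built LaunchRequest through lambda_handler. All IDs below are placeholders.
if __name__ == "__main__":
    sample_event = {
        "session": {
            "new": True,
            "sessionId": "session-0000",
            "application": {"applicationId": "amzn1.ask.skill.placeholder"},
            "attributes": {},
        },
        "request": {"type": "LaunchRequest", "requestId": "request-0000"},
    }
    print(json.dumps(lambda_handler(sample_event, None), indent=4))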
|
Ernestyj/PyStudy
|
alexa-smarthome/sample_lambda/lambda_function_guess_number.py
|
Python
|
apache-2.0
| 23,635
|
[
"VisIt"
] |
cfc81585b34a87b50ad785ddf48909f44b7f5b227251926f3b7f2b38f2404419
|
config = {
# environment this app is running on: localhost, testing, production
'environment': "production",
# webapp2 sessions
'webapp2_extras.sessions' : {'secret_key': '_PUT_KEY_HERE_YOUR_SECRET_KEY_'},
# webapp2 authentication
'webapp2_extras.auth' : {'user_model': 'boilerplate.models.User',
'cookie_name': 'session_name'},
# jinja2 templates
'webapp2_extras.jinja2' : {'template_path': ['templates','boilerplate/templates', 'admin/templates'],
'environment_args': {'extensions': ['jinja2.ext.i18n']}},
# application name
'app_name' : "City Watchers",
# the default language code for the application.
# should match whatever language the site uses when i18n is disabled
'app_lang' : 'pt_BR',
# Locale code = <language>_<territory> (ie 'en_US')
# to pick locale codes see http://cldr.unicode.org/index/cldr-spec/picking-the-right-language-code
# also see http://www.sil.org/iso639-3/codes.asp
# Language codes defined under iso 639-1 http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
# Territory codes defined under iso 3166-1 alpha-2 http://en.wikipedia.org/wiki/ISO_3166-1
# disable i18n if locales array is empty or None
'locales' : ['en_US', 'es_ES', 'it_IT', 'zh_CN', 'id_ID', 'fr_FR', 'de_DE', 'ru_RU', 'pt_BR', 'cs_CZ'],
'contact_sender' : "city-watchers@city-watchers.appspotmail.com",
'contact_recipient' : "city-watchers@city-watchers.appspotmail.com",
# Password AES Encryption Parameters
'aes_key' : "12_24_32_BYTES_KEY_FOR_PASSWORDS",
'salt' : "_PUT_SALT_HERE_TO_SHA512_PASSWORDS_",
# get your own consumer key and consumer secret by registering at https://dev.twitter.com/apps
# callback url must be: http://[YOUR DOMAIN]/login/twitter/complete
'twitter_consumer_key' : 'PUT_YOUR_TWITTER_CONSUMER_KEY_HERE',
'twitter_consumer_secret' : 'PUT_YOUR_TWITTER_CONSUMER_SECRET_HERE',
#Facebook Login
# get your own consumer key and consumer secret by registering at https://developers.facebook.com/apps
#Very Important: set the site_url= your domain in the application settings in the facebook app settings page
# callback url must be: http://[YOUR DOMAIN]/login/facebook/complete
'fb_api_key' : 'PUT_YOUR_FACEBOOK_PUBLIC_KEY_HERE',
    'fb_secret' : 'PUT_YOUR_FACEBOOK_APP_SECRET_HERE',
#Linkedin Login
#Get you own api key and secret from https://www.linkedin.com/secure/developer
'linkedin_api' : 'PUT_YOUR_LINKEDIN_PUBLIC_KEY_HERE',
    'linkedin_secret' : 'PUT_YOUR_LINKEDIN_SECRET_KEY_HERE',
# Github login
# Register apps here: https://github.com/settings/applications/new
'github_server' : 'github.com',
'github_redirect_uri' : 'http://www.example.com/social_login/github/complete',
'github_client_id' : 'PUT_YOUR_GITHUB_CLIENT_ID_HERE',
'github_client_secret' : 'PUT_YOUR_GITHUB_CLIENT_SECRET_HERE',
# get your own recaptcha keys by registering at http://www.google.com/recaptcha/
'captcha_public_key' : "6Lduvt0SAAAAAPtLAk34sIc6zQD3Tu2VeL5HNDSI",
'captcha_private_key' : "6Lduvt0SAAAAACB6T9SZL-l8Wdx2MYEIOtFr2P3a",
# Leave blank "google_analytics_domain" if you only want Analytics code
'google_analytics_domain' : "YOUR_PRIMARY_DOMAIN (e.g. google.com)",
'google_analytics_code' : "UA-XXXXX-X",
# add status codes and templates used to catch and display errors
# if a status code is not listed here it will use the default app engine
# stacktrace error page or browser error page
'error_templates' : {
403: 'errors/default_error.html',
404: 'errors/default_error.html',
500: 'errors/default_error.html',
},
# Enable Federated login (OpenID and OAuth)
# Google App Engine Settings must be set to Authentication Options: Federated Login
'enable_federated_login' : True,
# jinja2 base layout template
'base_layout' : 'base.html',
# send error emails to developers
'send_mail_developer' : True,
# fellas' list
'developers' : (
('Pedro Pimenta', 'pedro.a.m.pimenta@gmail.com'),
),
# If true, it will write in datastore a log of every email sent
'log_email' : True,
# If true, it will write in datastore a log of every visit
'log_visit' : True,
# ----> ADD MORE CONFIGURATION OPTIONS HERE <----
}
|
rittersport3/CityWatchers
|
config/production.py
|
Python
|
lgpl-3.0
| 4,100
|
[
"VisIt"
] |
bf131474c6114efd29da608be4e4d55d1d86f0fcfae63e4f585d42e9269c43c8
|
# proxy module
from __future__ import absolute_import
from mayavi.components.custom_grid_plane import *
|
enthought/etsproxy
|
enthought/mayavi/components/custom_grid_plane.py
|
Python
|
bsd-3-clause
| 104
|
[
"Mayavi"
] |
73dc44ceb7913ee85d2bd76a6e763d9ecb63da3cd6ca4af9775b8a372e065980
|
# coding: utf-8
from __future__ import unicode_literals
import base64
import binascii
import collections
import ctypes
import email
import getpass
import io
import itertools
import optparse
import os
import platform
import re
import shlex
import shutil
import socket
import struct
import subprocess
import sys
import xml.etree.ElementTree
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
try:
import urllib.error as compat_urllib_error
except ImportError: # Python 2
import urllib2 as compat_urllib_error
try:
import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
import urllib as compat_urllib_parse
try:
from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError: # Python 2
from urlparse import urlparse as compat_urllib_parse_urlparse
try:
import urllib.parse as compat_urlparse
except ImportError: # Python 2
import urlparse as compat_urlparse
try:
import urllib.response as compat_urllib_response
except ImportError: # Python 2
import urllib as compat_urllib_response
try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
import cookielib as compat_cookiejar
if sys.version_info[0] == 2:
class compat_cookiejar_Cookie(compat_cookiejar.Cookie):
def __init__(self, version, name, value, *args, **kwargs):
if isinstance(name, compat_str):
name = name.encode()
if isinstance(value, compat_str):
value = value.encode()
compat_cookiejar.Cookie.__init__(self, version, name, value, *args, **kwargs)
else:
compat_cookiejar_Cookie = compat_cookiejar.Cookie
try:
import http.cookies as compat_cookies
except ImportError: # Python 2
import Cookie as compat_cookies
try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
try: # Python >= 3.3
compat_html_entities_html5 = compat_html_entities.html5
except AttributeError:
# Copied from CPython 3.5.1 html/entities.py
compat_html_entities_html5 = {
'Aacute': '\xc1',
'aacute': '\xe1',
'Aacute;': '\xc1',
'aacute;': '\xe1',
'Abreve;': '\u0102',
'abreve;': '\u0103',
'ac;': '\u223e',
'acd;': '\u223f',
'acE;': '\u223e\u0333',
'Acirc': '\xc2',
'acirc': '\xe2',
'Acirc;': '\xc2',
'acirc;': '\xe2',
'acute': '\xb4',
'acute;': '\xb4',
'Acy;': '\u0410',
'acy;': '\u0430',
'AElig': '\xc6',
'aelig': '\xe6',
'AElig;': '\xc6',
'aelig;': '\xe6',
'af;': '\u2061',
'Afr;': '\U0001d504',
'afr;': '\U0001d51e',
'Agrave': '\xc0',
'agrave': '\xe0',
'Agrave;': '\xc0',
'agrave;': '\xe0',
'alefsym;': '\u2135',
'aleph;': '\u2135',
'Alpha;': '\u0391',
'alpha;': '\u03b1',
'Amacr;': '\u0100',
'amacr;': '\u0101',
'amalg;': '\u2a3f',
'AMP': '&',
'amp': '&',
'AMP;': '&',
'amp;': '&',
'And;': '\u2a53',
'and;': '\u2227',
'andand;': '\u2a55',
'andd;': '\u2a5c',
'andslope;': '\u2a58',
'andv;': '\u2a5a',
'ang;': '\u2220',
'ange;': '\u29a4',
'angle;': '\u2220',
'angmsd;': '\u2221',
'angmsdaa;': '\u29a8',
'angmsdab;': '\u29a9',
'angmsdac;': '\u29aa',
'angmsdad;': '\u29ab',
'angmsdae;': '\u29ac',
'angmsdaf;': '\u29ad',
'angmsdag;': '\u29ae',
'angmsdah;': '\u29af',
'angrt;': '\u221f',
'angrtvb;': '\u22be',
'angrtvbd;': '\u299d',
'angsph;': '\u2222',
'angst;': '\xc5',
'angzarr;': '\u237c',
'Aogon;': '\u0104',
'aogon;': '\u0105',
'Aopf;': '\U0001d538',
'aopf;': '\U0001d552',
'ap;': '\u2248',
'apacir;': '\u2a6f',
'apE;': '\u2a70',
'ape;': '\u224a',
'apid;': '\u224b',
'apos;': "'",
'ApplyFunction;': '\u2061',
'approx;': '\u2248',
'approxeq;': '\u224a',
'Aring': '\xc5',
'aring': '\xe5',
'Aring;': '\xc5',
'aring;': '\xe5',
'Ascr;': '\U0001d49c',
'ascr;': '\U0001d4b6',
'Assign;': '\u2254',
'ast;': '*',
'asymp;': '\u2248',
'asympeq;': '\u224d',
'Atilde': '\xc3',
'atilde': '\xe3',
'Atilde;': '\xc3',
'atilde;': '\xe3',
'Auml': '\xc4',
'auml': '\xe4',
'Auml;': '\xc4',
'auml;': '\xe4',
'awconint;': '\u2233',
'awint;': '\u2a11',
'backcong;': '\u224c',
'backepsilon;': '\u03f6',
'backprime;': '\u2035',
'backsim;': '\u223d',
'backsimeq;': '\u22cd',
'Backslash;': '\u2216',
'Barv;': '\u2ae7',
'barvee;': '\u22bd',
'Barwed;': '\u2306',
'barwed;': '\u2305',
'barwedge;': '\u2305',
'bbrk;': '\u23b5',
'bbrktbrk;': '\u23b6',
'bcong;': '\u224c',
'Bcy;': '\u0411',
'bcy;': '\u0431',
'bdquo;': '\u201e',
'becaus;': '\u2235',
'Because;': '\u2235',
'because;': '\u2235',
'bemptyv;': '\u29b0',
'bepsi;': '\u03f6',
'bernou;': '\u212c',
'Bernoullis;': '\u212c',
'Beta;': '\u0392',
'beta;': '\u03b2',
'beth;': '\u2136',
'between;': '\u226c',
'Bfr;': '\U0001d505',
'bfr;': '\U0001d51f',
'bigcap;': '\u22c2',
'bigcirc;': '\u25ef',
'bigcup;': '\u22c3',
'bigodot;': '\u2a00',
'bigoplus;': '\u2a01',
'bigotimes;': '\u2a02',
'bigsqcup;': '\u2a06',
'bigstar;': '\u2605',
'bigtriangledown;': '\u25bd',
'bigtriangleup;': '\u25b3',
'biguplus;': '\u2a04',
'bigvee;': '\u22c1',
'bigwedge;': '\u22c0',
'bkarow;': '\u290d',
'blacklozenge;': '\u29eb',
'blacksquare;': '\u25aa',
'blacktriangle;': '\u25b4',
'blacktriangledown;': '\u25be',
'blacktriangleleft;': '\u25c2',
'blacktriangleright;': '\u25b8',
'blank;': '\u2423',
'blk12;': '\u2592',
'blk14;': '\u2591',
'blk34;': '\u2593',
'block;': '\u2588',
'bne;': '=\u20e5',
'bnequiv;': '\u2261\u20e5',
'bNot;': '\u2aed',
'bnot;': '\u2310',
'Bopf;': '\U0001d539',
'bopf;': '\U0001d553',
'bot;': '\u22a5',
'bottom;': '\u22a5',
'bowtie;': '\u22c8',
'boxbox;': '\u29c9',
'boxDL;': '\u2557',
'boxDl;': '\u2556',
'boxdL;': '\u2555',
'boxdl;': '\u2510',
'boxDR;': '\u2554',
'boxDr;': '\u2553',
'boxdR;': '\u2552',
'boxdr;': '\u250c',
'boxH;': '\u2550',
'boxh;': '\u2500',
'boxHD;': '\u2566',
'boxHd;': '\u2564',
'boxhD;': '\u2565',
'boxhd;': '\u252c',
'boxHU;': '\u2569',
'boxHu;': '\u2567',
'boxhU;': '\u2568',
'boxhu;': '\u2534',
'boxminus;': '\u229f',
'boxplus;': '\u229e',
'boxtimes;': '\u22a0',
'boxUL;': '\u255d',
'boxUl;': '\u255c',
'boxuL;': '\u255b',
'boxul;': '\u2518',
'boxUR;': '\u255a',
'boxUr;': '\u2559',
'boxuR;': '\u2558',
'boxur;': '\u2514',
'boxV;': '\u2551',
'boxv;': '\u2502',
'boxVH;': '\u256c',
'boxVh;': '\u256b',
'boxvH;': '\u256a',
'boxvh;': '\u253c',
'boxVL;': '\u2563',
'boxVl;': '\u2562',
'boxvL;': '\u2561',
'boxvl;': '\u2524',
'boxVR;': '\u2560',
'boxVr;': '\u255f',
'boxvR;': '\u255e',
'boxvr;': '\u251c',
'bprime;': '\u2035',
'Breve;': '\u02d8',
'breve;': '\u02d8',
'brvbar': '\xa6',
'brvbar;': '\xa6',
'Bscr;': '\u212c',
'bscr;': '\U0001d4b7',
'bsemi;': '\u204f',
'bsim;': '\u223d',
'bsime;': '\u22cd',
'bsol;': '\\',
'bsolb;': '\u29c5',
'bsolhsub;': '\u27c8',
'bull;': '\u2022',
'bullet;': '\u2022',
'bump;': '\u224e',
'bumpE;': '\u2aae',
'bumpe;': '\u224f',
'Bumpeq;': '\u224e',
'bumpeq;': '\u224f',
'Cacute;': '\u0106',
'cacute;': '\u0107',
'Cap;': '\u22d2',
'cap;': '\u2229',
'capand;': '\u2a44',
'capbrcup;': '\u2a49',
'capcap;': '\u2a4b',
'capcup;': '\u2a47',
'capdot;': '\u2a40',
'CapitalDifferentialD;': '\u2145',
'caps;': '\u2229\ufe00',
'caret;': '\u2041',
'caron;': '\u02c7',
'Cayleys;': '\u212d',
'ccaps;': '\u2a4d',
'Ccaron;': '\u010c',
'ccaron;': '\u010d',
'Ccedil': '\xc7',
'ccedil': '\xe7',
'Ccedil;': '\xc7',
'ccedil;': '\xe7',
'Ccirc;': '\u0108',
'ccirc;': '\u0109',
'Cconint;': '\u2230',
'ccups;': '\u2a4c',
'ccupssm;': '\u2a50',
'Cdot;': '\u010a',
'cdot;': '\u010b',
'cedil': '\xb8',
'cedil;': '\xb8',
'Cedilla;': '\xb8',
'cemptyv;': '\u29b2',
'cent': '\xa2',
'cent;': '\xa2',
'CenterDot;': '\xb7',
'centerdot;': '\xb7',
'Cfr;': '\u212d',
'cfr;': '\U0001d520',
'CHcy;': '\u0427',
'chcy;': '\u0447',
'check;': '\u2713',
'checkmark;': '\u2713',
'Chi;': '\u03a7',
'chi;': '\u03c7',
'cir;': '\u25cb',
'circ;': '\u02c6',
'circeq;': '\u2257',
'circlearrowleft;': '\u21ba',
'circlearrowright;': '\u21bb',
'circledast;': '\u229b',
'circledcirc;': '\u229a',
'circleddash;': '\u229d',
'CircleDot;': '\u2299',
'circledR;': '\xae',
'circledS;': '\u24c8',
'CircleMinus;': '\u2296',
'CirclePlus;': '\u2295',
'CircleTimes;': '\u2297',
'cirE;': '\u29c3',
'cire;': '\u2257',
'cirfnint;': '\u2a10',
'cirmid;': '\u2aef',
'cirscir;': '\u29c2',
'ClockwiseContourIntegral;': '\u2232',
'CloseCurlyDoubleQuote;': '\u201d',
'CloseCurlyQuote;': '\u2019',
'clubs;': '\u2663',
'clubsuit;': '\u2663',
'Colon;': '\u2237',
'colon;': ':',
'Colone;': '\u2a74',
'colone;': '\u2254',
'coloneq;': '\u2254',
'comma;': ',',
'commat;': '@',
'comp;': '\u2201',
'compfn;': '\u2218',
'complement;': '\u2201',
'complexes;': '\u2102',
'cong;': '\u2245',
'congdot;': '\u2a6d',
'Congruent;': '\u2261',
'Conint;': '\u222f',
'conint;': '\u222e',
'ContourIntegral;': '\u222e',
'Copf;': '\u2102',
'copf;': '\U0001d554',
'coprod;': '\u2210',
'Coproduct;': '\u2210',
'COPY': '\xa9',
'copy': '\xa9',
'COPY;': '\xa9',
'copy;': '\xa9',
'copysr;': '\u2117',
'CounterClockwiseContourIntegral;': '\u2233',
'crarr;': '\u21b5',
'Cross;': '\u2a2f',
'cross;': '\u2717',
'Cscr;': '\U0001d49e',
'cscr;': '\U0001d4b8',
'csub;': '\u2acf',
'csube;': '\u2ad1',
'csup;': '\u2ad0',
'csupe;': '\u2ad2',
'ctdot;': '\u22ef',
'cudarrl;': '\u2938',
'cudarrr;': '\u2935',
'cuepr;': '\u22de',
'cuesc;': '\u22df',
'cularr;': '\u21b6',
'cularrp;': '\u293d',
'Cup;': '\u22d3',
'cup;': '\u222a',
'cupbrcap;': '\u2a48',
'CupCap;': '\u224d',
'cupcap;': '\u2a46',
'cupcup;': '\u2a4a',
'cupdot;': '\u228d',
'cupor;': '\u2a45',
'cups;': '\u222a\ufe00',
'curarr;': '\u21b7',
'curarrm;': '\u293c',
'curlyeqprec;': '\u22de',
'curlyeqsucc;': '\u22df',
'curlyvee;': '\u22ce',
'curlywedge;': '\u22cf',
'curren': '\xa4',
'curren;': '\xa4',
'curvearrowleft;': '\u21b6',
'curvearrowright;': '\u21b7',
'cuvee;': '\u22ce',
'cuwed;': '\u22cf',
'cwconint;': '\u2232',
'cwint;': '\u2231',
'cylcty;': '\u232d',
'Dagger;': '\u2021',
'dagger;': '\u2020',
'daleth;': '\u2138',
'Darr;': '\u21a1',
'dArr;': '\u21d3',
'darr;': '\u2193',
'dash;': '\u2010',
'Dashv;': '\u2ae4',
'dashv;': '\u22a3',
'dbkarow;': '\u290f',
'dblac;': '\u02dd',
'Dcaron;': '\u010e',
'dcaron;': '\u010f',
'Dcy;': '\u0414',
'dcy;': '\u0434',
'DD;': '\u2145',
'dd;': '\u2146',
'ddagger;': '\u2021',
'ddarr;': '\u21ca',
'DDotrahd;': '\u2911',
'ddotseq;': '\u2a77',
'deg': '\xb0',
'deg;': '\xb0',
'Del;': '\u2207',
'Delta;': '\u0394',
'delta;': '\u03b4',
'demptyv;': '\u29b1',
'dfisht;': '\u297f',
'Dfr;': '\U0001d507',
'dfr;': '\U0001d521',
'dHar;': '\u2965',
'dharl;': '\u21c3',
'dharr;': '\u21c2',
'DiacriticalAcute;': '\xb4',
'DiacriticalDot;': '\u02d9',
'DiacriticalDoubleAcute;': '\u02dd',
'DiacriticalGrave;': '`',
'DiacriticalTilde;': '\u02dc',
'diam;': '\u22c4',
'Diamond;': '\u22c4',
'diamond;': '\u22c4',
'diamondsuit;': '\u2666',
'diams;': '\u2666',
'die;': '\xa8',
'DifferentialD;': '\u2146',
'digamma;': '\u03dd',
'disin;': '\u22f2',
'div;': '\xf7',
'divide': '\xf7',
'divide;': '\xf7',
'divideontimes;': '\u22c7',
'divonx;': '\u22c7',
'DJcy;': '\u0402',
'djcy;': '\u0452',
'dlcorn;': '\u231e',
'dlcrop;': '\u230d',
'dollar;': '$',
'Dopf;': '\U0001d53b',
'dopf;': '\U0001d555',
'Dot;': '\xa8',
'dot;': '\u02d9',
'DotDot;': '\u20dc',
'doteq;': '\u2250',
'doteqdot;': '\u2251',
'DotEqual;': '\u2250',
'dotminus;': '\u2238',
'dotplus;': '\u2214',
'dotsquare;': '\u22a1',
'doublebarwedge;': '\u2306',
'DoubleContourIntegral;': '\u222f',
'DoubleDot;': '\xa8',
'DoubleDownArrow;': '\u21d3',
'DoubleLeftArrow;': '\u21d0',
'DoubleLeftRightArrow;': '\u21d4',
'DoubleLeftTee;': '\u2ae4',
'DoubleLongLeftArrow;': '\u27f8',
'DoubleLongLeftRightArrow;': '\u27fa',
'DoubleLongRightArrow;': '\u27f9',
'DoubleRightArrow;': '\u21d2',
'DoubleRightTee;': '\u22a8',
'DoubleUpArrow;': '\u21d1',
'DoubleUpDownArrow;': '\u21d5',
'DoubleVerticalBar;': '\u2225',
'DownArrow;': '\u2193',
'Downarrow;': '\u21d3',
'downarrow;': '\u2193',
'DownArrowBar;': '\u2913',
'DownArrowUpArrow;': '\u21f5',
'DownBreve;': '\u0311',
'downdownarrows;': '\u21ca',
'downharpoonleft;': '\u21c3',
'downharpoonright;': '\u21c2',
'DownLeftRightVector;': '\u2950',
'DownLeftTeeVector;': '\u295e',
'DownLeftVector;': '\u21bd',
'DownLeftVectorBar;': '\u2956',
'DownRightTeeVector;': '\u295f',
'DownRightVector;': '\u21c1',
'DownRightVectorBar;': '\u2957',
'DownTee;': '\u22a4',
'DownTeeArrow;': '\u21a7',
'drbkarow;': '\u2910',
'drcorn;': '\u231f',
'drcrop;': '\u230c',
'Dscr;': '\U0001d49f',
'dscr;': '\U0001d4b9',
'DScy;': '\u0405',
'dscy;': '\u0455',
'dsol;': '\u29f6',
'Dstrok;': '\u0110',
'dstrok;': '\u0111',
'dtdot;': '\u22f1',
'dtri;': '\u25bf',
'dtrif;': '\u25be',
'duarr;': '\u21f5',
'duhar;': '\u296f',
'dwangle;': '\u29a6',
'DZcy;': '\u040f',
'dzcy;': '\u045f',
'dzigrarr;': '\u27ff',
'Eacute': '\xc9',
'eacute': '\xe9',
'Eacute;': '\xc9',
'eacute;': '\xe9',
'easter;': '\u2a6e',
'Ecaron;': '\u011a',
'ecaron;': '\u011b',
'ecir;': '\u2256',
'Ecirc': '\xca',
'ecirc': '\xea',
'Ecirc;': '\xca',
'ecirc;': '\xea',
'ecolon;': '\u2255',
'Ecy;': '\u042d',
'ecy;': '\u044d',
'eDDot;': '\u2a77',
'Edot;': '\u0116',
'eDot;': '\u2251',
'edot;': '\u0117',
'ee;': '\u2147',
'efDot;': '\u2252',
'Efr;': '\U0001d508',
'efr;': '\U0001d522',
'eg;': '\u2a9a',
'Egrave': '\xc8',
'egrave': '\xe8',
'Egrave;': '\xc8',
'egrave;': '\xe8',
'egs;': '\u2a96',
'egsdot;': '\u2a98',
'el;': '\u2a99',
'Element;': '\u2208',
'elinters;': '\u23e7',
'ell;': '\u2113',
'els;': '\u2a95',
'elsdot;': '\u2a97',
'Emacr;': '\u0112',
'emacr;': '\u0113',
'empty;': '\u2205',
'emptyset;': '\u2205',
'EmptySmallSquare;': '\u25fb',
'emptyv;': '\u2205',
'EmptyVerySmallSquare;': '\u25ab',
'emsp13;': '\u2004',
'emsp14;': '\u2005',
'emsp;': '\u2003',
'ENG;': '\u014a',
'eng;': '\u014b',
'ensp;': '\u2002',
'Eogon;': '\u0118',
'eogon;': '\u0119',
'Eopf;': '\U0001d53c',
'eopf;': '\U0001d556',
'epar;': '\u22d5',
'eparsl;': '\u29e3',
'eplus;': '\u2a71',
'epsi;': '\u03b5',
'Epsilon;': '\u0395',
'epsilon;': '\u03b5',
'epsiv;': '\u03f5',
'eqcirc;': '\u2256',
'eqcolon;': '\u2255',
'eqsim;': '\u2242',
'eqslantgtr;': '\u2a96',
'eqslantless;': '\u2a95',
'Equal;': '\u2a75',
'equals;': '=',
'EqualTilde;': '\u2242',
'equest;': '\u225f',
'Equilibrium;': '\u21cc',
'equiv;': '\u2261',
'equivDD;': '\u2a78',
'eqvparsl;': '\u29e5',
'erarr;': '\u2971',
'erDot;': '\u2253',
'Escr;': '\u2130',
'escr;': '\u212f',
'esdot;': '\u2250',
'Esim;': '\u2a73',
'esim;': '\u2242',
'Eta;': '\u0397',
'eta;': '\u03b7',
'ETH': '\xd0',
'eth': '\xf0',
'ETH;': '\xd0',
'eth;': '\xf0',
'Euml': '\xcb',
'euml': '\xeb',
'Euml;': '\xcb',
'euml;': '\xeb',
'euro;': '\u20ac',
'excl;': '!',
'exist;': '\u2203',
'Exists;': '\u2203',
'expectation;': '\u2130',
'ExponentialE;': '\u2147',
'exponentiale;': '\u2147',
'fallingdotseq;': '\u2252',
'Fcy;': '\u0424',
'fcy;': '\u0444',
'female;': '\u2640',
'ffilig;': '\ufb03',
'fflig;': '\ufb00',
'ffllig;': '\ufb04',
'Ffr;': '\U0001d509',
'ffr;': '\U0001d523',
'filig;': '\ufb01',
'FilledSmallSquare;': '\u25fc',
'FilledVerySmallSquare;': '\u25aa',
'fjlig;': 'fj',
'flat;': '\u266d',
'fllig;': '\ufb02',
'fltns;': '\u25b1',
'fnof;': '\u0192',
'Fopf;': '\U0001d53d',
'fopf;': '\U0001d557',
'ForAll;': '\u2200',
'forall;': '\u2200',
'fork;': '\u22d4',
'forkv;': '\u2ad9',
'Fouriertrf;': '\u2131',
'fpartint;': '\u2a0d',
'frac12': '\xbd',
'frac12;': '\xbd',
'frac13;': '\u2153',
'frac14': '\xbc',
'frac14;': '\xbc',
'frac15;': '\u2155',
'frac16;': '\u2159',
'frac18;': '\u215b',
'frac23;': '\u2154',
'frac25;': '\u2156',
'frac34': '\xbe',
'frac34;': '\xbe',
'frac35;': '\u2157',
'frac38;': '\u215c',
'frac45;': '\u2158',
'frac56;': '\u215a',
'frac58;': '\u215d',
'frac78;': '\u215e',
'frasl;': '\u2044',
'frown;': '\u2322',
'Fscr;': '\u2131',
'fscr;': '\U0001d4bb',
'gacute;': '\u01f5',
'Gamma;': '\u0393',
'gamma;': '\u03b3',
'Gammad;': '\u03dc',
'gammad;': '\u03dd',
'gap;': '\u2a86',
'Gbreve;': '\u011e',
'gbreve;': '\u011f',
'Gcedil;': '\u0122',
'Gcirc;': '\u011c',
'gcirc;': '\u011d',
'Gcy;': '\u0413',
'gcy;': '\u0433',
'Gdot;': '\u0120',
'gdot;': '\u0121',
'gE;': '\u2267',
'ge;': '\u2265',
'gEl;': '\u2a8c',
'gel;': '\u22db',
'geq;': '\u2265',
'geqq;': '\u2267',
'geqslant;': '\u2a7e',
'ges;': '\u2a7e',
'gescc;': '\u2aa9',
'gesdot;': '\u2a80',
'gesdoto;': '\u2a82',
'gesdotol;': '\u2a84',
'gesl;': '\u22db\ufe00',
'gesles;': '\u2a94',
'Gfr;': '\U0001d50a',
'gfr;': '\U0001d524',
'Gg;': '\u22d9',
'gg;': '\u226b',
'ggg;': '\u22d9',
'gimel;': '\u2137',
'GJcy;': '\u0403',
'gjcy;': '\u0453',
'gl;': '\u2277',
'gla;': '\u2aa5',
'glE;': '\u2a92',
'glj;': '\u2aa4',
'gnap;': '\u2a8a',
'gnapprox;': '\u2a8a',
'gnE;': '\u2269',
'gne;': '\u2a88',
'gneq;': '\u2a88',
'gneqq;': '\u2269',
'gnsim;': '\u22e7',
'Gopf;': '\U0001d53e',
'gopf;': '\U0001d558',
'grave;': '`',
'GreaterEqual;': '\u2265',
'GreaterEqualLess;': '\u22db',
'GreaterFullEqual;': '\u2267',
'GreaterGreater;': '\u2aa2',
'GreaterLess;': '\u2277',
'GreaterSlantEqual;': '\u2a7e',
'GreaterTilde;': '\u2273',
'Gscr;': '\U0001d4a2',
'gscr;': '\u210a',
'gsim;': '\u2273',
'gsime;': '\u2a8e',
'gsiml;': '\u2a90',
'GT': '>',
'gt': '>',
'GT;': '>',
'Gt;': '\u226b',
'gt;': '>',
'gtcc;': '\u2aa7',
'gtcir;': '\u2a7a',
'gtdot;': '\u22d7',
'gtlPar;': '\u2995',
'gtquest;': '\u2a7c',
'gtrapprox;': '\u2a86',
'gtrarr;': '\u2978',
'gtrdot;': '\u22d7',
'gtreqless;': '\u22db',
'gtreqqless;': '\u2a8c',
'gtrless;': '\u2277',
'gtrsim;': '\u2273',
'gvertneqq;': '\u2269\ufe00',
'gvnE;': '\u2269\ufe00',
'Hacek;': '\u02c7',
'hairsp;': '\u200a',
'half;': '\xbd',
'hamilt;': '\u210b',
'HARDcy;': '\u042a',
'hardcy;': '\u044a',
'hArr;': '\u21d4',
'harr;': '\u2194',
'harrcir;': '\u2948',
'harrw;': '\u21ad',
'Hat;': '^',
'hbar;': '\u210f',
'Hcirc;': '\u0124',
'hcirc;': '\u0125',
'hearts;': '\u2665',
'heartsuit;': '\u2665',
'hellip;': '\u2026',
'hercon;': '\u22b9',
'Hfr;': '\u210c',
'hfr;': '\U0001d525',
'HilbertSpace;': '\u210b',
'hksearow;': '\u2925',
'hkswarow;': '\u2926',
'hoarr;': '\u21ff',
'homtht;': '\u223b',
'hookleftarrow;': '\u21a9',
'hookrightarrow;': '\u21aa',
'Hopf;': '\u210d',
'hopf;': '\U0001d559',
'horbar;': '\u2015',
'HorizontalLine;': '\u2500',
'Hscr;': '\u210b',
'hscr;': '\U0001d4bd',
'hslash;': '\u210f',
'Hstrok;': '\u0126',
'hstrok;': '\u0127',
'HumpDownHump;': '\u224e',
'HumpEqual;': '\u224f',
'hybull;': '\u2043',
'hyphen;': '\u2010',
'Iacute': '\xcd',
'iacute': '\xed',
'Iacute;': '\xcd',
'iacute;': '\xed',
'ic;': '\u2063',
'Icirc': '\xce',
'icirc': '\xee',
'Icirc;': '\xce',
'icirc;': '\xee',
'Icy;': '\u0418',
'icy;': '\u0438',
'Idot;': '\u0130',
'IEcy;': '\u0415',
'iecy;': '\u0435',
'iexcl': '\xa1',
'iexcl;': '\xa1',
'iff;': '\u21d4',
'Ifr;': '\u2111',
'ifr;': '\U0001d526',
'Igrave': '\xcc',
'igrave': '\xec',
'Igrave;': '\xcc',
'igrave;': '\xec',
'ii;': '\u2148',
'iiiint;': '\u2a0c',
'iiint;': '\u222d',
'iinfin;': '\u29dc',
'iiota;': '\u2129',
'IJlig;': '\u0132',
'ijlig;': '\u0133',
'Im;': '\u2111',
'Imacr;': '\u012a',
'imacr;': '\u012b',
'image;': '\u2111',
'ImaginaryI;': '\u2148',
'imagline;': '\u2110',
'imagpart;': '\u2111',
'imath;': '\u0131',
'imof;': '\u22b7',
'imped;': '\u01b5',
'Implies;': '\u21d2',
'in;': '\u2208',
'incare;': '\u2105',
'infin;': '\u221e',
'infintie;': '\u29dd',
'inodot;': '\u0131',
'Int;': '\u222c',
'int;': '\u222b',
'intcal;': '\u22ba',
'integers;': '\u2124',
'Integral;': '\u222b',
'intercal;': '\u22ba',
'Intersection;': '\u22c2',
'intlarhk;': '\u2a17',
'intprod;': '\u2a3c',
'InvisibleComma;': '\u2063',
'InvisibleTimes;': '\u2062',
'IOcy;': '\u0401',
'iocy;': '\u0451',
'Iogon;': '\u012e',
'iogon;': '\u012f',
'Iopf;': '\U0001d540',
'iopf;': '\U0001d55a',
'Iota;': '\u0399',
'iota;': '\u03b9',
'iprod;': '\u2a3c',
'iquest': '\xbf',
'iquest;': '\xbf',
'Iscr;': '\u2110',
'iscr;': '\U0001d4be',
'isin;': '\u2208',
'isindot;': '\u22f5',
'isinE;': '\u22f9',
'isins;': '\u22f4',
'isinsv;': '\u22f3',
'isinv;': '\u2208',
'it;': '\u2062',
'Itilde;': '\u0128',
'itilde;': '\u0129',
'Iukcy;': '\u0406',
'iukcy;': '\u0456',
'Iuml': '\xcf',
'iuml': '\xef',
'Iuml;': '\xcf',
'iuml;': '\xef',
'Jcirc;': '\u0134',
'jcirc;': '\u0135',
'Jcy;': '\u0419',
'jcy;': '\u0439',
'Jfr;': '\U0001d50d',
'jfr;': '\U0001d527',
'jmath;': '\u0237',
'Jopf;': '\U0001d541',
'jopf;': '\U0001d55b',
'Jscr;': '\U0001d4a5',
'jscr;': '\U0001d4bf',
'Jsercy;': '\u0408',
'jsercy;': '\u0458',
'Jukcy;': '\u0404',
'jukcy;': '\u0454',
'Kappa;': '\u039a',
'kappa;': '\u03ba',
'kappav;': '\u03f0',
'Kcedil;': '\u0136',
'kcedil;': '\u0137',
'Kcy;': '\u041a',
'kcy;': '\u043a',
'Kfr;': '\U0001d50e',
'kfr;': '\U0001d528',
'kgreen;': '\u0138',
'KHcy;': '\u0425',
'khcy;': '\u0445',
'KJcy;': '\u040c',
'kjcy;': '\u045c',
'Kopf;': '\U0001d542',
'kopf;': '\U0001d55c',
'Kscr;': '\U0001d4a6',
'kscr;': '\U0001d4c0',
'lAarr;': '\u21da',
'Lacute;': '\u0139',
'lacute;': '\u013a',
'laemptyv;': '\u29b4',
'lagran;': '\u2112',
'Lambda;': '\u039b',
'lambda;': '\u03bb',
'Lang;': '\u27ea',
'lang;': '\u27e8',
'langd;': '\u2991',
'langle;': '\u27e8',
'lap;': '\u2a85',
'Laplacetrf;': '\u2112',
'laquo': '\xab',
'laquo;': '\xab',
'Larr;': '\u219e',
'lArr;': '\u21d0',
'larr;': '\u2190',
'larrb;': '\u21e4',
'larrbfs;': '\u291f',
'larrfs;': '\u291d',
'larrhk;': '\u21a9',
'larrlp;': '\u21ab',
'larrpl;': '\u2939',
'larrsim;': '\u2973',
'larrtl;': '\u21a2',
'lat;': '\u2aab',
'lAtail;': '\u291b',
'latail;': '\u2919',
'late;': '\u2aad',
'lates;': '\u2aad\ufe00',
'lBarr;': '\u290e',
'lbarr;': '\u290c',
'lbbrk;': '\u2772',
'lbrace;': '{',
'lbrack;': '[',
'lbrke;': '\u298b',
'lbrksld;': '\u298f',
'lbrkslu;': '\u298d',
'Lcaron;': '\u013d',
'lcaron;': '\u013e',
'Lcedil;': '\u013b',
'lcedil;': '\u013c',
'lceil;': '\u2308',
'lcub;': '{',
'Lcy;': '\u041b',
'lcy;': '\u043b',
'ldca;': '\u2936',
'ldquo;': '\u201c',
'ldquor;': '\u201e',
'ldrdhar;': '\u2967',
'ldrushar;': '\u294b',
'ldsh;': '\u21b2',
'lE;': '\u2266',
'le;': '\u2264',
'LeftAngleBracket;': '\u27e8',
'LeftArrow;': '\u2190',
'Leftarrow;': '\u21d0',
'leftarrow;': '\u2190',
'LeftArrowBar;': '\u21e4',
'LeftArrowRightArrow;': '\u21c6',
'leftarrowtail;': '\u21a2',
'LeftCeiling;': '\u2308',
'LeftDoubleBracket;': '\u27e6',
'LeftDownTeeVector;': '\u2961',
'LeftDownVector;': '\u21c3',
'LeftDownVectorBar;': '\u2959',
'LeftFloor;': '\u230a',
'leftharpoondown;': '\u21bd',
'leftharpoonup;': '\u21bc',
'leftleftarrows;': '\u21c7',
'LeftRightArrow;': '\u2194',
'Leftrightarrow;': '\u21d4',
'leftrightarrow;': '\u2194',
'leftrightarrows;': '\u21c6',
'leftrightharpoons;': '\u21cb',
'leftrightsquigarrow;': '\u21ad',
'LeftRightVector;': '\u294e',
'LeftTee;': '\u22a3',
'LeftTeeArrow;': '\u21a4',
'LeftTeeVector;': '\u295a',
'leftthreetimes;': '\u22cb',
'LeftTriangle;': '\u22b2',
'LeftTriangleBar;': '\u29cf',
'LeftTriangleEqual;': '\u22b4',
'LeftUpDownVector;': '\u2951',
'LeftUpTeeVector;': '\u2960',
'LeftUpVector;': '\u21bf',
'LeftUpVectorBar;': '\u2958',
'LeftVector;': '\u21bc',
'LeftVectorBar;': '\u2952',
'lEg;': '\u2a8b',
'leg;': '\u22da',
'leq;': '\u2264',
'leqq;': '\u2266',
'leqslant;': '\u2a7d',
'les;': '\u2a7d',
'lescc;': '\u2aa8',
'lesdot;': '\u2a7f',
'lesdoto;': '\u2a81',
'lesdotor;': '\u2a83',
'lesg;': '\u22da\ufe00',
'lesges;': '\u2a93',
'lessapprox;': '\u2a85',
'lessdot;': '\u22d6',
'lesseqgtr;': '\u22da',
'lesseqqgtr;': '\u2a8b',
'LessEqualGreater;': '\u22da',
'LessFullEqual;': '\u2266',
'LessGreater;': '\u2276',
'lessgtr;': '\u2276',
'LessLess;': '\u2aa1',
'lesssim;': '\u2272',
'LessSlantEqual;': '\u2a7d',
'LessTilde;': '\u2272',
'lfisht;': '\u297c',
'lfloor;': '\u230a',
'Lfr;': '\U0001d50f',
'lfr;': '\U0001d529',
'lg;': '\u2276',
'lgE;': '\u2a91',
'lHar;': '\u2962',
'lhard;': '\u21bd',
'lharu;': '\u21bc',
'lharul;': '\u296a',
'lhblk;': '\u2584',
'LJcy;': '\u0409',
'ljcy;': '\u0459',
'Ll;': '\u22d8',
'll;': '\u226a',
'llarr;': '\u21c7',
'llcorner;': '\u231e',
'Lleftarrow;': '\u21da',
'llhard;': '\u296b',
'lltri;': '\u25fa',
'Lmidot;': '\u013f',
'lmidot;': '\u0140',
'lmoust;': '\u23b0',
'lmoustache;': '\u23b0',
'lnap;': '\u2a89',
'lnapprox;': '\u2a89',
'lnE;': '\u2268',
'lne;': '\u2a87',
'lneq;': '\u2a87',
'lneqq;': '\u2268',
'lnsim;': '\u22e6',
'loang;': '\u27ec',
'loarr;': '\u21fd',
'lobrk;': '\u27e6',
'LongLeftArrow;': '\u27f5',
'Longleftarrow;': '\u27f8',
'longleftarrow;': '\u27f5',
'LongLeftRightArrow;': '\u27f7',
'Longleftrightarrow;': '\u27fa',
'longleftrightarrow;': '\u27f7',
'longmapsto;': '\u27fc',
'LongRightArrow;': '\u27f6',
'Longrightarrow;': '\u27f9',
'longrightarrow;': '\u27f6',
'looparrowleft;': '\u21ab',
'looparrowright;': '\u21ac',
'lopar;': '\u2985',
'Lopf;': '\U0001d543',
'lopf;': '\U0001d55d',
'loplus;': '\u2a2d',
'lotimes;': '\u2a34',
'lowast;': '\u2217',
'lowbar;': '_',
'LowerLeftArrow;': '\u2199',
'LowerRightArrow;': '\u2198',
'loz;': '\u25ca',
'lozenge;': '\u25ca',
'lozf;': '\u29eb',
'lpar;': '(',
'lparlt;': '\u2993',
'lrarr;': '\u21c6',
'lrcorner;': '\u231f',
'lrhar;': '\u21cb',
'lrhard;': '\u296d',
'lrm;': '\u200e',
'lrtri;': '\u22bf',
'lsaquo;': '\u2039',
'Lscr;': '\u2112',
'lscr;': '\U0001d4c1',
'Lsh;': '\u21b0',
'lsh;': '\u21b0',
'lsim;': '\u2272',
'lsime;': '\u2a8d',
'lsimg;': '\u2a8f',
'lsqb;': '[',
'lsquo;': '\u2018',
'lsquor;': '\u201a',
'Lstrok;': '\u0141',
'lstrok;': '\u0142',
'LT': '<',
'lt': '<',
'LT;': '<',
'Lt;': '\u226a',
'lt;': '<',
'ltcc;': '\u2aa6',
'ltcir;': '\u2a79',
'ltdot;': '\u22d6',
'lthree;': '\u22cb',
'ltimes;': '\u22c9',
'ltlarr;': '\u2976',
'ltquest;': '\u2a7b',
'ltri;': '\u25c3',
'ltrie;': '\u22b4',
'ltrif;': '\u25c2',
'ltrPar;': '\u2996',
'lurdshar;': '\u294a',
'luruhar;': '\u2966',
'lvertneqq;': '\u2268\ufe00',
'lvnE;': '\u2268\ufe00',
'macr': '\xaf',
'macr;': '\xaf',
'male;': '\u2642',
'malt;': '\u2720',
'maltese;': '\u2720',
'Map;': '\u2905',
'map;': '\u21a6',
'mapsto;': '\u21a6',
'mapstodown;': '\u21a7',
'mapstoleft;': '\u21a4',
'mapstoup;': '\u21a5',
'marker;': '\u25ae',
'mcomma;': '\u2a29',
'Mcy;': '\u041c',
'mcy;': '\u043c',
'mdash;': '\u2014',
'mDDot;': '\u223a',
'measuredangle;': '\u2221',
'MediumSpace;': '\u205f',
'Mellintrf;': '\u2133',
'Mfr;': '\U0001d510',
'mfr;': '\U0001d52a',
'mho;': '\u2127',
'micro': '\xb5',
'micro;': '\xb5',
'mid;': '\u2223',
'midast;': '*',
'midcir;': '\u2af0',
'middot': '\xb7',
'middot;': '\xb7',
'minus;': '\u2212',
'minusb;': '\u229f',
'minusd;': '\u2238',
'minusdu;': '\u2a2a',
'MinusPlus;': '\u2213',
'mlcp;': '\u2adb',
'mldr;': '\u2026',
'mnplus;': '\u2213',
'models;': '\u22a7',
'Mopf;': '\U0001d544',
'mopf;': '\U0001d55e',
'mp;': '\u2213',
'Mscr;': '\u2133',
'mscr;': '\U0001d4c2',
'mstpos;': '\u223e',
'Mu;': '\u039c',
'mu;': '\u03bc',
'multimap;': '\u22b8',
'mumap;': '\u22b8',
'nabla;': '\u2207',
'Nacute;': '\u0143',
'nacute;': '\u0144',
'nang;': '\u2220\u20d2',
'nap;': '\u2249',
'napE;': '\u2a70\u0338',
'napid;': '\u224b\u0338',
'napos;': '\u0149',
'napprox;': '\u2249',
'natur;': '\u266e',
'natural;': '\u266e',
'naturals;': '\u2115',
'nbsp': '\xa0',
'nbsp;': '\xa0',
'nbump;': '\u224e\u0338',
'nbumpe;': '\u224f\u0338',
'ncap;': '\u2a43',
'Ncaron;': '\u0147',
'ncaron;': '\u0148',
'Ncedil;': '\u0145',
'ncedil;': '\u0146',
'ncong;': '\u2247',
'ncongdot;': '\u2a6d\u0338',
'ncup;': '\u2a42',
'Ncy;': '\u041d',
'ncy;': '\u043d',
'ndash;': '\u2013',
'ne;': '\u2260',
'nearhk;': '\u2924',
'neArr;': '\u21d7',
'nearr;': '\u2197',
'nearrow;': '\u2197',
'nedot;': '\u2250\u0338',
'NegativeMediumSpace;': '\u200b',
'NegativeThickSpace;': '\u200b',
'NegativeThinSpace;': '\u200b',
'NegativeVeryThinSpace;': '\u200b',
'nequiv;': '\u2262',
'nesear;': '\u2928',
'nesim;': '\u2242\u0338',
'NestedGreaterGreater;': '\u226b',
'NestedLessLess;': '\u226a',
'NewLine;': '\n',
'nexist;': '\u2204',
'nexists;': '\u2204',
'Nfr;': '\U0001d511',
'nfr;': '\U0001d52b',
'ngE;': '\u2267\u0338',
'nge;': '\u2271',
'ngeq;': '\u2271',
'ngeqq;': '\u2267\u0338',
'ngeqslant;': '\u2a7e\u0338',
'nges;': '\u2a7e\u0338',
'nGg;': '\u22d9\u0338',
'ngsim;': '\u2275',
'nGt;': '\u226b\u20d2',
'ngt;': '\u226f',
'ngtr;': '\u226f',
'nGtv;': '\u226b\u0338',
'nhArr;': '\u21ce',
'nharr;': '\u21ae',
'nhpar;': '\u2af2',
'ni;': '\u220b',
'nis;': '\u22fc',
'nisd;': '\u22fa',
'niv;': '\u220b',
'NJcy;': '\u040a',
'njcy;': '\u045a',
'nlArr;': '\u21cd',
'nlarr;': '\u219a',
'nldr;': '\u2025',
'nlE;': '\u2266\u0338',
'nle;': '\u2270',
'nLeftarrow;': '\u21cd',
'nleftarrow;': '\u219a',
'nLeftrightarrow;': '\u21ce',
'nleftrightarrow;': '\u21ae',
'nleq;': '\u2270',
'nleqq;': '\u2266\u0338',
'nleqslant;': '\u2a7d\u0338',
'nles;': '\u2a7d\u0338',
'nless;': '\u226e',
'nLl;': '\u22d8\u0338',
'nlsim;': '\u2274',
'nLt;': '\u226a\u20d2',
'nlt;': '\u226e',
'nltri;': '\u22ea',
'nltrie;': '\u22ec',
'nLtv;': '\u226a\u0338',
'nmid;': '\u2224',
'NoBreak;': '\u2060',
'NonBreakingSpace;': '\xa0',
'Nopf;': '\u2115',
'nopf;': '\U0001d55f',
'not': '\xac',
'Not;': '\u2aec',
'not;': '\xac',
'NotCongruent;': '\u2262',
'NotCupCap;': '\u226d',
'NotDoubleVerticalBar;': '\u2226',
'NotElement;': '\u2209',
'NotEqual;': '\u2260',
'NotEqualTilde;': '\u2242\u0338',
'NotExists;': '\u2204',
'NotGreater;': '\u226f',
'NotGreaterEqual;': '\u2271',
'NotGreaterFullEqual;': '\u2267\u0338',
'NotGreaterGreater;': '\u226b\u0338',
'NotGreaterLess;': '\u2279',
'NotGreaterSlantEqual;': '\u2a7e\u0338',
'NotGreaterTilde;': '\u2275',
'NotHumpDownHump;': '\u224e\u0338',
'NotHumpEqual;': '\u224f\u0338',
'notin;': '\u2209',
'notindot;': '\u22f5\u0338',
'notinE;': '\u22f9\u0338',
'notinva;': '\u2209',
'notinvb;': '\u22f7',
'notinvc;': '\u22f6',
'NotLeftTriangle;': '\u22ea',
'NotLeftTriangleBar;': '\u29cf\u0338',
'NotLeftTriangleEqual;': '\u22ec',
'NotLess;': '\u226e',
'NotLessEqual;': '\u2270',
'NotLessGreater;': '\u2278',
'NotLessLess;': '\u226a\u0338',
'NotLessSlantEqual;': '\u2a7d\u0338',
'NotLessTilde;': '\u2274',
'NotNestedGreaterGreater;': '\u2aa2\u0338',
'NotNestedLessLess;': '\u2aa1\u0338',
'notni;': '\u220c',
'notniva;': '\u220c',
'notnivb;': '\u22fe',
'notnivc;': '\u22fd',
'NotPrecedes;': '\u2280',
'NotPrecedesEqual;': '\u2aaf\u0338',
'NotPrecedesSlantEqual;': '\u22e0',
'NotReverseElement;': '\u220c',
'NotRightTriangle;': '\u22eb',
'NotRightTriangleBar;': '\u29d0\u0338',
'NotRightTriangleEqual;': '\u22ed',
'NotSquareSubset;': '\u228f\u0338',
'NotSquareSubsetEqual;': '\u22e2',
'NotSquareSuperset;': '\u2290\u0338',
'NotSquareSupersetEqual;': '\u22e3',
'NotSubset;': '\u2282\u20d2',
'NotSubsetEqual;': '\u2288',
'NotSucceeds;': '\u2281',
'NotSucceedsEqual;': '\u2ab0\u0338',
'NotSucceedsSlantEqual;': '\u22e1',
'NotSucceedsTilde;': '\u227f\u0338',
'NotSuperset;': '\u2283\u20d2',
'NotSupersetEqual;': '\u2289',
'NotTilde;': '\u2241',
'NotTildeEqual;': '\u2244',
'NotTildeFullEqual;': '\u2247',
'NotTildeTilde;': '\u2249',
'NotVerticalBar;': '\u2224',
'npar;': '\u2226',
'nparallel;': '\u2226',
'nparsl;': '\u2afd\u20e5',
'npart;': '\u2202\u0338',
'npolint;': '\u2a14',
'npr;': '\u2280',
'nprcue;': '\u22e0',
'npre;': '\u2aaf\u0338',
'nprec;': '\u2280',
'npreceq;': '\u2aaf\u0338',
'nrArr;': '\u21cf',
'nrarr;': '\u219b',
'nrarrc;': '\u2933\u0338',
'nrarrw;': '\u219d\u0338',
'nRightarrow;': '\u21cf',
'nrightarrow;': '\u219b',
'nrtri;': '\u22eb',
'nrtrie;': '\u22ed',
'nsc;': '\u2281',
'nsccue;': '\u22e1',
'nsce;': '\u2ab0\u0338',
'Nscr;': '\U0001d4a9',
'nscr;': '\U0001d4c3',
'nshortmid;': '\u2224',
'nshortparallel;': '\u2226',
'nsim;': '\u2241',
'nsime;': '\u2244',
'nsimeq;': '\u2244',
'nsmid;': '\u2224',
'nspar;': '\u2226',
'nsqsube;': '\u22e2',
'nsqsupe;': '\u22e3',
'nsub;': '\u2284',
'nsubE;': '\u2ac5\u0338',
'nsube;': '\u2288',
'nsubset;': '\u2282\u20d2',
'nsubseteq;': '\u2288',
'nsubseteqq;': '\u2ac5\u0338',
'nsucc;': '\u2281',
'nsucceq;': '\u2ab0\u0338',
'nsup;': '\u2285',
'nsupE;': '\u2ac6\u0338',
'nsupe;': '\u2289',
'nsupset;': '\u2283\u20d2',
'nsupseteq;': '\u2289',
'nsupseteqq;': '\u2ac6\u0338',
'ntgl;': '\u2279',
'Ntilde': '\xd1',
'ntilde': '\xf1',
'Ntilde;': '\xd1',
'ntilde;': '\xf1',
'ntlg;': '\u2278',
'ntriangleleft;': '\u22ea',
'ntrianglelefteq;': '\u22ec',
'ntriangleright;': '\u22eb',
'ntrianglerighteq;': '\u22ed',
'Nu;': '\u039d',
'nu;': '\u03bd',
'num;': '#',
'numero;': '\u2116',
'numsp;': '\u2007',
'nvap;': '\u224d\u20d2',
'nVDash;': '\u22af',
'nVdash;': '\u22ae',
'nvDash;': '\u22ad',
'nvdash;': '\u22ac',
'nvge;': '\u2265\u20d2',
'nvgt;': '>\u20d2',
'nvHarr;': '\u2904',
'nvinfin;': '\u29de',
'nvlArr;': '\u2902',
'nvle;': '\u2264\u20d2',
'nvlt;': '<\u20d2',
'nvltrie;': '\u22b4\u20d2',
'nvrArr;': '\u2903',
'nvrtrie;': '\u22b5\u20d2',
'nvsim;': '\u223c\u20d2',
'nwarhk;': '\u2923',
'nwArr;': '\u21d6',
'nwarr;': '\u2196',
'nwarrow;': '\u2196',
'nwnear;': '\u2927',
'Oacute': '\xd3',
'oacute': '\xf3',
'Oacute;': '\xd3',
'oacute;': '\xf3',
'oast;': '\u229b',
'ocir;': '\u229a',
'Ocirc': '\xd4',
'ocirc': '\xf4',
'Ocirc;': '\xd4',
'ocirc;': '\xf4',
'Ocy;': '\u041e',
'ocy;': '\u043e',
'odash;': '\u229d',
'Odblac;': '\u0150',
'odblac;': '\u0151',
'odiv;': '\u2a38',
'odot;': '\u2299',
'odsold;': '\u29bc',
'OElig;': '\u0152',
'oelig;': '\u0153',
'ofcir;': '\u29bf',
'Ofr;': '\U0001d512',
'ofr;': '\U0001d52c',
'ogon;': '\u02db',
'Ograve': '\xd2',
'ograve': '\xf2',
'Ograve;': '\xd2',
'ograve;': '\xf2',
'ogt;': '\u29c1',
'ohbar;': '\u29b5',
'ohm;': '\u03a9',
'oint;': '\u222e',
'olarr;': '\u21ba',
'olcir;': '\u29be',
'olcross;': '\u29bb',
'oline;': '\u203e',
'olt;': '\u29c0',
'Omacr;': '\u014c',
'omacr;': '\u014d',
'Omega;': '\u03a9',
'omega;': '\u03c9',
'Omicron;': '\u039f',
'omicron;': '\u03bf',
'omid;': '\u29b6',
'ominus;': '\u2296',
'Oopf;': '\U0001d546',
'oopf;': '\U0001d560',
'opar;': '\u29b7',
'OpenCurlyDoubleQuote;': '\u201c',
'OpenCurlyQuote;': '\u2018',
'operp;': '\u29b9',
'oplus;': '\u2295',
'Or;': '\u2a54',
'or;': '\u2228',
'orarr;': '\u21bb',
'ord;': '\u2a5d',
'order;': '\u2134',
'orderof;': '\u2134',
'ordf': '\xaa',
'ordf;': '\xaa',
'ordm': '\xba',
'ordm;': '\xba',
'origof;': '\u22b6',
'oror;': '\u2a56',
'orslope;': '\u2a57',
'orv;': '\u2a5b',
'oS;': '\u24c8',
'Oscr;': '\U0001d4aa',
'oscr;': '\u2134',
'Oslash': '\xd8',
'oslash': '\xf8',
'Oslash;': '\xd8',
'oslash;': '\xf8',
'osol;': '\u2298',
'Otilde': '\xd5',
'otilde': '\xf5',
'Otilde;': '\xd5',
'otilde;': '\xf5',
'Otimes;': '\u2a37',
'otimes;': '\u2297',
'otimesas;': '\u2a36',
'Ouml': '\xd6',
'ouml': '\xf6',
'Ouml;': '\xd6',
'ouml;': '\xf6',
'ovbar;': '\u233d',
'OverBar;': '\u203e',
'OverBrace;': '\u23de',
'OverBracket;': '\u23b4',
'OverParenthesis;': '\u23dc',
'par;': '\u2225',
'para': '\xb6',
'para;': '\xb6',
'parallel;': '\u2225',
'parsim;': '\u2af3',
'parsl;': '\u2afd',
'part;': '\u2202',
'PartialD;': '\u2202',
'Pcy;': '\u041f',
'pcy;': '\u043f',
'percnt;': '%',
'period;': '.',
'permil;': '\u2030',
'perp;': '\u22a5',
'pertenk;': '\u2031',
'Pfr;': '\U0001d513',
'pfr;': '\U0001d52d',
'Phi;': '\u03a6',
'phi;': '\u03c6',
'phiv;': '\u03d5',
'phmmat;': '\u2133',
'phone;': '\u260e',
'Pi;': '\u03a0',
'pi;': '\u03c0',
'pitchfork;': '\u22d4',
'piv;': '\u03d6',
'planck;': '\u210f',
'planckh;': '\u210e',
'plankv;': '\u210f',
'plus;': '+',
'plusacir;': '\u2a23',
'plusb;': '\u229e',
'pluscir;': '\u2a22',
'plusdo;': '\u2214',
'plusdu;': '\u2a25',
'pluse;': '\u2a72',
'PlusMinus;': '\xb1',
'plusmn': '\xb1',
'plusmn;': '\xb1',
'plussim;': '\u2a26',
'plustwo;': '\u2a27',
'pm;': '\xb1',
'Poincareplane;': '\u210c',
'pointint;': '\u2a15',
'Popf;': '\u2119',
'popf;': '\U0001d561',
'pound': '\xa3',
'pound;': '\xa3',
'Pr;': '\u2abb',
'pr;': '\u227a',
'prap;': '\u2ab7',
'prcue;': '\u227c',
'prE;': '\u2ab3',
'pre;': '\u2aaf',
'prec;': '\u227a',
'precapprox;': '\u2ab7',
'preccurlyeq;': '\u227c',
'Precedes;': '\u227a',
'PrecedesEqual;': '\u2aaf',
'PrecedesSlantEqual;': '\u227c',
'PrecedesTilde;': '\u227e',
'preceq;': '\u2aaf',
'precnapprox;': '\u2ab9',
'precneqq;': '\u2ab5',
'precnsim;': '\u22e8',
'precsim;': '\u227e',
'Prime;': '\u2033',
'prime;': '\u2032',
'primes;': '\u2119',
'prnap;': '\u2ab9',
'prnE;': '\u2ab5',
'prnsim;': '\u22e8',
'prod;': '\u220f',
'Product;': '\u220f',
'profalar;': '\u232e',
'profline;': '\u2312',
'profsurf;': '\u2313',
'prop;': '\u221d',
'Proportion;': '\u2237',
'Proportional;': '\u221d',
'propto;': '\u221d',
'prsim;': '\u227e',
'prurel;': '\u22b0',
'Pscr;': '\U0001d4ab',
'pscr;': '\U0001d4c5',
'Psi;': '\u03a8',
'psi;': '\u03c8',
'puncsp;': '\u2008',
'Qfr;': '\U0001d514',
'qfr;': '\U0001d52e',
'qint;': '\u2a0c',
'Qopf;': '\u211a',
'qopf;': '\U0001d562',
'qprime;': '\u2057',
'Qscr;': '\U0001d4ac',
'qscr;': '\U0001d4c6',
'quaternions;': '\u210d',
'quatint;': '\u2a16',
'quest;': '?',
'questeq;': '\u225f',
'QUOT': '"',
'quot': '"',
'QUOT;': '"',
'quot;': '"',
'rAarr;': '\u21db',
'race;': '\u223d\u0331',
'Racute;': '\u0154',
'racute;': '\u0155',
'radic;': '\u221a',
'raemptyv;': '\u29b3',
'Rang;': '\u27eb',
'rang;': '\u27e9',
'rangd;': '\u2992',
'range;': '\u29a5',
'rangle;': '\u27e9',
'raquo': '\xbb',
'raquo;': '\xbb',
'Rarr;': '\u21a0',
'rArr;': '\u21d2',
'rarr;': '\u2192',
'rarrap;': '\u2975',
'rarrb;': '\u21e5',
'rarrbfs;': '\u2920',
'rarrc;': '\u2933',
'rarrfs;': '\u291e',
'rarrhk;': '\u21aa',
'rarrlp;': '\u21ac',
'rarrpl;': '\u2945',
'rarrsim;': '\u2974',
'Rarrtl;': '\u2916',
'rarrtl;': '\u21a3',
'rarrw;': '\u219d',
'rAtail;': '\u291c',
'ratail;': '\u291a',
'ratio;': '\u2236',
'rationals;': '\u211a',
'RBarr;': '\u2910',
'rBarr;': '\u290f',
'rbarr;': '\u290d',
'rbbrk;': '\u2773',
'rbrace;': '}',
'rbrack;': ']',
'rbrke;': '\u298c',
'rbrksld;': '\u298e',
'rbrkslu;': '\u2990',
'Rcaron;': '\u0158',
'rcaron;': '\u0159',
'Rcedil;': '\u0156',
'rcedil;': '\u0157',
'rceil;': '\u2309',
'rcub;': '}',
'Rcy;': '\u0420',
'rcy;': '\u0440',
'rdca;': '\u2937',
'rdldhar;': '\u2969',
'rdquo;': '\u201d',
'rdquor;': '\u201d',
'rdsh;': '\u21b3',
'Re;': '\u211c',
'real;': '\u211c',
'realine;': '\u211b',
'realpart;': '\u211c',
'reals;': '\u211d',
'rect;': '\u25ad',
'REG': '\xae',
'reg': '\xae',
'REG;': '\xae',
'reg;': '\xae',
'ReverseElement;': '\u220b',
'ReverseEquilibrium;': '\u21cb',
'ReverseUpEquilibrium;': '\u296f',
'rfisht;': '\u297d',
'rfloor;': '\u230b',
'Rfr;': '\u211c',
'rfr;': '\U0001d52f',
'rHar;': '\u2964',
'rhard;': '\u21c1',
'rharu;': '\u21c0',
'rharul;': '\u296c',
'Rho;': '\u03a1',
'rho;': '\u03c1',
'rhov;': '\u03f1',
'RightAngleBracket;': '\u27e9',
'RightArrow;': '\u2192',
'Rightarrow;': '\u21d2',
'rightarrow;': '\u2192',
'RightArrowBar;': '\u21e5',
'RightArrowLeftArrow;': '\u21c4',
'rightarrowtail;': '\u21a3',
'RightCeiling;': '\u2309',
'RightDoubleBracket;': '\u27e7',
'RightDownTeeVector;': '\u295d',
'RightDownVector;': '\u21c2',
'RightDownVectorBar;': '\u2955',
'RightFloor;': '\u230b',
'rightharpoondown;': '\u21c1',
'rightharpoonup;': '\u21c0',
'rightleftarrows;': '\u21c4',
'rightleftharpoons;': '\u21cc',
'rightrightarrows;': '\u21c9',
'rightsquigarrow;': '\u219d',
'RightTee;': '\u22a2',
'RightTeeArrow;': '\u21a6',
'RightTeeVector;': '\u295b',
'rightthreetimes;': '\u22cc',
'RightTriangle;': '\u22b3',
'RightTriangleBar;': '\u29d0',
'RightTriangleEqual;': '\u22b5',
'RightUpDownVector;': '\u294f',
'RightUpTeeVector;': '\u295c',
'RightUpVector;': '\u21be',
'RightUpVectorBar;': '\u2954',
'RightVector;': '\u21c0',
'RightVectorBar;': '\u2953',
'ring;': '\u02da',
'risingdotseq;': '\u2253',
'rlarr;': '\u21c4',
'rlhar;': '\u21cc',
'rlm;': '\u200f',
'rmoust;': '\u23b1',
'rmoustache;': '\u23b1',
'rnmid;': '\u2aee',
'roang;': '\u27ed',
'roarr;': '\u21fe',
'robrk;': '\u27e7',
'ropar;': '\u2986',
'Ropf;': '\u211d',
'ropf;': '\U0001d563',
'roplus;': '\u2a2e',
'rotimes;': '\u2a35',
'RoundImplies;': '\u2970',
'rpar;': ')',
'rpargt;': '\u2994',
'rppolint;': '\u2a12',
'rrarr;': '\u21c9',
'Rrightarrow;': '\u21db',
'rsaquo;': '\u203a',
'Rscr;': '\u211b',
'rscr;': '\U0001d4c7',
'Rsh;': '\u21b1',
'rsh;': '\u21b1',
'rsqb;': ']',
'rsquo;': '\u2019',
'rsquor;': '\u2019',
'rthree;': '\u22cc',
'rtimes;': '\u22ca',
'rtri;': '\u25b9',
'rtrie;': '\u22b5',
'rtrif;': '\u25b8',
'rtriltri;': '\u29ce',
'RuleDelayed;': '\u29f4',
'ruluhar;': '\u2968',
'rx;': '\u211e',
'Sacute;': '\u015a',
'sacute;': '\u015b',
'sbquo;': '\u201a',
'Sc;': '\u2abc',
'sc;': '\u227b',
'scap;': '\u2ab8',
'Scaron;': '\u0160',
'scaron;': '\u0161',
'sccue;': '\u227d',
'scE;': '\u2ab4',
'sce;': '\u2ab0',
'Scedil;': '\u015e',
'scedil;': '\u015f',
'Scirc;': '\u015c',
'scirc;': '\u015d',
'scnap;': '\u2aba',
'scnE;': '\u2ab6',
'scnsim;': '\u22e9',
'scpolint;': '\u2a13',
'scsim;': '\u227f',
'Scy;': '\u0421',
'scy;': '\u0441',
'sdot;': '\u22c5',
'sdotb;': '\u22a1',
'sdote;': '\u2a66',
'searhk;': '\u2925',
'seArr;': '\u21d8',
'searr;': '\u2198',
'searrow;': '\u2198',
'sect': '\xa7',
'sect;': '\xa7',
'semi;': ';',
'seswar;': '\u2929',
'setminus;': '\u2216',
'setmn;': '\u2216',
'sext;': '\u2736',
'Sfr;': '\U0001d516',
'sfr;': '\U0001d530',
'sfrown;': '\u2322',
'sharp;': '\u266f',
'SHCHcy;': '\u0429',
'shchcy;': '\u0449',
'SHcy;': '\u0428',
'shcy;': '\u0448',
'ShortDownArrow;': '\u2193',
'ShortLeftArrow;': '\u2190',
'shortmid;': '\u2223',
'shortparallel;': '\u2225',
'ShortRightArrow;': '\u2192',
'ShortUpArrow;': '\u2191',
'shy': '\xad',
'shy;': '\xad',
'Sigma;': '\u03a3',
'sigma;': '\u03c3',
'sigmaf;': '\u03c2',
'sigmav;': '\u03c2',
'sim;': '\u223c',
'simdot;': '\u2a6a',
'sime;': '\u2243',
'simeq;': '\u2243',
'simg;': '\u2a9e',
'simgE;': '\u2aa0',
'siml;': '\u2a9d',
'simlE;': '\u2a9f',
'simne;': '\u2246',
'simplus;': '\u2a24',
'simrarr;': '\u2972',
'slarr;': '\u2190',
'SmallCircle;': '\u2218',
'smallsetminus;': '\u2216',
'smashp;': '\u2a33',
'smeparsl;': '\u29e4',
'smid;': '\u2223',
'smile;': '\u2323',
'smt;': '\u2aaa',
'smte;': '\u2aac',
'smtes;': '\u2aac\ufe00',
'SOFTcy;': '\u042c',
'softcy;': '\u044c',
'sol;': '/',
'solb;': '\u29c4',
'solbar;': '\u233f',
'Sopf;': '\U0001d54a',
'sopf;': '\U0001d564',
'spades;': '\u2660',
'spadesuit;': '\u2660',
'spar;': '\u2225',
'sqcap;': '\u2293',
'sqcaps;': '\u2293\ufe00',
'sqcup;': '\u2294',
'sqcups;': '\u2294\ufe00',
'Sqrt;': '\u221a',
'sqsub;': '\u228f',
'sqsube;': '\u2291',
'sqsubset;': '\u228f',
'sqsubseteq;': '\u2291',
'sqsup;': '\u2290',
'sqsupe;': '\u2292',
'sqsupset;': '\u2290',
'sqsupseteq;': '\u2292',
'squ;': '\u25a1',
'Square;': '\u25a1',
'square;': '\u25a1',
'SquareIntersection;': '\u2293',
'SquareSubset;': '\u228f',
'SquareSubsetEqual;': '\u2291',
'SquareSuperset;': '\u2290',
'SquareSupersetEqual;': '\u2292',
'SquareUnion;': '\u2294',
'squarf;': '\u25aa',
'squf;': '\u25aa',
'srarr;': '\u2192',
'Sscr;': '\U0001d4ae',
'sscr;': '\U0001d4c8',
'ssetmn;': '\u2216',
'ssmile;': '\u2323',
'sstarf;': '\u22c6',
'Star;': '\u22c6',
'star;': '\u2606',
'starf;': '\u2605',
'straightepsilon;': '\u03f5',
'straightphi;': '\u03d5',
'strns;': '\xaf',
'Sub;': '\u22d0',
'sub;': '\u2282',
'subdot;': '\u2abd',
'subE;': '\u2ac5',
'sube;': '\u2286',
'subedot;': '\u2ac3',
'submult;': '\u2ac1',
'subnE;': '\u2acb',
'subne;': '\u228a',
'subplus;': '\u2abf',
'subrarr;': '\u2979',
'Subset;': '\u22d0',
'subset;': '\u2282',
'subseteq;': '\u2286',
'subseteqq;': '\u2ac5',
'SubsetEqual;': '\u2286',
'subsetneq;': '\u228a',
'subsetneqq;': '\u2acb',
'subsim;': '\u2ac7',
'subsub;': '\u2ad5',
'subsup;': '\u2ad3',
'succ;': '\u227b',
'succapprox;': '\u2ab8',
'succcurlyeq;': '\u227d',
'Succeeds;': '\u227b',
'SucceedsEqual;': '\u2ab0',
'SucceedsSlantEqual;': '\u227d',
'SucceedsTilde;': '\u227f',
'succeq;': '\u2ab0',
'succnapprox;': '\u2aba',
'succneqq;': '\u2ab6',
'succnsim;': '\u22e9',
'succsim;': '\u227f',
'SuchThat;': '\u220b',
'Sum;': '\u2211',
'sum;': '\u2211',
'sung;': '\u266a',
'sup1': '\xb9',
'sup1;': '\xb9',
'sup2': '\xb2',
'sup2;': '\xb2',
'sup3': '\xb3',
'sup3;': '\xb3',
'Sup;': '\u22d1',
'sup;': '\u2283',
'supdot;': '\u2abe',
'supdsub;': '\u2ad8',
'supE;': '\u2ac6',
'supe;': '\u2287',
'supedot;': '\u2ac4',
'Superset;': '\u2283',
'SupersetEqual;': '\u2287',
'suphsol;': '\u27c9',
'suphsub;': '\u2ad7',
'suplarr;': '\u297b',
'supmult;': '\u2ac2',
'supnE;': '\u2acc',
'supne;': '\u228b',
'supplus;': '\u2ac0',
'Supset;': '\u22d1',
'supset;': '\u2283',
'supseteq;': '\u2287',
'supseteqq;': '\u2ac6',
'supsetneq;': '\u228b',
'supsetneqq;': '\u2acc',
'supsim;': '\u2ac8',
'supsub;': '\u2ad4',
'supsup;': '\u2ad6',
'swarhk;': '\u2926',
'swArr;': '\u21d9',
'swarr;': '\u2199',
'swarrow;': '\u2199',
'swnwar;': '\u292a',
'szlig': '\xdf',
'szlig;': '\xdf',
'Tab;': '\t',
'target;': '\u2316',
'Tau;': '\u03a4',
'tau;': '\u03c4',
'tbrk;': '\u23b4',
'Tcaron;': '\u0164',
'tcaron;': '\u0165',
'Tcedil;': '\u0162',
'tcedil;': '\u0163',
'Tcy;': '\u0422',
'tcy;': '\u0442',
'tdot;': '\u20db',
'telrec;': '\u2315',
'Tfr;': '\U0001d517',
'tfr;': '\U0001d531',
'there4;': '\u2234',
'Therefore;': '\u2234',
'therefore;': '\u2234',
'Theta;': '\u0398',
'theta;': '\u03b8',
'thetasym;': '\u03d1',
'thetav;': '\u03d1',
'thickapprox;': '\u2248',
'thicksim;': '\u223c',
'ThickSpace;': '\u205f\u200a',
'thinsp;': '\u2009',
'ThinSpace;': '\u2009',
'thkap;': '\u2248',
'thksim;': '\u223c',
'THORN': '\xde',
'thorn': '\xfe',
'THORN;': '\xde',
'thorn;': '\xfe',
'Tilde;': '\u223c',
'tilde;': '\u02dc',
'TildeEqual;': '\u2243',
'TildeFullEqual;': '\u2245',
'TildeTilde;': '\u2248',
'times': '\xd7',
'times;': '\xd7',
'timesb;': '\u22a0',
'timesbar;': '\u2a31',
'timesd;': '\u2a30',
'tint;': '\u222d',
'toea;': '\u2928',
'top;': '\u22a4',
'topbot;': '\u2336',
'topcir;': '\u2af1',
'Topf;': '\U0001d54b',
'topf;': '\U0001d565',
'topfork;': '\u2ada',
'tosa;': '\u2929',
'tprime;': '\u2034',
'TRADE;': '\u2122',
'trade;': '\u2122',
'triangle;': '\u25b5',
'triangledown;': '\u25bf',
'triangleleft;': '\u25c3',
'trianglelefteq;': '\u22b4',
'triangleq;': '\u225c',
'triangleright;': '\u25b9',
'trianglerighteq;': '\u22b5',
'tridot;': '\u25ec',
'trie;': '\u225c',
'triminus;': '\u2a3a',
'TripleDot;': '\u20db',
'triplus;': '\u2a39',
'trisb;': '\u29cd',
'tritime;': '\u2a3b',
'trpezium;': '\u23e2',
'Tscr;': '\U0001d4af',
'tscr;': '\U0001d4c9',
'TScy;': '\u0426',
'tscy;': '\u0446',
'TSHcy;': '\u040b',
'tshcy;': '\u045b',
'Tstrok;': '\u0166',
'tstrok;': '\u0167',
'twixt;': '\u226c',
'twoheadleftarrow;': '\u219e',
'twoheadrightarrow;': '\u21a0',
'Uacute': '\xda',
'uacute': '\xfa',
'Uacute;': '\xda',
'uacute;': '\xfa',
'Uarr;': '\u219f',
'uArr;': '\u21d1',
'uarr;': '\u2191',
'Uarrocir;': '\u2949',
'Ubrcy;': '\u040e',
'ubrcy;': '\u045e',
'Ubreve;': '\u016c',
'ubreve;': '\u016d',
'Ucirc': '\xdb',
'ucirc': '\xfb',
'Ucirc;': '\xdb',
'ucirc;': '\xfb',
'Ucy;': '\u0423',
'ucy;': '\u0443',
'udarr;': '\u21c5',
'Udblac;': '\u0170',
'udblac;': '\u0171',
'udhar;': '\u296e',
'ufisht;': '\u297e',
'Ufr;': '\U0001d518',
'ufr;': '\U0001d532',
'Ugrave': '\xd9',
'ugrave': '\xf9',
'Ugrave;': '\xd9',
'ugrave;': '\xf9',
'uHar;': '\u2963',
'uharl;': '\u21bf',
'uharr;': '\u21be',
'uhblk;': '\u2580',
'ulcorn;': '\u231c',
'ulcorner;': '\u231c',
'ulcrop;': '\u230f',
'ultri;': '\u25f8',
'Umacr;': '\u016a',
'umacr;': '\u016b',
'uml': '\xa8',
'uml;': '\xa8',
'UnderBar;': '_',
'UnderBrace;': '\u23df',
'UnderBracket;': '\u23b5',
'UnderParenthesis;': '\u23dd',
'Union;': '\u22c3',
'UnionPlus;': '\u228e',
'Uogon;': '\u0172',
'uogon;': '\u0173',
'Uopf;': '\U0001d54c',
'uopf;': '\U0001d566',
'UpArrow;': '\u2191',
'Uparrow;': '\u21d1',
'uparrow;': '\u2191',
'UpArrowBar;': '\u2912',
'UpArrowDownArrow;': '\u21c5',
'UpDownArrow;': '\u2195',
'Updownarrow;': '\u21d5',
'updownarrow;': '\u2195',
'UpEquilibrium;': '\u296e',
'upharpoonleft;': '\u21bf',
'upharpoonright;': '\u21be',
'uplus;': '\u228e',
'UpperLeftArrow;': '\u2196',
'UpperRightArrow;': '\u2197',
'Upsi;': '\u03d2',
'upsi;': '\u03c5',
'upsih;': '\u03d2',
'Upsilon;': '\u03a5',
'upsilon;': '\u03c5',
'UpTee;': '\u22a5',
'UpTeeArrow;': '\u21a5',
'upuparrows;': '\u21c8',
'urcorn;': '\u231d',
'urcorner;': '\u231d',
'urcrop;': '\u230e',
'Uring;': '\u016e',
'uring;': '\u016f',
'urtri;': '\u25f9',
'Uscr;': '\U0001d4b0',
'uscr;': '\U0001d4ca',
'utdot;': '\u22f0',
'Utilde;': '\u0168',
'utilde;': '\u0169',
'utri;': '\u25b5',
'utrif;': '\u25b4',
'uuarr;': '\u21c8',
'Uuml': '\xdc',
'uuml': '\xfc',
'Uuml;': '\xdc',
'uuml;': '\xfc',
'uwangle;': '\u29a7',
'vangrt;': '\u299c',
'varepsilon;': '\u03f5',
'varkappa;': '\u03f0',
'varnothing;': '\u2205',
'varphi;': '\u03d5',
'varpi;': '\u03d6',
'varpropto;': '\u221d',
'vArr;': '\u21d5',
'varr;': '\u2195',
'varrho;': '\u03f1',
'varsigma;': '\u03c2',
'varsubsetneq;': '\u228a\ufe00',
'varsubsetneqq;': '\u2acb\ufe00',
'varsupsetneq;': '\u228b\ufe00',
'varsupsetneqq;': '\u2acc\ufe00',
'vartheta;': '\u03d1',
'vartriangleleft;': '\u22b2',
'vartriangleright;': '\u22b3',
'Vbar;': '\u2aeb',
'vBar;': '\u2ae8',
'vBarv;': '\u2ae9',
'Vcy;': '\u0412',
'vcy;': '\u0432',
'VDash;': '\u22ab',
'Vdash;': '\u22a9',
'vDash;': '\u22a8',
'vdash;': '\u22a2',
'Vdashl;': '\u2ae6',
'Vee;': '\u22c1',
'vee;': '\u2228',
'veebar;': '\u22bb',
'veeeq;': '\u225a',
'vellip;': '\u22ee',
'Verbar;': '\u2016',
'verbar;': '|',
'Vert;': '\u2016',
'vert;': '|',
'VerticalBar;': '\u2223',
'VerticalLine;': '|',
'VerticalSeparator;': '\u2758',
'VerticalTilde;': '\u2240',
'VeryThinSpace;': '\u200a',
'Vfr;': '\U0001d519',
'vfr;': '\U0001d533',
'vltri;': '\u22b2',
'vnsub;': '\u2282\u20d2',
'vnsup;': '\u2283\u20d2',
'Vopf;': '\U0001d54d',
'vopf;': '\U0001d567',
'vprop;': '\u221d',
'vrtri;': '\u22b3',
'Vscr;': '\U0001d4b1',
'vscr;': '\U0001d4cb',
'vsubnE;': '\u2acb\ufe00',
'vsubne;': '\u228a\ufe00',
'vsupnE;': '\u2acc\ufe00',
'vsupne;': '\u228b\ufe00',
'Vvdash;': '\u22aa',
'vzigzag;': '\u299a',
'Wcirc;': '\u0174',
'wcirc;': '\u0175',
'wedbar;': '\u2a5f',
'Wedge;': '\u22c0',
'wedge;': '\u2227',
'wedgeq;': '\u2259',
'weierp;': '\u2118',
'Wfr;': '\U0001d51a',
'wfr;': '\U0001d534',
'Wopf;': '\U0001d54e',
'wopf;': '\U0001d568',
'wp;': '\u2118',
'wr;': '\u2240',
'wreath;': '\u2240',
'Wscr;': '\U0001d4b2',
'wscr;': '\U0001d4cc',
'xcap;': '\u22c2',
'xcirc;': '\u25ef',
'xcup;': '\u22c3',
'xdtri;': '\u25bd',
'Xfr;': '\U0001d51b',
'xfr;': '\U0001d535',
'xhArr;': '\u27fa',
'xharr;': '\u27f7',
'Xi;': '\u039e',
'xi;': '\u03be',
'xlArr;': '\u27f8',
'xlarr;': '\u27f5',
'xmap;': '\u27fc',
'xnis;': '\u22fb',
'xodot;': '\u2a00',
'Xopf;': '\U0001d54f',
'xopf;': '\U0001d569',
'xoplus;': '\u2a01',
'xotime;': '\u2a02',
'xrArr;': '\u27f9',
'xrarr;': '\u27f6',
'Xscr;': '\U0001d4b3',
'xscr;': '\U0001d4cd',
'xsqcup;': '\u2a06',
'xuplus;': '\u2a04',
'xutri;': '\u25b3',
'xvee;': '\u22c1',
'xwedge;': '\u22c0',
'Yacute': '\xdd',
'yacute': '\xfd',
'Yacute;': '\xdd',
'yacute;': '\xfd',
'YAcy;': '\u042f',
'yacy;': '\u044f',
'Ycirc;': '\u0176',
'ycirc;': '\u0177',
'Ycy;': '\u042b',
'ycy;': '\u044b',
'yen': '\xa5',
'yen;': '\xa5',
'Yfr;': '\U0001d51c',
'yfr;': '\U0001d536',
'YIcy;': '\u0407',
'yicy;': '\u0457',
'Yopf;': '\U0001d550',
'yopf;': '\U0001d56a',
'Yscr;': '\U0001d4b4',
'yscr;': '\U0001d4ce',
'YUcy;': '\u042e',
'yucy;': '\u044e',
'yuml': '\xff',
'Yuml;': '\u0178',
'yuml;': '\xff',
'Zacute;': '\u0179',
'zacute;': '\u017a',
'Zcaron;': '\u017d',
'zcaron;': '\u017e',
'Zcy;': '\u0417',
'zcy;': '\u0437',
'Zdot;': '\u017b',
'zdot;': '\u017c',
'zeetrf;': '\u2128',
'ZeroWidthSpace;': '\u200b',
'Zeta;': '\u0396',
'zeta;': '\u03b6',
'Zfr;': '\u2128',
'zfr;': '\U0001d537',
'ZHcy;': '\u0416',
'zhcy;': '\u0436',
'zigrarr;': '\u21dd',
'Zopf;': '\u2124',
'zopf;': '\U0001d56b',
'Zscr;': '\U0001d4b5',
'zscr;': '\U0001d4cf',
'zwj;': '\u200d',
'zwnj;': '\u200c',
}
try:
import http.client as compat_http_client
except ImportError: # Python 2
import httplib as compat_http_client
try:
from urllib.error import HTTPError as compat_HTTPError
except ImportError: # Python 2
from urllib2 import HTTPError as compat_HTTPError
try:
from urllib.request import urlretrieve as compat_urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve as compat_urlretrieve
try:
from html.parser import HTMLParser as compat_HTMLParser
except ImportError: # Python 2
from HTMLParser import HTMLParser as compat_HTMLParser
try: # Python 2
from HTMLParser import HTMLParseError as compat_HTMLParseError
except ImportError: # Python <3.4
try:
from html.parser import HTMLParseError as compat_HTMLParseError
except ImportError: # Python >3.4
# HTMLParseError has been deprecated in Python 3.3 and removed in
# Python 3.5. Introducing a dummy exception for Python >= 3.5 for compatible
# and uniform cross-version exception handling
class compat_HTMLParseError(Exception):
pass
try:
from subprocess import DEVNULL
compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
try:
import http.server as compat_http_server
except ImportError:
import BaseHTTPServer as compat_http_server
try:
compat_str = unicode # Python 2
except NameError:
compat_str = str
try:
from urllib.parse import unquote_to_bytes as compat_urllib_parse_unquote_to_bytes
from urllib.parse import unquote as compat_urllib_parse_unquote
from urllib.parse import unquote_plus as compat_urllib_parse_unquote_plus
except ImportError: # Python 2
_asciire = (compat_urllib_parse._asciire if hasattr(compat_urllib_parse, '_asciire')
else re.compile(r'([\x00-\x7f]+)'))
# HACK: The following are the correct unquote_to_bytes, unquote and unquote_plus
# implementations from cpython 3.4.3's stdlib. Python 2's version
# is apparently broken (see https://github.com/ytdl-org/youtube-dl/pull/6244)
def compat_urllib_parse_unquote_to_bytes(string):
"""unquote_to_bytes('abc%20def') -> b'abc def'."""
# Note: strings are encoded as UTF-8. This is only an issue if it contains
# unescaped non-ASCII characters, which URIs should not.
if not string:
# Is it a string-like object?
string.split
return b''
if isinstance(string, compat_str):
string = string.encode('utf-8')
bits = string.split(b'%')
if len(bits) == 1:
return string
res = [bits[0]]
append = res.append
for item in bits[1:]:
try:
append(compat_urllib_parse._hextochr[item[:2]])
append(item[2:])
except KeyError:
append(b'%')
append(item)
return b''.join(res)
def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
"""Replace %xx escapes by their single-character equivalent. The optional
encoding and errors parameters specify how to decode percent-encoded
sequences into Unicode characters, as accepted by the bytes.decode()
method.
By default, percent-encoded sequences are decoded with UTF-8, and invalid
sequences are replaced by a placeholder character.
unquote('abc%20def') -> 'abc def'.
"""
if '%' not in string:
string.split
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'replace'
bits = _asciire.split(string)
res = [bits[0]]
append = res.append
for i in range(1, len(bits), 2):
append(compat_urllib_parse_unquote_to_bytes(bits[i]).decode(encoding, errors))
append(bits[i + 1])
return ''.join(res)
def compat_urllib_parse_unquote_plus(string, encoding='utf-8', errors='replace'):
"""Like unquote(), but also replace plus signs by spaces, as required for
unquoting HTML form values.
unquote_plus('%7e/abc+def') -> '~/abc def'
"""
string = string.replace('+', ' ')
return compat_urllib_parse_unquote(string, encoding, errors)
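# Usage sketch (assumes the backports above mirror the CPython 3 behaviour they
# were ported from; expected values are taken from the docstrings):
#   compat_urllib_parse_unquote_to_bytes('abc%20def')  -> b'abc def'
#   compat_urllib_parse_unquote('abc%20def')           -> 'abc def'
#   compat_urllib_parse_unquote_plus('%7e/abc+def')    -> '~/abc def'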
try:
from urllib.parse import urlencode as compat_urllib_parse_urlencode
except ImportError: # Python 2
# Python 2 will choke in urlencode on a mixture of byte and unicode strings.
# Possible solutions are to either port it from Python 3 with all
# its helpers or manually ensure the input query contains only byte strings.
# We will stick with the latter, thus recursively encoding the whole query.
def compat_urllib_parse_urlencode(query, doseq=0, encoding='utf-8'):
def encode_elem(e):
if isinstance(e, dict):
e = encode_dict(e)
elif isinstance(e, (list, tuple,)):
list_e = encode_list(e)
e = tuple(list_e) if isinstance(e, tuple) else list_e
elif isinstance(e, compat_str):
e = e.encode(encoding)
return e
def encode_dict(d):
return dict((encode_elem(k), encode_elem(v)) for k, v in d.items())
def encode_list(l):
return [encode_elem(e) for e in l]
return compat_urllib_parse.urlencode(encode_elem(query), doseq=doseq)
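# Usage sketch (assumes the recursive-encoding fallback above): mixed
# byte/unicode mappings are coerced to byte strings before urlencode runs, e.g.
#   compat_urllib_parse_urlencode({'q': u'\u4e2d\u6587'}) -> 'q=%E4%B8%AD%E6%96%87'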
try:
from urllib.request import DataHandler as compat_urllib_request_DataHandler
except ImportError: # Python < 3.4
# Ported from CPython 98774:1733b3bd46db, Lib/urllib/request.py
class compat_urllib_request_DataHandler(compat_urllib_request.BaseHandler):
def data_open(self, req):
# data URLs as specified in RFC 2397.
#
# ignores POSTed data
#
# syntax:
# dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
# mediatype := [ type "/" subtype ] *( ";" parameter )
# data := *urlchar
# parameter := attribute "=" value
url = req.get_full_url()
scheme, data = url.split(':', 1)
mediatype, data = data.split(',', 1)
# even base64 encoded data URLs might be quoted so unquote in any case:
data = compat_urllib_parse_unquote_to_bytes(data)
if mediatype.endswith(';base64'):
data = binascii.a2b_base64(data)
mediatype = mediatype[:-7]
if not mediatype:
mediatype = 'text/plain;charset=US-ASCII'
headers = email.message_from_string(
'Content-type: %s\nContent-length: %d\n' % (mediatype, len(data)))
return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
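# Usage sketch (assumes the RFC 2397 handler above is registered explicitly):
#   opener = compat_urllib_request.build_opener(compat_urllib_request_DataHandler())
#   opener.open('data:text/plain;base64,SGVsbG8=').read() -> b'Hello'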
try:
compat_basestring = basestring # Python 2
except NameError:
compat_basestring = str
try:
compat_chr = unichr # Python 2
except NameError:
compat_chr = chr
try:
from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError: # Python 2.6
from xml.parsers.expat import ExpatError as compat_xml_parse_error
etree = xml.etree.ElementTree
class _TreeBuilder(etree.TreeBuilder):
def doctype(self, name, pubid, system):
pass
try:
# xml.etree.ElementTree.Element is a method in Python <=2.6 and
# the following will crash with:
# TypeError: isinstance() arg 2 must be a class, type, or tuple of classes and types
isinstance(None, xml.etree.ElementTree.Element)
from xml.etree.ElementTree import Element as compat_etree_Element
except TypeError: # Python <=2.6
from xml.etree.ElementTree import _ElementInterface as compat_etree_Element
if sys.version_info[0] >= 3:
def compat_etree_fromstring(text):
return etree.XML(text, parser=etree.XMLParser(target=_TreeBuilder()))
else:
# python 2.x tries to encode unicode strings with ascii (see the
# XMLParser._fixtext method)
try:
_etree_iter = etree.Element.iter
except AttributeError: # Python <=2.6
def _etree_iter(root):
for el in root.findall('*'):
yield el
for sub in _etree_iter(el):
yield sub
# on 2.6, etree.XML doesn't accept a parser argument; the function below is
# copied from the CPython 2.7 source
def _XML(text, parser=None):
if not parser:
parser = etree.XMLParser(target=_TreeBuilder())
parser.feed(text)
return parser.close()
def _element_factory(*args, **kwargs):
el = etree.Element(*args, **kwargs)
for k, v in el.items():
if isinstance(v, bytes):
el.set(k, v.decode('utf-8'))
return el
def compat_etree_fromstring(text):
doc = _XML(text, parser=etree.XMLParser(target=_TreeBuilder(element_factory=_element_factory)))
for el in _etree_iter(doc):
if el.text is not None and isinstance(el.text, bytes):
el.text = el.text.decode('utf-8')
return doc
if hasattr(etree, 'register_namespace'):
compat_etree_register_namespace = etree.register_namespace
else:
def compat_etree_register_namespace(prefix, uri):
"""Register a namespace prefix.
The registry is global, and any existing mapping for either the
given prefix or the namespace URI will be removed.
*prefix* is the namespace prefix, *uri* is a namespace uri. Tags and
attributes in this namespace will be serialized with prefix if possible.
ValueError is raised if prefix is reserved or is invalid.
"""
if re.match(r"ns\d+$", prefix):
raise ValueError("Prefix format reserved for internal use")
for k, v in list(etree._namespace_map.items()):
if k == uri or v == prefix:
del etree._namespace_map[k]
etree._namespace_map[uri] = prefix
if sys.version_info < (2, 7):
# Here comes the crazy part: In 2.6, if the xpath is a unicode,
# .//node does not match if a node is a direct child of . !
def compat_xpath(xpath):
if isinstance(xpath, compat_str):
xpath = xpath.encode('ascii')
return xpath
else:
compat_xpath = lambda xpath: xpath
try:
from urllib.parse import parse_qs as compat_parse_qs
except ImportError: # Python 2
# HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
# Python 2's version is apparently totally broken
def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
qs, _coerce_result = qs, compat_str
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError('bad query field: %r' % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = compat_urllib_parse_unquote(
name, encoding=encoding, errors=errors)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = compat_urllib_parse_unquote(
value, encoding=encoding, errors=errors)
value = _coerce_result(value)
r.append((name, value))
return r
def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
parsed_result = {}
pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
encoding=encoding, errors=errors)
for name, value in pairs:
if name in parsed_result:
parsed_result[name].append(value)
else:
parsed_result[name] = [value]
return parsed_result
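# Illustrative usage sketch, not part of the original module: both the stdlib
# import and the fallback above follow the Python 3 parse_qs semantics, where
# repeated keys accumulate into lists and blank values are dropped by default.
def _example_compat_parse_qs():
    parsed = compat_parse_qs('v=abc&t=1m&t=2m&empty=')
    # parsed == {'v': ['abc'], 't': ['1m', '2m']}; 'empty' only appears if
    # keep_blank_values=True is passed.
    return parsed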
compat_os_name = os._name if os.name == 'java' else os.name
if compat_os_name == 'nt':
def compat_shlex_quote(s):
return s if re.match(r'^[-_\w./]+$', s) else '"%s"' % s.replace('"', '\\"')
else:
try:
from shlex import quote as compat_shlex_quote
except ImportError: # Python < 3.3
def compat_shlex_quote(s):
if re.match(r'^[-_\w./]+$', s):
return s
else:
return "'" + s.replace("'", "'\"'\"'") + "'"
try:
args = shlex.split('中文')
assert (isinstance(args, list)
and isinstance(args[0], compat_str)
and args[0] == '中文')
compat_shlex_split = shlex.split
except (AssertionError, UnicodeEncodeError):
# Working around shlex issue with unicode strings on some python 2
# versions (see http://bugs.python.org/issue1548891)
def compat_shlex_split(s, comments=False, posix=True):
if isinstance(s, compat_str):
s = s.encode('utf-8')
return list(map(lambda s: s.decode('utf-8'), shlex.split(s, comments, posix)))
def compat_ord(c):
if type(c) is int:
return c
else:
return ord(c)
if sys.version_info >= (3, 0):
compat_getenv = os.getenv
compat_expanduser = os.path.expanduser
def compat_setenv(key, value, env=os.environ):
env[key] = value
else:
# Environment variables should be decoded with filesystem encoding.
    # Otherwise it will fail if any non-ASCII characters are present (see #3854 #3217 #2918)
def compat_getenv(key, default=None):
from .utils import get_filesystem_encoding
env = os.getenv(key, default)
if env:
env = env.decode(get_filesystem_encoding())
return env
def compat_setenv(key, value, env=os.environ):
def encode(v):
from .utils import get_filesystem_encoding
return v.encode(get_filesystem_encoding()) if isinstance(v, compat_str) else v
env[encode(key)] = encode(value)
# HACK: The default implementations of os.path.expanduser from cpython do not decode
# environment variables with filesystem encoding. We will work around this by
# providing adjusted implementations.
# The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
# for different platforms with correct environment variables decoding.
if compat_os_name == 'posix':
def compat_expanduser(path):
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
do nothing."""
if not path.startswith('~'):
return path
i = path.find('/', 1)
if i < 0:
i = len(path)
if i == 1:
if 'HOME' not in os.environ:
import pwd
userhome = pwd.getpwuid(os.getuid()).pw_dir
else:
userhome = compat_getenv('HOME')
else:
import pwd
try:
pwent = pwd.getpwnam(path[1:i])
except KeyError:
return path
userhome = pwent.pw_dir
userhome = userhome.rstrip('/')
return (userhome + path[i:]) or '/'
elif compat_os_name in ('nt', 'ce'):
def compat_expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if 'HOME' in os.environ:
userhome = compat_getenv('HOME')
elif 'USERPROFILE' in os.environ:
userhome = compat_getenv('USERPROFILE')
elif 'HOMEPATH' not in os.environ:
return path
else:
try:
drive = compat_getenv('HOMEDRIVE')
except KeyError:
drive = ''
userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
if i != 1: # ~user
userhome = os.path.join(os.path.dirname(userhome), path[1:i])
return userhome + path[i:]
else:
compat_expanduser = os.path.expanduser
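# Illustrative usage sketch, not part of the original module: compat_expanduser
# behaves like os.path.expanduser, but decodes HOME/USERPROFILE with the
# filesystem encoding on Python 2 so non-ASCII home directories survive.
def _example_compat_expanduser():
    return compat_expanduser('~/downloads')  # e.g. '/home/user/downloads' when HOME is set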
if compat_os_name == 'nt' and sys.version_info < (3, 8):
# os.path.realpath on Windows does not follow symbolic links
# prior to Python 3.8 (see https://bugs.python.org/issue9949)
def compat_realpath(path):
while os.path.islink(path):
path = os.path.abspath(os.readlink(path))
return path
else:
compat_realpath = os.path.realpath
if sys.version_info < (3, 0):
def compat_print(s):
from .utils import preferredencoding
print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
def compat_print(s):
assert isinstance(s, compat_str)
print(s)
if sys.version_info < (3, 0) and sys.platform == 'win32':
def compat_getpass(prompt, *args, **kwargs):
if isinstance(prompt, compat_str):
from .utils import preferredencoding
prompt = prompt.encode(preferredencoding())
return getpass.getpass(prompt, *args, **kwargs)
else:
compat_getpass = getpass.getpass
try:
compat_input = raw_input
except NameError: # Python 3
compat_input = input
# Python < 2.6.5 requires kwargs to be bytes
try:
def _testfunc(x):
pass
_testfunc(**{'x': 0})
except TypeError:
def compat_kwargs(kwargs):
return dict((bytes(k), v) for k, v in kwargs.items())
else:
compat_kwargs = lambda kwargs: kwargs
try:
compat_numeric_types = (int, float, long, complex)
except NameError: # Python 3
compat_numeric_types = (int, float, complex)
try:
compat_integer_types = (int, long)
except NameError: # Python 3
compat_integer_types = (int, )
if sys.version_info < (2, 7):
def compat_socket_create_connection(address, timeout, source_address=None):
host, port = address
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error('getaddrinfo returns an empty list')
else:
compat_socket_create_connection = socket.create_connection
# Fix https://github.com/ytdl-org/youtube-dl/issues/4223
# See http://bugs.python.org/issue9161 for what is broken
def workaround_optparse_bug9161():
op = optparse.OptionParser()
og = optparse.OptionGroup(op, 'foo')
try:
og.add_option('-t')
except TypeError:
real_add_option = optparse.OptionGroup.add_option
def _compat_add_option(self, *args, **kwargs):
enc = lambda v: (
v.encode('ascii', 'replace') if isinstance(v, compat_str)
else v)
bargs = [enc(a) for a in args]
bkwargs = dict(
(k, enc(v)) for k, v in kwargs.items())
return real_add_option(self, *bargs, **bkwargs)
optparse.OptionGroup.add_option = _compat_add_option
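# Illustrative usage sketch, not part of the original module: the workaround
# has to be applied once, before affected interpreters populate option groups
# that carry unicode help strings or metavars.
def _example_workaround_optparse_bug9161():
    workaround_optparse_bug9161()
    parser = optparse.OptionParser()
    group = optparse.OptionGroup(parser, 'Example group')
    group.add_option('--name', metavar='NAME', help='option with a unicode help string')
    parser.add_option_group(group)
    return parser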
if hasattr(shutil, 'get_terminal_size'): # Python >= 3.3
compat_get_terminal_size = shutil.get_terminal_size
else:
_terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])
def compat_get_terminal_size(fallback=(80, 24)):
columns = compat_getenv('COLUMNS')
if columns:
columns = int(columns)
else:
columns = None
lines = compat_getenv('LINES')
if lines:
lines = int(lines)
else:
lines = None
if columns is None or lines is None or columns <= 0 or lines <= 0:
try:
sp = subprocess.Popen(
['stty', 'size'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = sp.communicate()
_lines, _columns = map(int, out.split())
except Exception:
_columns, _lines = _terminal_size(*fallback)
if columns is None or columns <= 0:
columns = _columns
if lines is None or lines <= 0:
lines = _lines
return _terminal_size(columns, lines)
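# Illustrative usage sketch, not part of the original module: both branches
# return a namedtuple with 'columns' and 'lines' fields, defaulting to 80x24
# when the real terminal size cannot be determined.
def _example_compat_get_terminal_size():
    size = compat_get_terminal_size()
    return size.columns, size.lines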
try:
itertools.count(start=0, step=1)
compat_itertools_count = itertools.count
except TypeError: # Python 2.6
def compat_itertools_count(start=0, step=1):
n = start
while True:
yield n
n += step
if sys.version_info >= (3, 0):
from tokenize import tokenize as compat_tokenize_tokenize
else:
from tokenize import generate_tokens as compat_tokenize_tokenize
try:
struct.pack('!I', 0)
except TypeError:
# In Python 2.6 and 2.7.x < 2.7.7, struct requires a bytes argument
# See https://bugs.python.org/issue19099
def compat_struct_pack(spec, *args):
if isinstance(spec, compat_str):
spec = spec.encode('ascii')
return struct.pack(spec, *args)
def compat_struct_unpack(spec, *args):
if isinstance(spec, compat_str):
spec = spec.encode('ascii')
return struct.unpack(spec, *args)
class compat_Struct(struct.Struct):
def __init__(self, fmt):
if isinstance(fmt, compat_str):
fmt = fmt.encode('ascii')
super(compat_Struct, self).__init__(fmt)
else:
compat_struct_pack = struct.pack
compat_struct_unpack = struct.unpack
if platform.python_implementation() == 'IronPython' and sys.version_info < (2, 7, 8):
class compat_Struct(struct.Struct):
def unpack(self, string):
if not isinstance(string, buffer): # noqa: F821
string = buffer(string) # noqa: F821
return super(compat_Struct, self).unpack(string)
else:
compat_Struct = struct.Struct
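# Illustrative usage sketch, not part of the original module: the wrappers
# above accept unicode format strings even on the old interpreters where the
# struct module insists on byte strings.
def _example_compat_struct_pack():
    packed = compat_struct_pack('>HI', 1, 2)     # -> b'\x00\x01\x00\x00\x00\x02'
    return compat_struct_unpack('>HI', packed)   # -> (1, 2)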
try:
from future_builtins import zip as compat_zip
except ImportError: # not 2.6+ or is 3.x
try:
from itertools import izip as compat_zip # < 2.5 or 3.x
except ImportError:
compat_zip = zip
if sys.version_info < (3, 3):
def compat_b64decode(s, *args, **kwargs):
if isinstance(s, compat_str):
s = s.encode('ascii')
return base64.b64decode(s, *args, **kwargs)
else:
compat_b64decode = base64.b64decode
if platform.python_implementation() == 'PyPy' and sys.pypy_version_info < (5, 4, 0):
# PyPy2 prior to version 5.4.0 expects byte strings as Windows function
# names, see the original PyPy issue [1] and the youtube-dl one [2].
# 1. https://bitbucket.org/pypy/pypy/issues/2360/windows-ctypescdll-typeerror-function-name
# 2. https://github.com/ytdl-org/youtube-dl/pull/4392
def compat_ctypes_WINFUNCTYPE(*args, **kwargs):
real = ctypes.WINFUNCTYPE(*args, **kwargs)
def resf(tpl, *args, **kwargs):
funcname, dll = tpl
return real((str(funcname), dll), *args, **kwargs)
return resf
else:
def compat_ctypes_WINFUNCTYPE(*args, **kwargs):
return ctypes.WINFUNCTYPE(*args, **kwargs)
__all__ = [
'compat_HTMLParseError',
'compat_HTMLParser',
'compat_HTTPError',
'compat_Struct',
'compat_b64decode',
'compat_basestring',
'compat_chr',
'compat_cookiejar',
'compat_cookiejar_Cookie',
'compat_cookies',
'compat_ctypes_WINFUNCTYPE',
'compat_etree_Element',
'compat_etree_fromstring',
'compat_etree_register_namespace',
'compat_expanduser',
'compat_get_terminal_size',
'compat_getenv',
'compat_getpass',
'compat_html_entities',
'compat_html_entities_html5',
'compat_http_client',
'compat_http_server',
'compat_input',
'compat_integer_types',
'compat_itertools_count',
'compat_kwargs',
'compat_numeric_types',
'compat_ord',
'compat_os_name',
'compat_parse_qs',
'compat_print',
'compat_realpath',
'compat_setenv',
'compat_shlex_quote',
'compat_shlex_split',
'compat_socket_create_connection',
'compat_str',
'compat_struct_pack',
'compat_struct_unpack',
'compat_subprocess_get_DEVNULL',
'compat_tokenize_tokenize',
'compat_urllib_error',
'compat_urllib_parse',
'compat_urllib_parse_unquote',
'compat_urllib_parse_unquote_plus',
'compat_urllib_parse_unquote_to_bytes',
'compat_urllib_parse_urlencode',
'compat_urllib_parse_urlparse',
'compat_urllib_request',
'compat_urllib_request_DataHandler',
'compat_urllib_response',
'compat_urlparse',
'compat_urlretrieve',
'compat_xml_parse_error',
'compat_xpath',
'compat_zip',
'workaround_optparse_bug9161',
]
|
vinegret/youtube-dl
|
youtube_dl/compat.py
|
Python
|
unlicense
| 93,865
|
[
"Bowtie"
] |
b92f55bca4dee05d2a3cfc051d2e41513d28dabc7d231be615ef5462b0a15994
|
r"""
Tools for seismic inversion: migration
**Auxiliary functions**
* :func:`~fatiando.seismic.zft_rtm`: apply 2D reverse time depth migration to a Zero-oFfseT section
* :func:`~fatiando.seismic.shot_rtm`: apply 2D reverse time depth migration to a shot gather
These implementations use explicit finite differences in time and space for forward and
reverse time extrapolation of the wave field.
**Theory**
"""
import sys
import numpy
from scipy import signal
from fatiando.seismic import wavefd
def rt_scalar(vel, area, dt, iterations, boundary, snapshot=None, padding=-1, taper=0.006):
"""
    Simulate scalar waves reversed in time using an explicit finite-difference scheme,
    4th order in space. Uses a boundary condition at z=0, re-inserting the recorded
    values into the wave-field simulation from the last samples to the first.
    Used to make reverse time depth migration of zero-offset sections or shot gathers.
    The top implements a free-surface boundary condition (TODO: change to absorbing).
    The left, right and lower boundaries use the transparent condition of Reynolds, A. C.
    (Boundary conditions for numerical solution of wave propagation problems, Geophysics, p. 1099-1110, 1978)
Parameters:
* vel : 2D-array (defines shape simulation)
The wave velocity at all the grid nodes, must be half the original velocity.
The depth velocity model.
* area : [xmin, xmax, zmin, zmax]
The x, z limits of the simulation area, e.g., the shallowest point is
at zmin, the deepest at zmax.
* dt : float
The time interval between iterations
* iterations : int
Number of time steps to take
* boundary : 2D-array
Those are the boundary values at z=0 for all iteration times.
        For zero-offset section migration or shot-gather migration this is a
        matrix of traces.
        Boundary must have the same x dimension (number of columns) as vel and
        its sample rate must be equal to dt.
* snapshot : None or int
        If not None, then yield a snapshot of the scalar quantity disturbed at every
*snapshot* iterations.
* padding : int
Number of grid nodes to use for the absorbing boundary region
        default is 5 percent of nz
* taper : float (TODO: implement real gaussian)
The intensity of the Gaussian taper function used for the absorbing
boundary conditions. Adjust it for better absorption.
Yields:
* i, u : int, 2D-array
The current iteration, the scalar quantity disturbed
The last iteration is the migrated section in depth
"""
    if boundary.shape[1] != vel.shape[1]:  # just x must be equal
        raise IndexError("boundary must have the same x dimension as velocity")
    if iterations != boundary.shape[0]:
        raise IndexError("Same number of iterations needed for rtm")
nz, nx = numpy.shape(vel) # get simulation dimensions
x1, x2, z1, z2 = area
dz, dx = (z2 - z1)/(nz - 1), (x2 - x1)/(nx - 1)
# Add some padding to x and z. The padding region is where the wave is
    # absorbed by Gaussian damping
pad = int(padding)
    if pad == -1:  # default: 5 percent of nz
        pad = int(0.05*nz) + 2  # plus 2 due to the 4th-order stencil
nx += 2*pad
nz += pad
# Pad the velocity as well
vel_pad = wavefd._add_pad(vel, pad, (nz, nx))
# Pack the particle position u at 3 different times in one 3d array
u = numpy.zeros((3, nz, nx), dtype=numpy.float)
    # Insert the zero-offset samples reversed in time, last ones first.
    # For utp1 at z=3 for every x, or z=0? (TODO: revisit this)
for j in xrange(nx-2*pad): # tp1
u[0, 0, j + pad] += boundary[iterations-1, j]
if snapshot is not None:
yield 0, u[0, :-pad, pad:-pad]
for j in xrange(nx-2*pad): # t
u[1, 0, j + pad] += boundary[iterations-2, j]
if snapshot is not None:
yield 1, u[1, :-pad, pad:-pad]
for iteration in xrange(2, iterations):
tm1, t, tp1 = iteration % 3, (iteration-1) % 3, (iteration-2) % 3 # to avoid copying between panels
# invert the order of the input parameters to make it reverse in time
wavefd._step_scalar(u[tm1], u[t], u[tp1], 2, nx - 2, 2, nz - 2,
dt, dx, dz, vel_pad)
# _apply_damping(u[tp1], nx-2, nz-2, pad-2, taper)
        # fourth order: +2/-2 indexes needed
# apply Reynolds 1d plane wave absorbing condition
wavefd._nonreflexive_scalar_boundary_conditions(u[tm1], u[t], u[tp1], vel_pad, dt, dx, dz, nx, nz)
        # Insert the zero-offset samples reversed in time, last ones first. For utp1 at z=0 for every x
for j in xrange(nx-2*pad):
u[t, 0, j + pad] += boundary[iterations-(iteration+1), j]
if snapshot is not None and iteration%snapshot == 0:
yield iteration, u[tm1, :-pad, pad:-pad]
def pre_rtmshot(shot, dt, vdepth, area, fc, source):
"""
    Perform pre-stack reverse-time depth migration on a 2D shot gather.
    Forward and reverse modelling of shots are done using the scalar wave equation.
    The image condition is the normalized cross-correlation at every grid node.
Parameters:
* shot : 2D-array
The shot gather, time x space
* dt : float
sample rate
* vdepth : 2D-array
The depth velocity field at all receiver positions
* area : [xmin, xmax, zmin, zmax]
The x, z limits of the shot/velocity area, e.g., the shallowest point is
at zmin, the deepest at zmax
    * fc : float
        Source frequency, used for forward modelling based on a Gauss source
    * source : (sx, sz)
        x, z coordinates of the source
Returns:
    * migrated shot : 2D-array
        the depth-migrated shot, same shape as vdepth
"""
# Basic parameters
# Set the parameters of the finite difference grid
nz, nx = vdepth.shape
x1, x2, z1, z2 = area
dz, dx = (z2 - z1) / (nz - 1), (x2 - x1) / (nx - 1)
ns = shot.shape[0] # number samples per trace
    # Avoid spatial aliasing and numerical dispersion based on plane waves (v = lambda*f)
    # and Alford et al., using at least 5 points per wavelength.
eps = 0.98*1./(5*max(dx, dz)*min(1./(2*dx), 1./(2*dz)))
idealfc = eps*numpy.min(vdepth)/(max(2*dx, 2*dz))
if fc > idealfc:
sys.stdout.write("Warning: the simulation might have strong numerical dispersion making it unusable\n")
sys.stdout.write("Warning: consider using a finer velocity model")
simsource = [wavefd.GaussSource(source, area, (nz, nx), 1., fc)] # forward simulation source
simdt = wavefd.scalar_maxdt(area, vdepth.shape, numpy.max(vdepth)) # forward simulation time step
simit = int(numpy.floor(ns*dt/simdt)) # maximum number of iterations needed for forward modelling
# run forward modelling of the shot
fwdsimulation = wavefd.scalar(vdepth, area, simdt, simit, simsource, snapshot=1, padding=50)
# dt from signal must be equal to dt from simulation, so resample it first
# resample the input signal is better then resampling everything else
simshot = shot
if dt != simdt: # resample shot if needed
        if dt > simdt:  # low-pass filter at the Nyquist of the shot sample rate first
            # 1/(2*simdt) corresponds to Nyquist=1 for the input signal
b, a = signal.butter(8, dt/simdt)
simshot = signal.filtfilt(b, a, shot, axis=0)
simshot = signal.resample(simshot, simit, axis=0)
# run the forward simulation and record every time step of the grid
fwdfield = numpy.zeros((simit, nz, nx))
for i, u, seismograms in fwdsimulation:
fwdfield[i, :, :] = u
sys.stdout.write("\rforward modeling progressing .. %.1f%% time %.3f" % (100.0*float(i)/simit, (simdt*i)))
sys.stdout.flush()
# Reverse in time shot basic parameters same from forward modelling
rtmsimulation = rt_scalar(vdepth, area, simdt, simit, simshot, snapshot=1, padding=50)
# run the reverse time simulation and record every time step of the grid
rtmfield = numpy.zeros((simit, nz, nx))
for i, u in rtmsimulation:
rtmfield[i, :, :] = u
sys.stdout.write("\rreverse in time modeling progressing .. %.1f%% time %.3f" % (100.0*float(i)/simit, (simdt*i)))
sys.stdout.flush()
# normalized cross-correlation image condition
migratedshot = numpy.zeros((nz, nx))
for i in xrange(nz):
for j in xrange(nx):
migratedshot[i, j] = numpy.dot(rtmfield[:, i, j], fwdfield[::-1, i, j])
migratedshot[i, j] /= numpy.sum(fwdfield[:, i, j]**2)
return migratedshot
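# Illustrative usage sketch, not part of the original module. The grid shapes,
# the constant 2500 m/s velocity and the 25 Hz source frequency below are
# assumptions chosen only to show the calling convention; a real run needs a
# recorded shot gather and a depth velocity model with matching geometry.
def _example_pre_rtmshot():
    vdepth = 2500.0*numpy.ones((100, 200))   # depth velocity model (nz, nx)
    shot = numpy.zeros((1000, 200))          # shot gather: time samples x receivers
    area = [0.0, 2000.0, 0.0, 1000.0]        # xmin, xmax, zmin, zmax
    return pre_rtmshot(shot, dt=0.002, vdepth=vdepth, area=area,
                       fc=25.0, source=(1000.0, 0.0))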
|
eusoubrasileiro/fatiando_seismic
|
fatiando/seismic/migration.py
|
Python
|
bsd-3-clause
| 13,102
|
[
"Gaussian"
] |
d7ab2a2ad53b3baec88e2e0f902238987f571ead6ec1713987426c87226fb550
|
# dialog.py --- A Python interface to the ncurses-based "dialog" utility
# -*- coding: utf-8 -*-
#
# Copyright (C) 2002, 2003, 2004, 2009, 2010, 2013, 2014, 2015 Florent Rougon
# Copyright (C) 2004 Peter Åstrand
# Copyright (C) 2000 Robb Shecter, Sultanbek Tezadov
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
# MA 02110-1301 USA.
"""Python interface to :program:`dialog`-like programs.
This module provides a Python interface to :program:`dialog`-like
programs such as :program:`dialog` and :program:`Xdialog`.
It provides a :class:`Dialog` class that retains some parameters such as
the program name and path as well as the values to pass as DIALOG*
environment variables to the chosen program.
See the pythondialog manual for detailed documentation.
"""
from __future__ import with_statement, unicode_literals, print_function
import collections
from itertools import imap
from itertools import izip
from io import open
import locale
_VersionInfo = collections.namedtuple(
"VersionInfo", ("major", "minor", "micro", "releasesuffix"))
class VersionInfo(_VersionInfo):
"""Class used to represent the version of pythondialog.
This class is based on :func:`collections.namedtuple` and has the
following field names: ``major``, ``minor``, ``micro``,
``releasesuffix``.
.. versionadded:: 2.14
"""
def __unicode__(self):
"""Return an Unicode representation of the version."""
res = ".".join( ( unicode(elt) for elt in self[:3] ) )
if self.releasesuffix:
res += self.releasesuffix
return res
def __repr__(self):
# Unicode strings are not supported as the result of __repr__()
# in Python 2.x (cf. <http://bugs.python.org/issue5876>).
return b"{0}.{1}".format(__name__, _VersionInfo.__repr__(self))
#: Version of pythondialog as a :class:`VersionInfo` instance.
#:
#: .. versionadded:: 2.14
version_info = VersionInfo(3, 2, 2, None)
#: Version of pythondialog as a Unicode string.
#:
#: .. versionadded:: 2.12
__version__ = unicode(version_info)
import sys, os, tempfile, random, re, warnings, traceback
from contextlib import contextmanager
from textwrap import dedent
# This is not for calling programs, only to prepare the shell commands that are
# written to the debug log when debugging is enabled.
try:
from shlex import quote as _shell_quote
except ImportError:
def _shell_quote(s):
return "'%s'" % s.replace("'", "'\"'\"'")
# Exceptions raised by this module
#
# When adding, suppressing, renaming exceptions or changing their
# hierarchy, don't forget to update the module's docstring.
class error(Exception):
"""Base class for exceptions in pythondialog."""
def __init__(self, message=None):
self.message = message
def __unicode__(self):
return self.complete_message()
def __repr__(self):
# Unicode strings are not supported as the result of __repr__()
# in Python 2.x (cf. <http://bugs.python.org/issue5876>).
return b"{0}.{1}({2!r})".format(__name__, self.__class__.__name__,
self.message)
def complete_message(self):
if self.message:
return "{0}: {1}".format(self.ExceptionShortDescription,
self.message)
else:
return self.ExceptionShortDescription
ExceptionShortDescription = "{0} generic exception".format("pythondialog")
# For backward-compatibility
#
# Note: this exception was not documented (only the specific ones were), so
# the backward-compatibility binding could be removed relatively easily.
PythonDialogException = error
class ExecutableNotFound(error):
"""Exception raised when the :program:`dialog` executable can't be found."""
ExceptionShortDescription = "Executable not found"
class PythonDialogBug(error):
"""Exception raised when pythondialog finds a bug in his own code."""
ExceptionShortDescription = "Bug in pythondialog"
# Yeah, the "Probably" makes it look a bit ugly, but:
# - this is more accurate
# - this avoids a potential clash with an eventual PythonBug built-in
# exception in the Python interpreter...
class ProbablyPythonBug(error):
"""Exception raised when pythondialog behaves in a way that seems to \
indicate a Python bug."""
ExceptionShortDescription = "Bug in python, probably"
class BadPythonDialogUsage(error):
"""Exception raised when pythondialog is used in an incorrect way."""
ExceptionShortDescription = "Invalid use of pythondialog"
class PythonDialogSystemError(error):
"""Exception raised when pythondialog cannot perform a "system \
operation" (e.g., a system call) that should work in "normal" situations.
This is a convenience exception: :exc:`PythonDialogIOError`,
:exc:`PythonDialogOSError` and
:exc:`PythonDialogErrorBeforeExecInChildProcess` all derive from
this exception. As a consequence, watching for
    :exc:`PythonDialogSystemError` instead of the aforementioned
exceptions is enough if you don't need precise details about these
kinds of errors.
Don't confuse this exception with Python's builtin
:exc:`SystemError` exception.
"""
ExceptionShortDescription = "System error"
class PythonDialogOSError(PythonDialogSystemError):
"""Exception raised when pythondialog catches an :exc:`OSError` exception \
that should be passed to the calling program."""
ExceptionShortDescription = "OS error"
class PythonDialogIOError(PythonDialogOSError):
"""Exception raised when pythondialog catches an :exc:`IOError` exception \
that should be passed to the calling program.
This exception should not be raised starting from Python 3.3, as the
built-in exception :exc:`IOError` becomes an alias of
:exc:`OSError`.
.. versionchanged:: 2.12
:exc:`PythonDialogIOError` is now a subclass of
:exc:`PythonDialogOSError` in order to help with the transition
from :exc:`IOError` to :exc:`OSError` in the Python language.
With this change, you can safely replace ``except
PythonDialogIOError`` clauses with ``except PythonDialogOSError``
even if running under Python < 3.3.
"""
ExceptionShortDescription = "IO error"
class PythonDialogErrorBeforeExecInChildProcess(PythonDialogSystemError):
"""Exception raised when an exception is caught in a child process \
before the exec system call (included).
This can happen in uncomfortable situations such as:
- the system being out of memory;
- the maximum number of open file descriptors being reached;
- the :program:`dialog`-like program being removed (or made
non-executable) between the time we found it with
:func:`_find_in_path` and the time the exec system call
attempted to execute it;
- the Python program trying to call the :program:`dialog`-like
program with arguments that cannot be represented in the user's
locale (:envvar:`LC_CTYPE`).
"""
ExceptionShortDescription = "Error in a child process before the exec " \
"system call"
class PythonDialogReModuleError(PythonDialogSystemError):
"""Exception raised when pythondialog catches a :exc:`re.error` exception."""
ExceptionShortDescription = "'re' module error"
class UnexpectedDialogOutput(error):
"""Exception raised when the :program:`dialog`-like program returns \
something not expected by pythondialog."""
ExceptionShortDescription = "Unexpected dialog output"
class DialogTerminatedBySignal(error):
"""Exception raised when the :program:`dialog`-like program is \
terminated by a signal."""
ExceptionShortDescription = "dialog-like terminated by a signal"
class DialogError(error):
"""Exception raised when the :program:`dialog`-like program exits \
with the code indicating an error."""
ExceptionShortDescription = "dialog-like terminated due to an error"
class UnableToRetrieveBackendVersion(error):
"""Exception raised when we cannot retrieve the version string of the \
:program:`dialog`-like backend.
.. versionadded:: 2.14
"""
ExceptionShortDescription = "Unable to retrieve the version of the \
dialog-like backend"
class UnableToParseBackendVersion(error):
"""Exception raised when we cannot parse the version string of the \
:program:`dialog`-like backend.
.. versionadded:: 2.14
"""
ExceptionShortDescription = "Unable to parse as a dialog-like backend \
version string"
class UnableToParseDialogBackendVersion(UnableToParseBackendVersion):
"""Exception raised when we cannot parse the version string of the \
:program:`dialog` backend.
.. versionadded:: 2.14
"""
ExceptionShortDescription = "Unable to parse as a dialog version string"
class InadequateBackendVersion(error):
"""Exception raised when the backend version in use is inadequate \
in a given situation.
.. versionadded:: 2.14
"""
ExceptionShortDescription = "Inadequate backend version"
@contextmanager
def _OSErrorHandling():
try:
yield
except OSError, e:
raise PythonDialogOSError(unicode(e))
except IOError, e:
raise PythonDialogIOError(unicode(e))
try:
# Values accepted for checklists
_on_cre = re.compile(r"on$", re.IGNORECASE)
_off_cre = re.compile(r"off$", re.IGNORECASE)
_calendar_date_cre = re.compile(
r"(?P<day>\d\d)/(?P<month>\d\d)/(?P<year>\d\d\d\d)$")
_timebox_time_cre = re.compile(
r"(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)$")
except re.error, e:
raise PythonDialogReModuleError(unicode(e))
# From dialog(1):
#
# All options begin with "--" (two ASCII hyphens, for the benefit of those
# using systems with deranged locale support).
#
# A "--" by itself is used as an escape, i.e., the next token on the
# command-line is not treated as an option, as in:
# dialog --title -- --Not an option
def _dash_escape(args):
"""Escape all elements of *args* that need escaping.
*args* may be any sequence and is not modified by this function.
Return a new list where every element that needs escaping has been
escaped.
An element needs escaping when it starts with two ASCII hyphens
(``--``). Escaping consists in prepending an element composed of two
ASCII hyphens, i.e., the string ``'--'``.
"""
res = []
for arg in args:
if arg.startswith("--"):
res.extend(("--", arg))
else:
res.append(arg)
return res
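# Illustrative usage sketch, not part of the original module: only elements
# starting with two ASCII hyphens get escaped, by inserting a literal '--'
# element in front of them.
def _example_dash_escape():
    return _dash_escape(["--title", "Regular argument"])
    # -> ["--", "--title", "Regular argument"]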
# We need this function in the global namespace for the lambda
# expressions in _common_args_syntax to see it when they are called.
def _dash_escape_nf(args): # nf: non-first
"""Escape all elements of *args* that need escaping, except the first one.
See :func:`_dash_escape` for details. Return a new list.
"""
if not args:
raise PythonDialogBug("not a non-empty sequence: {0!r}".format(args))
l = _dash_escape(args[1:])
l.insert(0, args[0])
return l
def _simple_option(option, enable):
"""Turn on or off the simplest :term:`dialog common options`."""
if enable:
return (option,)
else:
# This will not add any argument to the command line
return ()
# This dictionary allows us to write the dialog common options in a Pythonic
# way (e.g. dialog_instance.checklist(args, ..., title="Foo", no_shadow=True)).
#
# Options such as --separate-output should obviously not be set by the user
# since they affect the parsing of dialog's output:
_common_args_syntax = {
"ascii_lines": lambda enable: _simple_option("--ascii-lines", enable),
"aspect": lambda ratio: _dash_escape_nf(("--aspect", unicode(ratio))),
"backtitle": lambda backtitle: _dash_escape_nf(("--backtitle", backtitle)),
# Obsolete according to dialog(1)
"beep": lambda enable: _simple_option("--beep", enable),
# Obsolete according to dialog(1)
"beep_after": lambda enable: _simple_option("--beep-after", enable),
# Warning: order = y, x!
"begin": lambda coords: _dash_escape_nf(
("--begin", unicode(coords[0]), unicode(coords[1]))),
"cancel_label": lambda s: _dash_escape_nf(("--cancel-label", s)),
# Old, unfortunate choice of key, kept for backward compatibility
"cancel": lambda s: _dash_escape_nf(("--cancel-label", s)),
"clear": lambda enable: _simple_option("--clear", enable),
"colors": lambda enable: _simple_option("--colors", enable),
"column_separator": lambda s: _dash_escape_nf(("--column-separator", s)),
"cr_wrap": lambda enable: _simple_option("--cr-wrap", enable),
"create_rc": lambda filename: _dash_escape_nf(("--create-rc", filename)),
"date_format": lambda s: _dash_escape_nf(("--date-format", s)),
"defaultno": lambda enable: _simple_option("--defaultno", enable),
"default_button": lambda s: _dash_escape_nf(("--default-button", s)),
"default_item": lambda s: _dash_escape_nf(("--default-item", s)),
"exit_label": lambda s: _dash_escape_nf(("--exit-label", s)),
"extra_button": lambda enable: _simple_option("--extra-button", enable),
"extra_label": lambda s: _dash_escape_nf(("--extra-label", s)),
"help": lambda enable: _simple_option("--help", enable),
"help_button": lambda enable: _simple_option("--help-button", enable),
"help_label": lambda s: _dash_escape_nf(("--help-label", s)),
"help_status": lambda enable: _simple_option("--help-status", enable),
"help_tags": lambda enable: _simple_option("--help-tags", enable),
"hfile": lambda filename: _dash_escape_nf(("--hfile", filename)),
"hline": lambda s: _dash_escape_nf(("--hline", s)),
"ignore": lambda enable: _simple_option("--ignore", enable),
"insecure": lambda enable: _simple_option("--insecure", enable),
"item_help": lambda enable: _simple_option("--item-help", enable),
"keep_tite": lambda enable: _simple_option("--keep-tite", enable),
"keep_window": lambda enable: _simple_option("--keep-window", enable),
"max_input": lambda size: _dash_escape_nf(("--max-input", unicode(size))),
"no_cancel": lambda enable: _simple_option("--no-cancel", enable),
"nocancel": lambda enable: _simple_option("--nocancel", enable),
"no_collapse": lambda enable: _simple_option("--no-collapse", enable),
"no_kill": lambda enable: _simple_option("--no-kill", enable),
"no_label": lambda s: _dash_escape_nf(("--no-label", s)),
"no_lines": lambda enable: _simple_option("--no-lines", enable),
"no_mouse": lambda enable: _simple_option("--no-mouse", enable),
"no_nl_expand": lambda enable: _simple_option("--no-nl-expand", enable),
"no_ok": lambda enable: _simple_option("--no-ok", enable),
"no_shadow": lambda enable: _simple_option("--no-shadow", enable),
"no_tags": lambda enable: _simple_option("--no-tags", enable),
"ok_label": lambda s: _dash_escape_nf(("--ok-label", s)),
# cf. Dialog.maxsize()
"print_maxsize": lambda enable: _simple_option("--print-maxsize",
enable),
"print_size": lambda enable: _simple_option("--print-size", enable),
# cf. Dialog.backend_version()
"print_version": lambda enable: _simple_option("--print-version",
enable),
"scrollbar": lambda enable: _simple_option("--scrollbar", enable),
"separate_output": lambda enable: _simple_option("--separate-output",
enable),
"separate_widget": lambda s: _dash_escape_nf(("--separate-widget", s)),
"shadow": lambda enable: _simple_option("--shadow", enable),
# Obsolete according to dialog(1)
"size_err": lambda enable: _simple_option("--size-err", enable),
"sleep": lambda secs: _dash_escape_nf(("--sleep", unicode(secs))),
"stderr": lambda enable: _simple_option("--stderr", enable),
"stdout": lambda enable: _simple_option("--stdout", enable),
"tab_correct": lambda enable: _simple_option("--tab-correct", enable),
"tab_len": lambda n: _dash_escape_nf(("--tab-len", unicode(n))),
"time_format": lambda s: _dash_escape_nf(("--time-format", s)),
"timeout": lambda secs: _dash_escape_nf(("--timeout", unicode(secs))),
"title": lambda title: _dash_escape_nf(("--title", title)),
"trace": lambda filename: _dash_escape_nf(("--trace", filename)),
"trim": lambda enable: _simple_option("--trim", enable),
"version": lambda enable: _simple_option("--version", enable),
"visit_items": lambda enable: _simple_option("--visit-items", enable),
"yes_label": lambda s: _dash_escape_nf(("--yes-label", s)) }
def _find_in_path(prog_name):
"""Search an executable in the :envvar:`PATH`.
If :envvar:`PATH` is not defined, the default path
``:/bin:/usr/bin`` is used.
Return a path to the file or ``None`` if no readable and executable
file is found.
Notable exception:
:exc:`PythonDialogOSError`
"""
with _OSErrorHandling():
# Note that the leading empty component in the default value for PATH
# could lead to the returned path not being absolute.
PATH = os.getenv("PATH", ":/bin:/usr/bin") # see the execvp(3) man page
for d in PATH.split(":"):
file_path = os.path.join(d, prog_name)
if os.path.isfile(file_path) \
and os.access(file_path, os.R_OK | os.X_OK):
return file_path
return None
def _path_to_executable(f):
"""Find a path to an executable.
Find a path to an executable, using the same rules as the POSIX
exec*p functions (see execvp(3) for instance).
If *f* contains a ``/``, it is assumed to be a path and is simply
    checked for read and execute permissions; otherwise, it is looked for
according to the contents of the :envvar:`PATH` environment
variable, which defaults to ``:/bin:/usr/bin`` if unset.
The returned path is not necessarily absolute.
Notable exceptions:
- :exc:`ExecutableNotFound`
- :exc:`PythonDialogOSError`
"""
with _OSErrorHandling():
if '/' in f:
if os.path.isfile(f) and \
os.access(f, os.R_OK | os.X_OK):
res = f
else:
raise ExecutableNotFound("%s cannot be read and executed" % f)
else:
res = _find_in_path(f)
if res is None:
raise ExecutableNotFound(
"can't find the executable for the dialog-like "
"program")
return res
def _to_onoff(val):
"""Convert boolean expressions to ``"on"`` or ``"off"``.
:return:
- ``"on"`` if *val* is ``True``, a non-zero integer, ``"on"`` or
any case variation thereof;
- ``"off"`` if *val* is ``False``, ``0``, ``"off"`` or any case
variation thereof.
Notable exceptions:
- :exc:`PythonDialogReModuleError`
- :exc:`BadPythonDialogUsage`
"""
if isinstance(val, (bool, int)):
return "on" if val else "off"
elif isinstance(val, basestring):
try:
if _on_cre.match(val):
return "on"
elif _off_cre.match(val):
return "off"
except re.error, e:
raise PythonDialogReModuleError(unicode(e))
raise BadPythonDialogUsage("invalid boolean value: {0!r}".format(val))
def _compute_common_args(mapping):
"""Compute the list of arguments for :term:`dialog common options`.
Compute a list of the command-line arguments to pass to
:program:`dialog` from a keyword arguments dictionary for options
listed as "common options" in the manual page for :program:`dialog`.
These are the options that are not tied to a particular widget.
This allows one to specify these options in a pythonic way, such
as::
d.checklist(<usual arguments for a checklist>,
title="...",
backtitle="...")
instead of having to pass them with strings like ``"--title foo"``
or ``"--backtitle bar"``.
Notable exceptions: none
"""
args = []
for option, value in mapping.items():
args.extend(_common_args_syntax[option](value))
return args
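# Illustrative usage sketch, not part of the original module: a keyword mapping
# is expanded into the corresponding dialog command-line fragments; the relative
# order of the options follows the dictionary iteration order.
def _example_compute_common_args():
    return _compute_common_args({"title": "Backup", "no_shadow": True})
    # -> ["--title", "Backup", "--no-shadow"] (option order may vary)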
# Classes for dealing with the version of dialog-like backend programs
if sys.hexversion >= 0x030200F0:
import abc
# Abstract base class
class BackendVersion():
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __unicode__(self):
raise NotImplementedError()
if sys.hexversion >= 0x030300F0:
@classmethod
@abc.abstractmethod
def fromstring(cls, s):
raise NotImplementedError()
else: # for Python 3.2
@abc.abstractclassmethod
def fromstring(cls, s):
raise NotImplementedError()
@abc.abstractmethod
def __lt__(self, other):
raise NotImplementedError()
@abc.abstractmethod
def __le__(self, other):
raise NotImplementedError()
@abc.abstractmethod
def __eq__(self, other):
raise NotImplementedError()
@abc.abstractmethod
def __ne__(self, other):
raise NotImplementedError()
@abc.abstractmethod
def __gt__(self, other):
raise NotImplementedError()
@abc.abstractmethod
def __ge__(self, other):
raise NotImplementedError()
else:
class BackendVersion(object):
pass
class DialogBackendVersion(BackendVersion):
"""Class representing possible versions of the :program:`dialog` backend.
The purpose of this class is to make it easy to reliably compare
between versions of the :program:`dialog` backend. It encapsulates
the specific details of the backend versioning scheme to allow
eventual adaptations to changes in this scheme without affecting
external code.
The version is represented by two components in this class: the
:dfn:`dotted part` and the :dfn:`rest`. For instance, in the
``'1.2'`` version string, the dotted part is ``[1, 2]`` and the rest
is the empty string. However, in version ``'1.2-20130902'``, the
dotted part is still ``[1, 2]``, but the rest is the string
``'-20130902'``.
Instances of this class can be created with the constructor by
specifying the dotted part and the rest. Alternatively, an instance
can be created from the corresponding version string (e.g.,
``'1.2-20130902'``) using the :meth:`fromstring` class method. This
is particularly useful with the result of
:samp:`{d}.backend_version()`, where *d* is a :class:`Dialog`
instance. Actually, the main constructor detects if its first
argument is a string and calls :meth:`!fromstring` in this case as a
convenience. Therefore, all of the following expressions are valid
to create a DialogBackendVersion instance::
DialogBackendVersion([1, 2])
DialogBackendVersion([1, 2], "-20130902")
DialogBackendVersion("1.2-20130902")
DialogBackendVersion.fromstring("1.2-20130902")
If *bv* is a :class:`DialogBackendVersion` instance,
    :samp:`unicode({bv})` is a Unicode string representing the same
version (for instance, ``"1.2-20130902"``).
Two :class:`DialogBackendVersion` instances can be compared with the
usual comparison operators (``<``, ``<=``, ``==``, ``!=``, ``>=``,
``>``). The algorithm is designed so that the following order is
    respected (after instantiation with :meth:`fromstring`)::
1.2 < 1.2-20130902 < 1.2-20130903 < 1.2.0 < 1.2.0-20130902
among other cases. Actually, the *dotted parts* are the primary keys
when comparing and *rest* strings act as secondary keys. *Dotted
parts* are compared with the standard Python list comparison and
*rest* strings using the standard Python string comparison.
"""
try:
_backend_version_cre = re.compile(r"""(?P<dotted> (\d+) (\.\d+)* )
(?P<rest>.*)$""", re.VERBOSE)
except re.error, e:
raise PythonDialogReModuleError(unicode(e))
def __init__(self, dotted_part_or_str, rest=""):
"""Create a :class:`DialogBackendVersion` instance.
Please see the class docstring for details.
"""
if isinstance(dotted_part_or_str, basestring):
if rest:
raise BadPythonDialogUsage(
"non-empty 'rest' with 'dotted_part_or_str' as string: "
"{0!r}".format(rest))
else:
tmp = self.__class__.fromstring(dotted_part_or_str)
dotted_part_or_str, rest = tmp.dotted_part, tmp.rest
for elt in dotted_part_or_str:
if not isinstance(elt, int):
raise BadPythonDialogUsage(
"when 'dotted_part_or_str' is not a string, it must "
"be a sequence (or iterable) of integers; however, "
"{0!r} is not an integer.".format(elt))
self.dotted_part = list(dotted_part_or_str)
self.rest = rest
def __repr__(self):
# Unicode strings are not supported as the result of __repr__()
# in Python 2.x (cf. <http://bugs.python.org/issue5876>).
return b"{0}.{1}({2!r}, rest={3!r})".format(
__name__, self.__class__.__name__, self.dotted_part, self.rest)
def __unicode__(self):
return '.'.join(imap(unicode, self.dotted_part)) + self.rest
@classmethod
def fromstring(cls, s):
"""Create a :class:`DialogBackendVersion` instance from a \
:program:`dialog` version string.
:param str s: a :program:`dialog` version string
:return:
a :class:`DialogBackendVersion` instance representing the same
string
Notable exceptions:
- :exc:`UnableToParseDialogBackendVersion`
- :exc:`PythonDialogReModuleError`
"""
try:
mo = cls._backend_version_cre.match(s)
if not mo:
raise UnableToParseDialogBackendVersion(s)
dotted_part = [ int(x) for x in mo.group("dotted").split(".") ]
rest = mo.group("rest")
except re.error, e:
raise PythonDialogReModuleError(unicode(e))
return cls(dotted_part, rest)
def __lt__(self, other):
return (self.dotted_part, self.rest) < (other.dotted_part, other.rest)
def __le__(self, other):
return (self.dotted_part, self.rest) <= (other.dotted_part, other.rest)
def __eq__(self, other):
return (self.dotted_part, self.rest) == (other.dotted_part, other.rest)
# Python 3.2 has a decorator (functools.total_ordering) to automate this.
def __ne__(self, other):
return not (self == other)
def __gt__(self, other):
return not (self <= other)
def __ge__(self, other):
return not (self < other)
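# Illustrative usage sketch, not part of the original module: instances compare
# using the dotted part as the primary key and the trailing "rest" string as the
# secondary key, exactly as described in the class docstring above.
def _example_backend_version_ordering():
    assert DialogBackendVersion("1.2") < DialogBackendVersion("1.2-20130902")
    assert DialogBackendVersion("1.2-20130902") < DialogBackendVersion("1.2.0")
    return unicode(DialogBackendVersion([1, 2], "-20130902"))  # -> u'1.2-20130902'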
def widget(func):
"""Decorator to mark :class:`Dialog` methods that provide widgets.
This allows code to perform automatic operations on these specific
methods. For instance, one can define a class that behaves similarly
to :class:`Dialog`, except that after every widget-producing call,
it spawns a "confirm quit" dialog if the widget returned
:attr:`Dialog.ESC`, and loops in case the user doesn't actually want
to quit.
When it is unclear whether a method should have the decorator or
not, the return value is used to draw the line. For instance, among
:meth:`Dialog.gauge_start`, :meth:`Dialog.gauge_update` and
:meth:`Dialog.gauge_stop`, only the last one has the decorator
because it returns a :term:`Dialog exit code`, whereas the first two
don't return anything meaningful.
Note:
Some widget-producing methods return the Dialog exit code, but
other methods return a *sequence*, the first element of which is
the Dialog exit code; the ``retval_is_code`` attribute, which is
set by the decorator of the same name, allows to programmatically
discover the interface a given method conforms to.
.. versionadded:: 2.14
"""
func.is_widget = True
return func
def retval_is_code(func):
"""Decorator for :class:`Dialog` widget-producing methods whose \
return value is the :term:`Dialog exit code`.
This decorator is intended for widget-producing methods whose return
value consists solely of the Dialog exit code. When this decorator
is *not* used on a widget-producing method, the Dialog exit code
must be the first element of the return value.
.. versionadded:: 3.0
"""
func.retval_is_code = True
return func
def _obsolete_property(name, replacement=None):
if replacement is None:
replacement = name
def getter(self):
warnings.warn("the DIALOG_{name} attribute of Dialog instances is "
"obsolete; use the Dialog.{repl} class attribute "
"instead.".format(name=name, repl=replacement),
DeprecationWarning)
return getattr(self, replacement)
return getter
# Main class of the module
class Dialog(object):
"""Class providing bindings for :program:`dialog`-compatible programs.
This class allows you to invoke :program:`dialog` or a compatible
program in a pythonic way to quickly and easily build simple but
nice text interfaces.
An application typically creates one instance of the :class:`Dialog`
class and uses it for all its widgets, but it is possible to
concurrently use several instances of this class with different
parameters (such as the background title) if you have a need for
this.
"""
try:
_print_maxsize_cre = re.compile(r"""^MaxSize:[ \t]+
(?P<rows>\d+),[ \t]*
(?P<columns>\d+)[ \t]*$""",
re.VERBOSE)
_print_version_cre = re.compile(
r"^Version:[ \t]+(?P<version>.+?)[ \t]*$", re.MULTILINE)
except re.error, e:
raise PythonDialogReModuleError(unicode(e))
# DIALOG_OK, DIALOG_CANCEL, etc. are environment variables controlling
# the dialog backend exit status in the corresponding situation ("low-level
# exit status/code").
#
# Note:
# - 127 must not be used for any of the DIALOG_* values. It is used
# when a failure occurs in the child process before it exec()s
# dialog (where "before" includes a potential exec() failure).
# - 126 is also used (although in presumably rare situations).
_DIALOG_OK = 0
_DIALOG_CANCEL = 1
_DIALOG_ESC = 2
_DIALOG_ERROR = 3
_DIALOG_EXTRA = 4
_DIALOG_HELP = 5
_DIALOG_ITEM_HELP = 6
# cf. also _lowlevel_exit_codes and _dialog_exit_code_ll_to_hl which are
# created by __init__(). It is not practical to define everything here,
# because there is no equivalent of 'self' for the class outside method
# definitions.
_lowlevel_exit_code_varnames = frozenset(("OK", "CANCEL", "ESC", "ERROR",
"EXTRA", "HELP", "ITEM_HELP"))
# High-level exit codes, AKA "Dialog exit codes". These are the codes that
# pythondialog-based applications should use.
#
#: :term:`Dialog exit code` corresponding to the ``DIALOG_OK``
#: :term:`dialog exit status`
OK = "ok"
#: :term:`Dialog exit code` corresponding to the ``DIALOG_CANCEL``
#: :term:`dialog exit status`
CANCEL = "cancel"
#: :term:`Dialog exit code` corresponding to the ``DIALOG_ESC``
#: :term:`dialog exit status`
ESC = "esc"
#: :term:`Dialog exit code` corresponding to the ``DIALOG_EXTRA``
#: :term:`dialog exit status`
EXTRA = "extra"
#: :term:`Dialog exit code` corresponding to the ``DIALOG_HELP`` and
#: ``DIALOG_ITEM_HELP`` :term:`dialog exit statuses <dialog exit status>`
HELP = "help"
# Define properties to maintain backward-compatibility while warning about
# the obsolete attributes (which used to refer to the low-level exit codes
# in pythondialog 2.x).
#
#: Obsolete property superseded by :attr:`Dialog.OK` since version 3.0
DIALOG_OK = property(_obsolete_property("OK"),
doc="Obsolete property superseded by Dialog.OK")
#: Obsolete property superseded by :attr:`Dialog.CANCEL` since version 3.0
DIALOG_CANCEL = property(_obsolete_property("CANCEL"),
doc="Obsolete property superseded by Dialog.CANCEL")
#: Obsolete property superseded by :attr:`Dialog.ESC` since version 3.0
DIALOG_ESC = property(_obsolete_property("ESC"),
doc="Obsolete property superseded by Dialog.ESC")
#: Obsolete property superseded by :attr:`Dialog.EXTRA` since version 3.0
DIALOG_EXTRA = property(_obsolete_property("EXTRA"),
doc="Obsolete property superseded by Dialog.EXTRA")
#: Obsolete property superseded by :attr:`Dialog.HELP` since version 3.0
DIALOG_HELP = property(_obsolete_property("HELP"),
doc="Obsolete property superseded by Dialog.HELP")
# We treat DIALOG_ITEM_HELP and DIALOG_HELP the same way in pythondialog,
# since both indicate the same user action ("Help" button pressed).
#
#: Obsolete property superseded by :attr:`Dialog.HELP` since version 3.0
DIALOG_ITEM_HELP = property(_obsolete_property("ITEM_HELP",
replacement="HELP"),
doc="Obsolete property superseded by Dialog.HELP")
@property
def DIALOG_ERROR(self):
warnings.warn("the DIALOG_ERROR attribute of Dialog instances is "
"obsolete. Since the corresponding exit status is "
"automatically translated into a DialogError exception, "
"users should not see nor need this attribute. If you "
"think you have a good reason to use it, please expose "
"your situation on the pythondialog mailing-list.",
DeprecationWarning)
# There is no corresponding high-level code; and if the user *really*
# wants to know the (integer) error exit status, here it is...
return self._DIALOG_ERROR
def __init__(self, dialog="dialog", DIALOGRC=None,
compat="dialog", use_stdout=None, **kwargs):
"""Constructor for :class:`Dialog` instances.
:param str dialog:
name of (or path to) the :program:`dialog`-like program to
use; if it contains a ``'/'``, it is assumed to be a path and
is used as is; otherwise, it is looked for according to the
contents of the :envvar:`PATH` environment variable, which
defaults to ``":/bin:/usr/bin"`` if unset.
:param str DIALOGRC:
string to pass to the :program:`dialog`-like program as the
:envvar:`DIALOGRC` environment variable, or ``None`` if no
modification to the environment regarding this variable should
be done in the call to the :program:`dialog`-like program
:param str compat:
compatibility mode (see :ref:`below
<Dialog-constructor-compat-arg>`)
:param bool use_stdout:
read :program:`dialog`'s standard output stream instead of its
standard error stream in order to get most "results"
(user-supplied strings, selected items, etc.; basically,
everything except the exit status). This is for compatibility
with :program:`Xdialog` and should only be used if you have a
good reason to do so.
:param bool autowidgetsize:
whether to enable *autowidgetsize* mode. When enabled, all
pythondialog widget-producing methods will behave as if
``width=0``, ``height=0``, etc. had been passed, except where
            these parameters are explicitly specified with different
values. This has the effect that, by default, the
:program:`dialog` backend will automatically compute a
suitable size for the widgets. More details about this option
are given :ref:`below <autowidgetsize>`.
:return: a :class:`Dialog` instance
.. _Dialog-constructor-compat-arg:
The officially supported :program:`dialog`-like program in
pythondialog is the well-known dialog_ program written in C,
based on the ncurses_ library.
.. _dialog: http://invisible-island.net/dialog/dialog.html
.. _ncurses: http://invisible-island.net/ncurses/ncurses.html
If you want to use a different program such as Xdialog_, you
should indicate the executable file name with the *dialog*
argument **and** the compatibility type that you think it
conforms to with the *compat* argument. Currently, *compat* can
be either ``"dialog"`` (for :program:`dialog`; this is the
default) or ``"Xdialog"`` (for, well, :program:`Xdialog`).
.. _Xdialog: http://xdialog.free.fr/
The *compat* argument allows me to cope with minor differences
in behaviour between the various programs implementing the
:program:`dialog` interface (not the text or graphical
interface, I mean the API). However, having to support various
APIs simultaneously is ugly and I would really prefer you to
report bugs to the relevant maintainers when you find
incompatibilities with :program:`dialog`. This is for the
benefit of pretty much everyone that relies on the
:program:`dialog` interface.
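        A minimal construction sketch (the background title is purely
        illustrative; the module is assumed to be importable as
        ``dialog``)::
            import dialog
            d = dialog.Dialog(dialog="dialog", autowidgetsize=True)
            d.set_background_title("My application")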
Notable exceptions:
- :exc:`ExecutableNotFound`
- :exc:`PythonDialogOSError`
- :exc:`UnableToRetrieveBackendVersion`
- :exc:`UnableToParseBackendVersion`
.. versionadded:: 3.1
Support for the *autowidgetsize* parameter.
"""
        autowidgetsize = kwargs.pop('autowidgetsize', False)
# DIALOGRC differs from the Dialog._DIALOG_* attributes in that:
# 1. It is an instance attribute instead of a class attribute.
# 2. It should be a string if not None.
# 3. We may very well want it to be unset.
if DIALOGRC is not None:
self.DIALOGRC = DIALOGRC
# Mapping from "OK", "CANCEL", ... to the corresponding dialog exit
# statuses (integers).
self._lowlevel_exit_codes = dict((
name, getattr(self, "_DIALOG_" + name))
for name in self._lowlevel_exit_code_varnames)
# Mapping from dialog exit status (integer) to Dialog exit code ("ok",
# "cancel", ... strings referred to by Dialog.OK, Dialog.CANCEL, ...);
# in other words, from low-level to high-level exit code.
self._dialog_exit_code_ll_to_hl = {}
for name in self._lowlevel_exit_code_varnames:
intcode = self._lowlevel_exit_codes[name]
if name == "ITEM_HELP":
self._dialog_exit_code_ll_to_hl[intcode] = self.HELP
elif name == "ERROR":
continue
else:
self._dialog_exit_code_ll_to_hl[intcode] = getattr(self, name)
self._dialog_prg = _path_to_executable(dialog)
self.compat = compat
self.autowidgetsize = autowidgetsize
self.dialog_persistent_arglist = []
# Use stderr or stdout for reading dialog's output?
if self.compat == "Xdialog":
# Default to using stdout for Xdialog
self.use_stdout = True
else:
self.use_stdout = False
if use_stdout is not None:
# Allow explicit setting
self.use_stdout = use_stdout
if self.use_stdout:
self.add_persistent_args(["--stdout"])
self.setup_debug(False)
if compat == "dialog":
self.cached_backend_version = DialogBackendVersion.fromstring(
self.backend_version())
else:
# Xdialog doesn't seem to offer --print-version (2013-09-12)
self.cached_backend_version = None
@classmethod
def dash_escape(cls, args):
"""
Escape all elements of *args* that need escaping for :program:`dialog`.
*args* may be any sequence and is not modified by this method.
Return a new list where every element that needs escaping has
been escaped.
An element needs escaping when it starts with two ASCII hyphens
        (``--``). Escaping consists of prepending an element composed of
two ASCII hyphens, i.e., the string ``'--'``.
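        For instance, a small sketch with purely illustrative values::
            Dialog.dash_escape(["--title", "Hello", "--", "world"])
            # -> ["--", "--title", "Hello", "--", "--", "world"]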
All high-level :class:`Dialog` methods automatically perform
:term:`dash escaping` where appropriate. In particular, this is
the case for every method that provides a widget: :meth:`yesno`,
:meth:`msgbox`, etc. You only need to do it yourself when
calling a low-level method such as :meth:`add_persistent_args`.
.. versionadded:: 2.12
"""
return _dash_escape(args)
@classmethod
def dash_escape_nf(cls, args):
"""
Escape all elements of *args* that need escaping, except the first one.
See :meth:`dash_escape` for details. Return a new list.
All high-level :class:`Dialog` methods automatically perform dash
escaping where appropriate. In particular, this is the case
for every method that provides a widget: :meth:`yesno`, :meth:`msgbox`,
etc. You only need to do it yourself when calling a low-level
method such as :meth:`add_persistent_args`.
.. versionadded:: 2.12
"""
return _dash_escape_nf(args)
def add_persistent_args(self, args):
"""Add arguments to use for every subsequent dialog call.
This method cannot guess which elements of *args* are dialog
options (such as ``--title``) and which are not (for instance,
you might want to use ``--title`` or even ``--`` as an argument
to a dialog option). Therefore, this method does not perform any
kind of :term:`dash escaping`; you have to do it yourself.
:meth:`dash_escape` and :meth:`dash_escape_nf` may be useful for
this purpose.
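        A small sketch (the option shown is only an example)::
            d.add_persistent_args(d.dash_escape_nf(["--no-shadow"]))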
"""
self.dialog_persistent_arglist.extend(args)
def set_background_title(self, text):
"""Set the background title for dialog.
:param str text: string to use as background title
.. versionadded:: 2.13
"""
self.add_persistent_args(self.dash_escape_nf(("--backtitle", text)))
# For compatibility with the old dialog
def setBackgroundTitle(self, text):
"""Set the background title for :program:`dialog`.
:param str text: background title to use behind widgets
.. deprecated:: 2.03
Use :meth:`set_background_title` instead.
"""
warnings.warn("Dialog.setBackgroundTitle() has been obsolete for "
"many years; use Dialog.set_background_title() instead",
DeprecationWarning)
self.set_background_title(text)
def setup_debug(self, enable, file=None, always_flush=False):
"""Setup the debugging parameters.
When enabled, all :program:`dialog` commands are written to
*file* using POSIX shell syntax.
:param bool enable: whether to enable or disable debugging
:param file file: where to write debugging information
:param bool always_flush: whether to call :meth:`file.flush`
after each command written
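        A possible usage sketch (the log file name is illustrative)::
            logfile = open("pythondialog-debug.log", "w")
            d.setup_debug(True, file=logfile, always_flush=True)
            # ... subsequent dialog calls are logged to the file ...
            d.setup_debug(False)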
.. versionadded:: 2.12
"""
self._debug_enabled = enable
if not hasattr(self, "_debug_logfile"):
self._debug_logfile = None
# Allows to switch debugging on and off without having to pass the file
# object again and again.
if file is not None:
self._debug_logfile = file
if enable and self._debug_logfile is None:
raise BadPythonDialogUsage(
"you must specify a file object when turning debugging on")
self._debug_always_flush = always_flush
self._debug_first_output = True
def _write_command_to_file(self, env, arglist):
envvar_settings_list = []
if "DIALOGRC" in env:
envvar_settings_list.append(
"DIALOGRC={0}".format(_shell_quote(env["DIALOGRC"])))
for var in self._lowlevel_exit_code_varnames:
varname = "DIALOG_" + var
envvar_settings_list.append(
"{0}={1}".format(varname, _shell_quote(env[varname])))
command_str = ' '.join(envvar_settings_list +
list(imap(_shell_quote, arglist)))
s = "{separator}{cmd}\n\nArgs: {args!r}\n".format(
separator="" if self._debug_first_output else ("-" * 79) + "\n",
cmd=command_str, args=arglist)
self._debug_logfile.write(s)
if self._debug_always_flush:
self._debug_logfile.flush()
self._debug_first_output = False
def _call_program(self, cmdargs, **kwargs):
"""Do the actual work of invoking the :program:`dialog`-like program.
Communication with the :program:`dialog`-like program is
performed through one :manpage:`pipe(2)` and optionally a
user-specified file descriptor, depending on
*redir_child_stdin_from_fd*. The pipe allows the parent process
to read what :program:`dialog` writes on its standard error
stream [#]_.
If *use_persistent_args* is ``True`` (the default), the elements
of ``self.dialog_persistent_arglist`` are passed as the first
arguments to ``self._dialog_prg``; otherwise,
``self.dialog_persistent_arglist`` is not used at all. The
remaining arguments are those computed from *kwargs* followed by
the elements of *cmdargs*.
If *dash_escape* is the string ``"non-first"``, then every
element of *cmdargs* that starts with ``'--'`` is escaped by
prepending an element consisting of ``'--'``, except the first
one (which is usually a :program:`dialog` option such as
``'--yesno'``). In order to disable this escaping mechanism,
pass the string ``"none"`` as *dash_escape*.
If *redir_child_stdin_from_fd* is not ``None``, it should be an
open file descriptor (i.e., an integer). That file descriptor
will be connected to :program:`dialog`'s standard input. This is
used by the gauge widget to feed data to :program:`dialog`, as
well as for :meth:`progressbox` in order to allow
:program:`dialog` to read data from a possibly-growing file.
If *redir_child_stdin_from_fd* is ``None``, the standard input
in the child process (which runs :program:`dialog`) is not
redirected in any way.
If *close_fds* is passed, it should be a sequence of file
descriptors that will be closed by the child process before it
exec()s the :program:`dialog`-like program.
Notable exception:
:exc:`PythonDialogOSError` (if any of the pipe(2) or close(2)
system calls fails...)
        .. [#] standard output stream if *use_stdout* is ``True``
"""
        close_fds = kwargs.pop('close_fds', ())
        redir_child_stdin_from_fd = kwargs.pop('redir_child_stdin_from_fd',
                                               None)
        use_persistent_args = kwargs.pop('use_persistent_args', True)
        dash_escape = kwargs.pop('dash_escape', "non-first")
# We want to define DIALOG_OK, DIALOG_CANCEL, etc. in the
# environment of the child process so that we know (and
# even control) the possible dialog exit statuses.
new_environ = {}
new_environ.update(os.environ)
for var, value in self._lowlevel_exit_codes.items():
varname = "DIALOG_" + var
new_environ[varname] = unicode(value)
if hasattr(self, "DIALOGRC"):
new_environ["DIALOGRC"] = self.DIALOGRC
if dash_escape == "non-first":
# Escape all elements of 'cmdargs' that start with '--', except the
# first one.
cmdargs = self.dash_escape_nf(cmdargs)
elif dash_escape != "none":
raise PythonDialogBug("invalid value for 'dash_escape' parameter: "
"{0!r}".format(dash_escape))
arglist = [ self._dialog_prg ]
if use_persistent_args:
arglist.extend(self.dialog_persistent_arglist)
arglist.extend(_compute_common_args(kwargs) + cmdargs)
if self._debug_enabled:
# Write the complete command line with environment variables
# setting to the debug log file (POSIX shell syntax for easy
# copy-pasting into a terminal, followed by repr(arglist)).
self._write_command_to_file(new_environ, arglist)
# Create a pipe so that the parent process can read dialog's
# output on stderr (stdout with 'use_stdout')
with _OSErrorHandling():
# rfd = File Descriptor for Reading
# wfd = File Descriptor for Writing
(child_output_rfd, child_output_wfd) = os.pipe()
child_pid = os.fork()
if child_pid == 0:
# We are in the child process. We MUST NOT raise any exception.
try:
# 1) If the write end of a pipe isn't closed, the read end
# will never see EOF, which can indefinitely block the
# child waiting for input. To avoid this, the write end
# must be closed in the father *and* child processes.
# 2) The child process doesn't need child_output_rfd.
for fd in close_fds + (child_output_rfd,):
os.close(fd)
# We want:
# - to keep a reference to the father's stderr for error
# reporting (and use line-buffering for this stream);
# - dialog's output on stderr[*] to go to child_output_wfd;
# - data written to fd 'redir_child_stdin_from_fd'
# (if not None) to go to dialog's stdin.
#
# [*] stdout with 'use_stdout'
#
# We'll just print the result of traceback.format_exc() to
# father_stderr, which is a byte string in Python 2, hence the
# binary mode.
father_stderr = open(os.dup(2), mode="wb")
os.dup2(child_output_wfd, 1 if self.use_stdout else 2)
if redir_child_stdin_from_fd is not None:
os.dup2(redir_child_stdin_from_fd, 0)
os.execve(self._dialog_prg, arglist, new_environ)
except:
print(traceback.format_exc(), file=father_stderr)
father_stderr.close()
os._exit(127)
# Should not happen unless there is a bug in Python
os._exit(126)
# We are in the father process.
#
# It is essential to close child_output_wfd, otherwise we will never
# see EOF while reading on child_output_rfd and the parent process
# will block forever on the read() call.
# [ after the fork(), the "reference count" of child_output_wfd from
# the operating system's point of view is 2; after the child exits,
# it is 1 until the father closes it itself; then it is 0 and a read
# on child_output_rfd encounters EOF once all the remaining data in
# the pipe has been read. ]
with _OSErrorHandling():
os.close(child_output_wfd)
return (child_pid, child_output_rfd)
def _wait_for_program_termination(self, child_pid, child_output_rfd):
"""Wait for a :program:`dialog`-like process to terminate.
This function waits for the specified process to terminate,
raises the appropriate exceptions in case of abnormal
termination and returns the :term:`Dialog exit code` and stderr
[#stream]_ output of the process as a tuple: :samp:`({hl_exit_code},
{output_string})`.
*child_output_rfd* must be the file descriptor for the
reading end of the pipe created by :meth:`_call_program`, the
writing end of which was connected by :meth:`_call_program`
to the child process's standard error [#stream]_.
This function reads the process output on the standard error
[#stream]_ from *child_output_rfd* and closes this file
descriptor once this is done.
Notable exceptions:
- :exc:`DialogTerminatedBySignal`
- :exc:`DialogError`
- :exc:`PythonDialogErrorBeforeExecInChildProcess`
- :exc:`PythonDialogIOError` if the Python version is < 3.3
- :exc:`PythonDialogOSError`
- :exc:`PythonDialogBug`
- :exc:`ProbablyPythonBug`
.. [#stream] standard output if ``self.use_stdout`` is ``True``
"""
# Read dialog's output on its stderr (stdout with 'use_stdout')
with _OSErrorHandling():
with open(child_output_rfd, "r") as f:
child_output = f.read()
# The closing of the file object causes the end of the pipe we used
# to read dialog's output on its stderr to be closed too. This is
# important, otherwise invoking dialog enough times would
# eventually exhaust the maximum number of open file descriptors.
exit_info = os.waitpid(child_pid, 0)[1]
if os.WIFEXITED(exit_info):
ll_exit_code = os.WEXITSTATUS(exit_info)
# As we wait()ed for the child process to terminate, there is no
# need to call os.WIFSTOPPED()
elif os.WIFSIGNALED(exit_info):
raise DialogTerminatedBySignal("the dialog-like program was "
"terminated by signal %d" %
os.WTERMSIG(exit_info))
else:
raise PythonDialogBug("please report this bug to the "
"pythondialog maintainer(s)")
if ll_exit_code == self._DIALOG_ERROR:
raise DialogError(
"the dialog-like program exited with status {0} (which was "
"passed to it as the DIALOG_ERROR environment variable). "
"Sometimes, the reason is simply that dialog was given a "
"height or width parameter that is too big for the terminal "
"in use. Its output, with leading and trailing whitespace "
"stripped, was:\n\n{1}".format(ll_exit_code,
child_output.strip()))
elif ll_exit_code == 127:
raise PythonDialogErrorBeforeExecInChildProcess(dedent("""\
possible reasons include:
- the dialog-like program could not be executed (this can happen
for instance if the Python program is trying to call the
dialog-like program with arguments that cannot be represented
in the user's locale [LC_CTYPE]);
- the system is out of memory;
- the maximum number of open file descriptors has been reached;
- a cosmic ray hit the system memory and flipped nasty bits.
There ought to be a traceback above this message that describes
more precisely what happened."""))
elif ll_exit_code == 126:
raise ProbablyPythonBug(
"a child process returned with exit status 126; this might "
"be the exit status of the dialog-like program, for some "
"unknown reason (-> probably a bug in the dialog-like "
"program); otherwise, we have probably found a python bug")
try:
hl_exit_code = self._dialog_exit_code_ll_to_hl[ll_exit_code]
except KeyError:
raise PythonDialogBug(
"unexpected low-level exit status (new code?): {0!r}".format(
ll_exit_code))
return (hl_exit_code, child_output)
def _perform(self, cmdargs, **kwargs):
"""Perform a complete :program:`dialog`-like program invocation.
This function invokes the :program:`dialog`-like program, waits
for its termination and returns the appropriate :term:`Dialog
exit code` along with whatever output it produced.
See :meth:`_call_program` for a description of the parameters.
Notable exceptions:
any exception raised by :meth:`_call_program` or
:meth:`_wait_for_program_termination`
"""
        use_persistent_args = kwargs.pop('use_persistent_args', True)
        dash_escape = kwargs.pop('dash_escape', "non-first")
(child_pid, child_output_rfd) = \
self._call_program(cmdargs, dash_escape=dash_escape,
use_persistent_args=use_persistent_args,
**kwargs)
(exit_code, output) = \
self._wait_for_program_termination(child_pid,
child_output_rfd)
return (exit_code, output)
def _strip_xdialog_newline(self, output):
"""Remove trailing newline (if any) in \
:program:`Xdialog`-compatibility mode"""
if self.compat == "Xdialog" and output.endswith("\n"):
output = output[:-1]
return output
# This is for compatibility with the old dialog.py
def _perform_no_options(self, cmd):
"""Call :program:`dialog` without passing any more options."""
warnings.warn("Dialog._perform_no_options() has been obsolete for "
"many years", DeprecationWarning)
return os.system(self._dialog_prg + ' ' + cmd)
# For compatibility with the old dialog.py
def clear(self):
"""Clear the screen.
Equivalent to the :option:`--clear` option of :program:`dialog`.
.. deprecated:: 2.03
You may use the :manpage:`clear(1)` program instead.
cf. ``clear_screen()`` in :file:`examples/demo.py` for an
example.
"""
warnings.warn("Dialog.clear() has been obsolete for many years.\n"
"You may use the clear(1) program to clear the screen.\n"
"cf. clear_screen() in examples/demo.py for an example",
DeprecationWarning)
self._perform_no_options('--clear')
def _help_status_on(self, kwargs):
return ("--help-status" in self.dialog_persistent_arglist
or kwargs.get("help_status", False))
def _parse_quoted_string(self, s, start=0):
"""Parse a quoted string from a :program:`dialog` help output."""
if start >= len(s) or s[start] != '"':
raise PythonDialogBug("quoted string does not start with a double "
"quote: {0!r}".format(s))
l = []
i = start + 1
while i < len(s) and s[i] != '"':
if s[i] == "\\":
i += 1
if i >= len(s):
raise PythonDialogBug(
"quoted string ends with a backslash: {0!r}".format(s))
l.append(s[i])
i += 1
if s[i] != '"':
raise PythonDialogBug("quoted string does not and with a double "
"quote: {0!r}".format(s))
return (''.join(l), i+1)
def _split_shellstyle_arglist(self, s):
"""Split an argument list with shell-style quoting performed \
by :program:`dialog`.
Any argument in 's' may or may not be quoted. Quoted
arguments are always expected to be enclosed in double quotes
(more restrictive than what the POSIX shell allows).
This function could maybe be replaced with shlex.split(),
however:
- shlex only handles Unicode strings in Python 2.7.3 and
above;
- the bulk of the work is done by _parse_quoted_string(),
which is probably still needed in _parse_help(), where
one needs to parse things such as 'HELP <id> <status>' in
which <id> may be quoted but <status> is never quoted,
even if it contains spaces or quotes.
"""
s = s.rstrip()
l = []
i = 0
while i < len(s):
if s[i] == '"':
arg, i = self._parse_quoted_string(s, start=i)
if i < len(s) and s[i] != ' ':
raise PythonDialogBug(
"expected a space or end-of-string after quoted "
"string in {0!r}, but found {1!r}".format(s, s[i]))
# Start of the next argument, or after the end of the string
i += 1
l.append(arg)
else:
try:
end = s.index(' ', i)
except ValueError:
end = len(s)
l.append(s[i:end])
# Start of the next argument, or after the end of the string
i = end + 1
return l
def _parse_help(self, output, kwargs, **_3to2kwargs):
"""Parse the dialog help output from a widget.
'kwargs' should contain the keyword arguments used in the
widget call that produced the help output.
'multival' is for widgets that return a list of values as
opposed to a single value.
'raw_format' is for widgets that don't start their help
output with the string "HELP ".
"""
        raw_format = _3to2kwargs.pop('raw_format', False)
        multival_on_single_line = _3to2kwargs.pop('multival_on_single_line',
                                                  False)
        multival = _3to2kwargs.pop('multival', False)
l = output.splitlines()
if raw_format:
# This format of the help output is either empty or consists of
# only one line (possibly terminated with \n). It is
# encountered with --calendar and --inputbox, among others.
if len(l) > 1:
raise PythonDialogBug("raw help feedback unexpected as "
"multiline: {0!r}".format(output))
elif len(l) == 0:
return ""
else:
return l[0]
# Simple widgets such as 'yesno' will fall in this case if they use
# this method.
if not l:
return None
# The widgets that actually use --help-status always have the first
# help line indicating the active item; there is no risk of
# confusing this line with the first line produced by --help-status.
if not l[0].startswith("HELP "):
raise PythonDialogBug(
"unexpected help output that does not start with 'HELP ': "
"{0!r}".format(output))
# Everything that follows "HELP "; what it contains depends on whether
# --item-help and/or --help-tags were passed to dialog.
s = l[0][5:]
if not self._help_status_on(kwargs):
return s
if multival:
if multival_on_single_line:
args = self._split_shellstyle_arglist(s)
if not args:
raise PythonDialogBug(
"expected a non-empty space-separated list of "
"possibly-quoted strings in this help output: {0!r}"
.format(output))
return (args[0], args[1:])
else:
return (s, l[1:])
else:
if not s:
raise PythonDialogBug(
"unexpected help output whose first line is 'HELP '")
elif s[0] != '"':
l2 = s.split(' ', 1)
if len(l2) == 1:
raise PythonDialogBug(
"expected 'HELP <id> <status>' in the help output, "
"but couldn't find any space after 'HELP '")
else:
return tuple(l2)
else:
help_id, after_index = self._parse_quoted_string(s)
if not s[after_index:].startswith(" "):
raise PythonDialogBug(
"expected 'HELP <quoted_id> <status>' in the help "
"output, but couldn't find any space after "
"'HELP <quoted_id>'")
return (help_id, s[after_index+1:])
def _widget_with_string_output(self, args, kwargs,
strip_xdialog_newline=False,
raw_help=False):
"""Generic implementation for a widget that produces a single string.
The help output must be present regardless of whether
--help-status was passed or not.
"""
code, output = self._perform(args, **kwargs)
if strip_xdialog_newline:
output = self._strip_xdialog_newline(output)
if code == self.HELP:
# No check for --help-status
help_data = self._parse_help(output, kwargs, raw_format=raw_help)
return (code, help_data)
else:
return (code, output)
def _widget_with_no_output(self, widget_name, args, kwargs):
"""Generic implementation for a widget that produces no output."""
code, output = self._perform(args, **kwargs)
if output:
raise PythonDialogBug(
"expected an empty output from {0!r}, but got: {1!r}".format(
widget_name, output))
return code
def _dialog_version_check(self, version_string, feature):
if self.compat == "dialog":
minimum_version = DialogBackendVersion.fromstring(version_string)
if self.cached_backend_version < minimum_version:
                raise InadequateBackendVersion(
                    "the {0} requires dialog {1} or later, "
                    "but you seem to be using version {2}".format(
                        feature, minimum_version, self.cached_backend_version))
def backend_version(self):
"""Get the version of the :program:`dialog`-like program (backend).
If the version of the :program:`dialog`-like program can be
retrieved, return it as a string; otherwise, raise
:exc:`UnableToRetrieveBackendVersion`.
This version is not to be confused with the pythondialog
version.
In most cases, you should rather use the
:attr:`cached_backend_version` attribute of :class:`Dialog`
instances, because:
- it avoids calling the backend every time one needs the
version;
- it is a :class:`BackendVersion` instance (or instance of a
subclass) that allows easy and reliable comparisons between
versions;
- the version string corresponding to a
:class:`BackendVersion` instance (or instance of a subclass)
can be obtained with :func:`unicode`.
Notable exceptions:
- :exc:`UnableToRetrieveBackendVersion`
- :exc:`PythonDialogReModuleError`
- any exception raised by :meth:`Dialog._perform`
.. versionadded:: 2.12
.. versionchanged:: 2.14
Raise :exc:`UnableToRetrieveBackendVersion` instead of
returning ``None`` when the version of the
:program:`dialog`-like program can't be retrieved.
"""
code, output = self._perform(["--print-version"],
use_persistent_args=False)
# Workaround for old dialog versions
if code == self.OK and not (output.strip() or self.use_stdout):
# output.strip() is empty and self.use_stdout is False.
# This can happen with old dialog versions (1.1-20100428
# apparently does that). Try again, reading from stdout this
# time.
self.use_stdout = True
code, output = self._perform(["--stdout", "--print-version"],
use_persistent_args=False,
dash_escape="none")
self.use_stdout = False
if code == self.OK:
try:
mo = self._print_version_cre.match(output)
if mo:
return mo.group("version")
else:
raise UnableToRetrieveBackendVersion(
"unable to parse the output of '{0} --print-version': "
"{1!r}".format(self._dialog_prg, output))
except re.error, e:
raise PythonDialogReModuleError(unicode(e))
else:
raise UnableToRetrieveBackendVersion(
"exit code {0!r} from the backend".format(code))
def maxsize(self, **kwargs):
"""Get the maximum size of dialog boxes.
If the exit status from the backend corresponds to
:attr:`Dialog.OK`, return a :samp:`({lines}, {cols})` tuple of
integers; otherwise, return ``None``.
If you want to obtain the number of lines and columns of the
terminal, you should call this method with
``use_persistent_args=False``, because :program:`dialog` options
such as :option:`--backtitle` modify the returned values.
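        A small sketch, assuming *d* is a :class:`Dialog` instance::
            res = d.maxsize(use_persistent_args=False)
            if res is not None:
                lines, cols = res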
Notable exceptions:
- :exc:`PythonDialogReModuleError`
- any exception raised by :meth:`Dialog._perform`
.. versionadded:: 2.12
"""
code, output = self._perform(["--print-maxsize"], **kwargs)
if code == self.OK:
try:
mo = self._print_maxsize_cre.match(output)
if mo:
return tuple(imap(int, mo.group("rows", "columns")))
else:
raise PythonDialogBug(
"Unable to parse the output of '{0} --print-maxsize': "
"{1!r}".format(self._dialog_prg, output))
except re.error, e:
raise PythonDialogReModuleError(unicode(e))
else:
return None
def _default_size(self, values, defaults):
# If 'autowidgetsize' is enabled, set the default values for the
# width/height/... parameters of widget-producing methods to 0 (this
# will actually be done by the caller, this function is only a helper).
if self.autowidgetsize:
defaults = (0,) * len(defaults)
# For every element of 'values': keep it if different from None,
# otherwise replace it with the corresponding value from 'defaults'.
return [ v if v is not None else defaults[i]
for i, v in enumerate(values) ]
@widget
def buildlist(self, text, height=0, width=0, list_height=0, items=[],
**kwargs):
"""Display a buildlist box.
:param str text: text to display in the box
:param int height: height of the box
:param int width: width of the box
:param int list_height: height of the selected and unselected
list boxes
:param items:
an iterable of :samp:`({tag}, {item}, {status})` tuples where
*status* specifies the initial selected/unselected state of
each entry; can be ``True`` or ``False``, ``1`` or ``0``,
``"on"`` or ``"off"`` (``True``, ``1`` and ``"on"`` meaning
selected), or any case variation of these two strings.
:return: a tuple of the form :samp:`({code}, {tags})` where:
- *code* is a :term:`Dialog exit code`;
- *tags* is a list of the tags corresponding to the selected
items, in the order they have in the list on the right.
:rtype: tuple
A :meth:`!buildlist` dialog is similar in logic to the
:meth:`checklist`, but differs in presentation. In this widget,
two lists are displayed, side by side. The list on the left
shows unselected items. The list on the right shows selected
items. As items are selected or unselected, they move between
the two lists. The *status* component of *items* specifies which
items are initially selected.
+--------------+------------------------------------------------+
| Key | Action |
+==============+================================================+
| :kbd:`Space` | select or deselect the highlighted item, |
| | *i.e.*, move it between the left and right |
| | lists |
+--------------+------------------------------------------------+
| :kbd:`^` | move the focus to the left list |
+--------------+------------------------------------------------+
| :kbd:`$` | move the focus to the right list |
+--------------+------------------------------------------------+
| :kbd:`Tab` | move focus (see *visit_items* below) |
+--------------+------------------------------------------------+
| :kbd:`Enter` | press the focused button |
+--------------+------------------------------------------------+
If called with ``visit_items=True``, the :kbd:`Tab` key can move
the focus to the left and right lists, which is probably more
intuitive for users than the default behavior that requires
using :kbd:`^` and :kbd:`$` for this purpose.
This widget requires dialog >= 1.2-20121230.
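        A minimal usage sketch (tags, items and statuses are purely
        illustrative; *d* is assumed to be a :class:`Dialog` instance)::
            code, tags = d.buildlist(
                "Select the packages to install:",
                items=[("vim",  "Text editor", True),
                       ("gcc",  "C compiler",  False),
                       ("tmux", "Terminal multiplexer", "off")])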
Notable exceptions:
any exception raised by :meth:`Dialog._perform` or :func:`_to_onoff`
.. versionadded:: 3.0
"""
self._dialog_version_check("1.2-20121230", "the buildlist widget")
cmd = ["--buildlist", text, unicode(height), unicode(width), unicode(list_height)]
for t in items:
cmd.extend([ t[0], t[1], _to_onoff(t[2]) ] + list(t[3:]))
code, output = self._perform(cmd, **kwargs)
if code == self.HELP:
help_data = self._parse_help(output, kwargs, multival=True,
multival_on_single_line=True)
if self._help_status_on(kwargs):
help_id, selected_tags = help_data
updated_items = []
for elt in items:
tag, item, status = elt[:3]
rest = elt[3:]
updated_items.append([ tag, item, tag in selected_tags ]
+ list(rest))
return (code, (help_id, selected_tags, updated_items))
else:
return (code, help_data)
elif code in (self.OK, self.EXTRA):
return (code, self._split_shellstyle_arglist(output))
else:
return (code, None)
def _calendar_parse_date(self, date_str):
try:
mo = _calendar_date_cre.match(date_str)
except re.error, e:
raise PythonDialogReModuleError(unicode(e))
if not mo:
raise UnexpectedDialogOutput(
"the dialog-like program returned the following "
"unexpected output (a date string was expected) from the "
"calendar box: {0!r}".format(date_str))
return [ int(s) for s in mo.group("day", "month", "year") ]
@widget
def calendar(self, text, height=None, width=0, day=-1, month=-1, year=-1,
**kwargs):
"""Display a calendar dialog box.
:param str text: text to display in the box
:param height: height of the box (minus the calendar height)
:type height: int or ``None``
:param int width: width of the box
        :param int day: initial day highlighted
        :param int month: initial month displayed
        :param int year: initial year selected
:return: a tuple of the form :samp:`({code}, {date})` where:
- *code* is a :term:`Dialog exit code`;
- *date* is a list of the form :samp:`[{day}, {month},
{year}]`, where *day*, *month* and *year* are integers
corresponding to the date chosen by the user.
:rtype: tuple
A :meth:`!calendar` box displays day, month and year in
separately adjustable windows. If *year* is given as ``0``, the
current date is used as initial value; otherwise, if any of the
values for *day*, *month* and *year* is negative, the current
date's corresponding value is used. You can increment or
decrement any of those using the :kbd:`Left`, :kbd:`Up`,
:kbd:`Right` and :kbd:`Down` arrows. Use :kbd:`Tab` or
:kbd:`Backtab` to move between windows.
Default values for the size parameters when the
:ref:`autowidgetsize <autowidgetsize>` option is disabled:
``height=6, width=0``.
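        A minimal usage sketch (*d* is assumed to be a :class:`Dialog`
        instance; ``year=0`` preselects the current date)::
            code, date = d.calendar("When is the deadline?", year=0)
            if code == d.OK:
                day, month, year = date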
Notable exceptions:
- any exception raised by :meth:`Dialog._perform`
- :exc:`UnexpectedDialogOutput`
- :exc:`PythonDialogReModuleError`
.. versionchanged:: 3.2
The default values for *day*, *month* and *year* have been
changed from ``0`` to ``-1``.
"""
(height,) = self._default_size((height, ), (6,))
(code, output) = self._perform(
["--calendar", text, unicode(height), unicode(width), unicode(day),
unicode(month), unicode(year)],
**kwargs)
if code == self.HELP:
# The output does not depend on whether --help-status was passed
# (dialog 1.2-20130902).
help_data = self._parse_help(output, kwargs, raw_format=True)
return (code, self._calendar_parse_date(help_data))
elif code in (self.OK, self.EXTRA):
return (code, self._calendar_parse_date(output))
else:
return (code, None)
@widget
def checklist(self, text, height=None, width=None, list_height=None,
choices=[], **kwargs):
"""Display a checklist box.
:param str text: text to display in the box
:param height: height of the box
:type height: int or ``None``
:param width: width of the box
:type width: int or ``None``
:param list_height:
number of entries displayed in the box at a given time (the
contents can be scrolled)
:type list_height: int or ``None``
:param choices:
an iterable of :samp:`({tag}, {item}, {status})` tuples where
*status* specifies the initial selected/unselected state of
each entry; can be ``True`` or ``False``, ``1`` or ``0``,
``"on"`` or ``"off"`` (``True``, ``1`` and ``"on"`` meaning
selected), or any case variation of these two strings.
:return: a tuple of the form :samp:`({code}, [{tag}, ...])`
whose first element is a :term:`Dialog exit code` and second
element lists all tags for the entries selected by the user.
If the user exits with :kbd:`Esc` or :guilabel:`Cancel`, the
returned tag list is empty.
:rtype: tuple
Default values for the size parameters when the
:ref:`autowidgetsize <autowidgetsize>` option is disabled:
``height=15, width=54, list_height=7``.
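        A minimal usage sketch (tags, items and statuses are purely
        illustrative; *d* is assumed to be a :class:`Dialog` instance)::
            code, tags = d.checklist(
                "What do you like?",
                choices=[("Tea",    "hot drink",  False),
                         ("Coffee", "hot drink",  True),
                         ("Water",  "cold drink", "on")])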
Notable exceptions:
any exception raised by :meth:`Dialog._perform` or :func:`_to_onoff`
"""
height, width, list_height = self._default_size(
(height, width, list_height), (15, 54, 7))
cmd = ["--checklist", text, unicode(height), unicode(width), unicode(list_height)]
for t in choices:
t = [ t[0], t[1], _to_onoff(t[2]) ] + list(t[3:])
cmd.extend(t)
# The dialog output cannot be parsed reliably (at least in dialog
# 0.9b-20040301) without --separate-output (because double quotes in
# tags are escaped with backslashes, but backslashes are not
# themselves escaped and you have a problem when a tag ends with a
# backslash--the output makes you think you've encountered an embedded
# double-quote).
kwargs["separate_output"] = True
(code, output) = self._perform(cmd, **kwargs)
# Since we used --separate-output, the tags are separated by a newline
# in the output. There is also a final newline after the last tag.
if code == self.HELP:
help_data = self._parse_help(output, kwargs, multival=True)
if self._help_status_on(kwargs):
help_id, selected_tags = help_data
updated_choices = []
for elt in choices:
tag, item, status = elt[:3]
rest = elt[3:]
updated_choices.append([ tag, item, tag in selected_tags ]
+ list(rest))
return (code, (help_id, selected_tags, updated_choices))
else:
return (code, help_data)
else:
return (code, output.split('\n')[:-1])
def _form_updated_items(self, status, elements):
"""Return a complete list with up-to-date items from 'status'.
Return a new list of same length as 'elements'. Items are
taken from 'status', except when data inside 'elements'
indicates a read-only field: such items are not output by
dialog ... --help-status ..., and therefore have to be
extracted from 'elements' instead of 'status'.
Actually, for 'mixedform', the elements that are defined as
read-only using the attribute instead of a non-positive
field_length are not concerned by this function, since they
are included in the --help-status output.
"""
res = []
for i, elt in enumerate(elements):
label, yl, xl, item, yi, xi, field_length = elt[:7]
res.append(status[i] if field_length > 0 else item)
return res
def _generic_form(self, widget_name, method_name, text, elements, height=0,
width=0, form_height=0, **kwargs):
cmd = ["--%s" % widget_name, text, unicode(height), unicode(width),
unicode(form_height)]
if not elements:
raise BadPythonDialogUsage(
"{0}.{1}.{2}: empty ELEMENTS sequence: {3!r}".format(
__name__, type(self).__name__, method_name, elements))
elt_len = len(elements[0]) # for consistency checking
for i, elt in enumerate(elements):
if len(elt) != elt_len:
raise BadPythonDialogUsage(
"{0}.{1}.{2}: ELEMENTS[0] has length {3}, whereas "
"ELEMENTS[{4}] has length {5}".format(
__name__, type(self).__name__, method_name,
elt_len, i, len(elt)))
# Give names to make the code more readable
if widget_name in ("form", "passwordform"):
label, yl, xl, item, yi, xi, field_length, input_length = \
elt[:8]
rest = elt[8:] # optional "item_help" string
elif widget_name == "mixedform":
label, yl, xl, item, yi, xi, field_length, input_length, \
attributes = elt[:9]
rest = elt[9:] # optional "item_help" string
else:
raise PythonDialogBug(
"unexpected widget name in {0}.{1}._generic_form(): "
"{2!r}".format(__name__, type(self).__name__, widget_name))
for name, value in (("LABEL", label), ("ITEM", item)):
if not isinstance(value, basestring):
raise BadPythonDialogUsage(
"{0}.{1}.{2}: {3!r} element not a string: {4!r}".format(
__name__, type(self).__name__,
method_name, name, value))
cmd.extend((label, unicode(yl), unicode(xl), item, unicode(yi), unicode(xi),
unicode(field_length), unicode(input_length)))
if widget_name == "mixedform":
cmd.append(unicode(attributes))
# "item help" string when using --item-help, nothing otherwise
cmd.extend(rest)
(code, output) = self._perform(cmd, **kwargs)
if code == self.HELP:
help_data = self._parse_help(output, kwargs, multival=True)
if self._help_status_on(kwargs):
help_id, status = help_data
# 'status' does not contain the fields marked as read-only in
# 'elements'. Build a list containing all up-to-date items.
updated_items = self._form_updated_items(status, elements)
# Reconstruct 'elements' with the updated items taken from
# 'status'.
updated_elements = []
for elt, updated_item in izip(elements, updated_items):
label, yl, xl, item = elt[:4]
rest = elt[4:]
updated_elements.append([ label, yl, xl, updated_item ]
+ list(rest))
return (code, (help_id, status, updated_elements))
else:
return (code, help_data)
else:
return (code, output.split('\n')[:-1])
@widget
def form(self, text, elements, height=0, width=0, form_height=0, **kwargs):
"""Display a form consisting of labels and fields.
:param str text: text to display in the box
:param elements: sequence describing the labels and
fields (see below)
:param int height: height of the box
:param int width: width of the box
:param int form_height: number of form lines displayed at the
same time
:return: a tuple of the form :samp:`({code}, {list})` where:
- *code* is a :term:`Dialog exit code`;
- *list* gives the contents of every editable field on exit,
with the same order as in *elements*.
:rtype: tuple
        A :meth:`!form` box consists of a series of :dfn:`fields` and
associated :dfn:`labels`. This type of dialog is suitable for
adjusting configuration parameters and similar tasks.
Each element of *elements* must itself be a sequence
:samp:`({label}, {yl}, {xl}, {item}, {yi}, {xi}, {field_length},
{input_length})` containing the various parameters concerning a
given field and the associated label.
*label* is a string that will be displayed at row *yl*, column
*xl*. *item* is a string giving the initial value for the field,
which will be displayed at row *yi*, column *xi* (row and column
numbers starting from 1).
*field_length* and *input_length* are integers that respectively
specify the number of characters used for displaying the field
and the maximum number of characters that can be entered for
this field. These two integers also determine whether the
contents of the field can be modified, as follows:
- if *field_length* is zero, the field cannot be altered and
          its contents determine the displayed length;
- if *field_length* is negative, the field cannot be altered
and the opposite of *field_length* gives the displayed
length;
- if *input_length* is zero, it is set to *field_length*.
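        A minimal usage sketch (labels, positions and initial values are
        purely illustrative; *d* is assumed to be a :class:`Dialog`
        instance)::
            elements = [
                ("Name",  1, 1, "John Doe",             1, 10, 20, 30),
                ("Email", 2, 1, "john@example.invalid", 2, 10, 20, 60)]
            code, fields = d.form("Contact details:", elements,
                                  height=10, width=50, form_height=2)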
Notable exceptions:
- :exc:`BadPythonDialogUsage`
- any exception raised by :meth:`Dialog._perform`
"""
return self._generic_form("form", "form", text, elements,
height, width, form_height, **kwargs)
@widget
def passwordform(self, text, elements, height=0, width=0, form_height=0,
**kwargs):
"""Display a form consisting of labels and invisible fields.
This widget is identical to the :meth:`form` box, except that
all text fields are treated as :meth:`passwordbox` widgets
rather than :meth:`inputbox` widgets.
        By default (as in :program:`dialog`), nothing is echoed to the
terminal as the user types in the invisible fields. This can be
confusing to users. Use ``insecure=True`` (keyword argument) if
you want an asterisk to be echoed for each character entered by
the user.
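        A minimal usage sketch (the field layout is purely illustrative;
        *d* is assumed to be a :class:`Dialog` instance)::
            code, fields = d.passwordform(
                "Please authenticate:",
                [("Password", 1, 1, "", 1, 12, 15, 0)],
                insecure=True)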
Notable exceptions:
- :exc:`BadPythonDialogUsage`
- any exception raised by :meth:`Dialog._perform`
"""
return self._generic_form("passwordform", "passwordform", text,
elements, height, width, form_height,
**kwargs)
@widget
def mixedform(self, text, elements, height=0, width=0, form_height=0,
**kwargs):
"""Display a form consisting of labels and fields.
:param str text: text to display in the box
:param elements: sequence describing the labels and
fields (see below)
:param int height: height of the box
:param int width: width of the box
:param int form_height: number of form lines displayed at the
same time
:return: a tuple of the form :samp:`({code}, {list})` where:
- *code* is a :term:`Dialog exit code`;
- *list* gives the contents of every field on exit, with the
same order as in *elements*.
:rtype: tuple
A :meth:`!mixedform` box is very similar to a :meth:`form` box,
and differs from the latter by allowing field attributes to be
specified.
Each element of *elements* must itself be a sequence
:samp:`({label}, {yl}, {xl}, {item}, {yi}, {xi}, {field_length},
{input_length}, {attributes})` containing the various parameters
concerning a given field and the associated label.
*attributes* is an integer interpreted as a bit mask with the
following meaning (bit 0 being the least significant bit):
+------------+-----------------------------------------------+
| Bit number | Meaning |
+============+===============================================+
| 0 | the field should be hidden (e.g., a password) |
+------------+-----------------------------------------------+
| 1 | the field should be read-only (e.g., a label) |
+------------+-----------------------------------------------+
For all other parameters, please refer to the documentation of
the :meth:`form` box.
        The return value is the same as for the :meth:`!form` box,
        except that fields marked as read-only with bit 1 of
*attributes* are also included in the output list.
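        A minimal usage sketch (field layout and attributes are purely
        illustrative; *d* is assumed to be a :class:`Dialog` instance)::
            elements = [
                ("Login",    1, 1, "guest", 1, 12, 15, 20, 0x0),
                ("Password", 2, 1, "",      2, 12, 15, 20, 0x1),  # hidden
                ("Realm",    3, 1, "LOCAL", 3, 12, 15, 0,  0x2)]  # read-only
            code, fields = d.mixedform("Credentials:", elements)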
Notable exceptions:
- :exc:`BadPythonDialogUsage`
- any exception raised by :meth:`Dialog._perform`
"""
return self._generic_form("mixedform", "mixedform", text, elements,
height, width, form_height, **kwargs)
@widget
def dselect(self, filepath, height=0, width=0, **kwargs):
"""Display a directory selection dialog box.
:param str filepath: initial path
:param int height: height of the box
:param int width: width of the box
:return: a tuple of the form :samp:`({code}, {path})` where:
- *code* is a :term:`Dialog exit code`;
- *path* is the directory chosen by the user.
:rtype: tuple
The directory selection dialog displays a text entry window
in which you can type a directory, and above that a window
with directory names.
Here, *filepath* can be a path to a file, in which case the
directory window will display the contents of the path and the
text entry window will contain the preselected directory.
Use :kbd:`Tab` or the arrow keys to move between the windows.
Within the directory window, use the :kbd:`Up` and :kbd:`Down`
arrow keys to scroll the current selection. Use the :kbd:`Space`
bar to copy the current selection into the text entry window.
Typing any printable character switches focus to the text entry
window, entering that character as well as scrolling the
directory window to the closest match.
Use :kbd:`Enter` or the :guilabel:`OK` button to accept the
current value in the text entry window and exit.
Notable exceptions:
any exception raised by :meth:`Dialog._perform`
"""
# The help output does not depend on whether --help-status was passed
# (dialog 1.2-20130902).
return self._widget_with_string_output(
["--dselect", filepath, unicode(height), unicode(width)],
kwargs, raw_help=True)
@widget
def editbox(self, filepath, height=0, width=0, **kwargs):
"""Display a basic text editor dialog box.
:param str filepath: path to a file which determines the initial
contents of the dialog box
:param int height: height of the box
:param int width: width of the box
:return: a tuple of the form :samp:`({code}, {text})` where:
- *code* is a :term:`Dialog exit code`;
- *text* is the contents of the text entry window on exit.
:rtype: tuple
The :meth:`!editbox` dialog displays a copy of the file
contents. You may edit it using the :kbd:`Backspace`,
:kbd:`Delete` and cursor keys to correct typing errors. It also
recognizes :kbd:`Page Up` and :kbd:`Page Down`. Unlike the
:meth:`inputbox`, you must tab to the :guilabel:`OK` or
:guilabel:`Cancel` buttons to close the dialog. Pressing the
:kbd:`Enter` key within the box will split the corresponding
line.
Notable exceptions:
any exception raised by :meth:`Dialog._perform`
"""
return self._widget_with_string_output(
["--editbox", filepath, unicode(height), unicode(width)],
kwargs)
@widget
def fselect(self, filepath, height=0, width=0, **kwargs):
"""Display a file selection dialog box.
:param str filepath: initial path
:param int height: height of the box
:param int width: width of the box
:return: a tuple of the form :samp:`({code}, {path})` where:
- *code* is a :term:`Dialog exit code`;
- *path* is the path chosen by the user (the last element of
which may be a directory or a file).
:rtype: tuple
The file selection dialog displays a text entry window in
which you can type a file name (or directory), and above that
two windows with directory names and file names.
Here, *filepath* can be a path to a file, in which case the file
and directory windows will display the contents of the path and
the text entry window will contain the preselected file name.
Use :kbd:`Tab` or the arrow keys to move between the windows.
Within the directory or file name windows, use the :kbd:`Up` and
:kbd:`Down` arrow keys to scroll the current selection. Use the
:kbd:`Space` bar to copy the current selection into the text
entry window.
Typing any printable character switches focus to the text entry
window, entering that character as well as scrolling the
directory and file name windows to the closest match.
Use :kbd:`Enter` or the :guilabel:`OK` button to accept the
current value in the text entry window, or the
:guilabel:`Cancel` button to cancel.
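        A minimal usage sketch (the initial path is purely illustrative;
        *d* is assumed to be a :class:`Dialog` instance)::
            code, path = d.fselect("/etc/", height=10, width=60)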
Notable exceptions:
any exception raised by :meth:`Dialog._perform`
"""
# The help output does not depend on whether --help-status was passed
# (dialog 1.2-20130902).
return self._widget_with_string_output(
["--fselect", filepath, unicode(height), unicode(width)],
kwargs, strip_xdialog_newline=True, raw_help=True)
def gauge_start(self, text="", height=None, width=None, percent=0,
**kwargs):
"""Display a gauge box.
:param str text: text to display in the box
:param height: height of the box
:type height: int or ``None``
:param width: width of the box
:type width: int or ``None``
:param int percent: initial percentage shown in the meter
:return: undefined
A gauge box displays a meter along the bottom of the box. The
meter indicates a percentage.
This function starts the :program:`dialog`-like program, telling
it to display a gauge box containing a text and an initial
percentage in the meter.
.. rubric:: Gauge typical usage
Gauge typical usage (assuming that *d* is an instance of the
:class:`Dialog` class) looks like this::
d.gauge_start()
# do something
d.gauge_update(10) # 10% of the whole task is done
# ...
d.gauge_update(100, "any text here") # work is done
exit_code = d.gauge_stop() # cleanup actions
Default values for the size parameters when the
:ref:`autowidgetsize <autowidgetsize>` option is disabled:
``height=8, width=54``.
Notable exceptions:
- any exception raised by :meth:`_call_program`
- :exc:`PythonDialogOSError`
"""
height, width = self._default_size((height, width), (8, 54))
with _OSErrorHandling():
# We need a pipe to send data to the child (dialog) process's
# stdin while it is running.
# rfd = File Descriptor for Reading
# wfd = File Descriptor for Writing
(child_stdin_rfd, child_stdin_wfd) = os.pipe()
(child_pid, child_output_rfd) = self._call_program(
["--gauge", text, unicode(height), unicode(width), unicode(percent)],
redir_child_stdin_from_fd=child_stdin_rfd,
close_fds=(child_stdin_wfd,), **kwargs)
# fork() is done. We don't need child_stdin_rfd in the father
# process anymore.
os.close(child_stdin_rfd)
self._gauge_process = {
"pid": child_pid,
"stdin": open(child_stdin_wfd, "w"),
"child_output_rfd": child_output_rfd
}
def gauge_update(self, percent, text="", update_text=False):
"""Update a running gauge box.
:param int percent: new percentage to show in the gauge
meter
:param str text: new text to optionally display in the
box
:param bool update_text: whether to update the text in the box
:return: undefined
This function updates the percentage shown by the meter of a
running gauge box (meaning :meth:`gauge_start` must have been
called previously). If *update_text* is ``True``, the text
displayed in the box is also updated.
See the :meth:`gauge_start` method documentation for information
about how to use a gauge.
Notable exception:
:exc:`PythonDialogIOError` (:exc:`PythonDialogOSError` from
Python 3.3 onwards) can be raised if there is an I/O error
while trying to write to the pipe used to talk to the
:program:`dialog`-like program.
"""
if not isinstance(percent, int):
raise BadPythonDialogUsage(
"the 'percent' argument of gauge_update() must be an integer, "
"but {0!r} is not".format(percent))
if update_text:
gauge_data = "XXX\n{0}\n{1}\nXXX\n".format(percent, text)
else:
gauge_data = "{0}\n".format(percent)
with _OSErrorHandling():
self._gauge_process["stdin"].write(gauge_data)
self._gauge_process["stdin"].flush()
# For "compatibility" with the old dialog.py...
    def gauge_iterate(self, *args, **kwargs):
"""Update a running gauge box.
.. deprecated:: 2.03
Use :meth:`gauge_update` instead.
"""
warnings.warn("Dialog.gauge_iterate() has been obsolete for "
"many years", DeprecationWarning)
        self.gauge_update(*args, **kwargs)
@widget
@retval_is_code
def gauge_stop(self):
"""Terminate a running gauge widget.
:return: a :term:`Dialog exit code`
:rtype: str
This function performs the appropriate cleanup actions to
terminate a running gauge started with :meth:`gauge_start`.
See the :meth:`!gauge_start` method documentation for
information about how to use a gauge.
Notable exceptions:
- any exception raised by
:meth:`_wait_for_program_termination`;
- :exc:`PythonDialogIOError` (:exc:`PythonDialogOSError` from
Python 3.3 onwards) can be raised if closing the pipe used
to talk to the :program:`dialog`-like program fails.
"""
p = self._gauge_process
# Close the pipe that we are using to feed dialog's stdin
with _OSErrorHandling():
p["stdin"].close()
# According to dialog(1), the output should always be empty.
exit_code = \
self._wait_for_program_termination(p["pid"],
p["child_output_rfd"])[0]
return exit_code
@widget
@retval_is_code
def infobox(self, text, height=None, width=None, **kwargs):
"""Display an information dialog box.
:param str text: text to display in the box
:param height: height of the box
:type height: int or ``None``
:param width: width of the box
:type width: int or ``None``
:return: a :term:`Dialog exit code`
:rtype: str
An info box is basically a message box. However, in this case,
:program:`dialog` will exit immediately after displaying the
message to the user. The screen is not cleared when
:program:`dialog` exits, so that the message will remain on the
screen after the method returns. This is useful when you want to
        inform the user that some operations are in progress and may
        take some time to finish.
Default values for the size parameters when the
:ref:`autowidgetsize <autowidgetsize>` option is disabled:
``height=10, width=30``.
Notable exceptions:
any exception raised by :meth:`Dialog._perform`
"""
height, width = self._default_size((height, width), (10, 30))
return self._widget_with_no_output(
"infobox",
["--infobox", text, unicode(height), unicode(width)],
kwargs)
@widget
def inputbox(self, text, height=None, width=None, init='', **kwargs):
"""Display an input dialog box.
:param str text: text to display in the box
:param height: height of the box
:type height: int or ``None``
:param width: width of the box
:type width: int or ``None``
:param str init: default input string
:return: a tuple of the form :samp:`({code}, {string})` where:
- *code* is a :term:`Dialog exit code`;
- *string* is the string entered by the user.
:rtype: tuple
An input box is useful when you want to ask questions that
require the user to input a string as the answer. If *init* is
supplied, it is used to initialize the input string. When
entering the string, the :kbd:`Backspace` key can be used to
correct typing errors. If the input string is longer than can
fit in the dialog box, the input field will be scrolled.
Default values for the size parameters when the
:ref:`autowidgetsize <autowidgetsize>` option is disabled:
``height=10, width=30``.
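        A minimal usage sketch (*d* is assumed to be a :class:`Dialog`
        instance)::
            code, answer = d.inputbox("What is your name?",
                                      init="Snow White")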
Notable exceptions:
any exception raised by :meth:`Dialog._perform`
"""
height, width = self._default_size((height, width), (10, 30))
# The help output does not depend on whether --help-status was passed
# (dialog 1.2-20130902).
return self._widget_with_string_output(
["--inputbox", text, unicode(height), unicode(width), init],
kwargs, strip_xdialog_newline=True, raw_help=True)
@widget
def inputmenu(self, text, height=0, width=None, menu_height=None,
choices=[], **kwargs):
"""Display an inputmenu dialog box.
:param str text: text to display in the box
:param int height: height of the box
:param width: width of the box
:type width: int or ``None``
:param menu_height: height of the menu (scrollable part)
:type menu_height: int or ``None``
:param choices: an iterable of :samp:`({tag}, {item})`
tuples, the meaning of which is explained
below
:return: see :ref:`below <inputmenu-return-value>`
.. rubric:: Overview
An :meth:`!inputmenu` box is a dialog box that can be used to
present a list of choices in the form of a menu for the user to
choose. Choices are displayed in the given order. The main
differences with the :meth:`menu` dialog box are:
- entries are not automatically centered, but left-adjusted;
- the current entry can be renamed by pressing the
:guilabel:`Rename` button, which allows editing the *item*
part of the current entry.
Each menu entry consists of a *tag* string and an *item* string.
The :dfn:`tag` gives the entry a name to distinguish it from the
other entries in the menu and to provide quick keyboard access.
The :dfn:`item` is a short description of the option that the
entry represents.
The user can move between the menu entries by pressing the
:kbd:`Up` and :kbd:`Down` arrow keys or the first letter of the
tag as a hot key. There are *menu_height* lines (not entries!)
displayed in the scrollable part of the menu at one time.
At the time of this writing (with :program:`dialog`
1.2-20140219), it is not possible to add an Extra button to this
widget, because internally, the :guilabel:`Rename` button *is*
the Extra button.
.. note::
It is strongly advised not to put any space in tags, otherwise
the :program:`dialog` output can be ambiguous if the
corresponding entry is renamed, causing pythondialog to return
a wrong tag string and new item text.
The reason is that in this case, the :program:`dialog` output
is :samp:`RENAMED {tag} {item}` and pythondialog cannot guess
whether spaces after the :samp:`RENAMED` + *space* prefix
belong to the *tag* or the new *item* text.
.. note::
There is no point in calling this method with
``help_status=True``, because it is not possible to rename
several items nor is it possible to choose the
:guilabel:`Help` button (or any button other than
:guilabel:`Rename`) once one has started to rename an item.
.. _inputmenu-return-value:
.. rubric:: Return value
Return a tuple of the form :samp:`({exit_info}, {tag},
{new_item_text})` where:
+ *exit_info* is either:
- the string ``"accepted"``, meaning that an entry was
accepted without renaming;
- the string ``"renamed"``, meaning that an entry was
accepted after being renamed;
- one of the standard :term:`Dialog exit codes <Dialog exit
code>` :attr:`Dialog.CANCEL`, :attr:`Dialog.ESC` or
:attr:`Dialog.HELP` (:attr:`Dialog.EXTRA` can't be
returned, because internally, the :guilabel:`Rename`
button *is* the Extra button).
+ *tag* indicates which entry was accepted (with or without
renaming), if any. If no entry was accepted (e.g., if the
dialog was exited with the :guilabel:`Cancel` button), then
*tag* is ``None``.
+ *new_item_text* gives the new *item* part of the renamed
entry if *exit_info* is ``"renamed"``, otherwise it is
``None``.
Default values for the size parameters when the
:ref:`autowidgetsize <autowidgetsize>` option is disabled:
``height=0, width=60, menu_height=7``.
Notable exceptions:
any exception raised by :meth:`Dialog._perform`
"""
width, menu_height = self._default_size((width, menu_height), (60, 7))
cmd = ["--inputmenu", text, unicode(height), unicode(width), unicode(menu_height)]
for t in choices:
cmd.extend(t)
(code, output) = self._perform(cmd, **kwargs)
if code == self.HELP:
help_id = self._parse_help(output, kwargs)
return (code, help_id, None)
elif code == self.OK:
return ("accepted", output, None)
elif code == self.EXTRA:
if not output.startswith("RENAMED "):
raise PythonDialogBug(
"'output' does not start with 'RENAMED ': {0!r}".format(
output))
t = output.split(' ', 2)
return ("renamed", t[1], t[2])
else:
return (code, None, None)
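# Illustrative usage sketch (not part of the original module); assumes an
# existing Dialog instance 'd' and hypothetical tags containing no spaces,
# as recommended in the docstring above:
#     exit_info, tag, new_text = d.inputmenu(
#         "Edit settings:", choices=[("host", "example.org"), ("port", "8080")])
#     if exit_info == "renamed":
#         pass   # the user edited the item text of entry 'tag'; it is in 'new_text'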
@widget
def menu(self, text, height=None, width=None, menu_height=None, choices=[],
**kwargs):
"""Display a menu dialog box.
:param str text: text to display in the box
:param height: height of the box
:type height: int or ``None``
:param width: width of the box
:type width: int or ``None``
:param menu_height: number of entries displayed in the box
(which can be scrolled) at a given time
:type menu_height: int or ``None``
:param choices: an iterable of :samp:`({tag}, {item})`
tuples, the meaning of which is explained
below
:return: a tuple of the form :samp:`({code}, {tag})` where:
- *code* is a :term:`Dialog exit code`;
- *tag* is the tag string corresponding to the item that the
user chose.
:rtype: tuple
As its name suggests, a :meth:`!menu` box is a dialog box that
can be used to present a list of choices in the form of a menu
for the user to choose. Choices are displayed in the given
order.
Each menu entry consists of a *tag* string and an *item* string.
The :dfn:`tag` gives the entry a name to distinguish it from the
other entries in the menu and to provide quick keyboard access.
The :dfn:`item` is a short description of the option that the
entry represents.
The user can move between the menu entries by pressing the
:kbd:`Up` and :kbd:`Down` arrow keys, the first letter of the
tag as a hotkey, or the number keys :kbd:`1` through :kbd:`9`.
There are *menu_height* entries displayed in the menu at one
time, but it will be scrolled if there are more entries than
that.
Default values for the size parameters when the
:ref:`autowidgetsize <autowidgetsize>` option is disabled:
``height=15, width=54, menu_height=7``.
Notable exceptions:
any exception raised by :meth:`Dialog._perform`
"""
height, width, menu_height = self._default_size(
(height, width, menu_height), (15, 54, 7))
cmd = ["--menu", text, unicode(height), unicode(width), unicode(menu_height)]
for t in choices:
cmd.extend(t)
return self._widget_with_string_output(
cmd, kwargs, strip_xdialog_newline=True)
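# Illustrative usage sketch (assumption: a Dialog instance 'd' already exists):
#     code, tag = d.menu("Pick a colour:",
#                        choices=[("red", "warm"), ("blue", "cool")])
#     if code == d.OK:
#         pass   # 'tag' holds "red" or "blue" here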
@widget
@retval_is_code
def mixedgauge(self, text, height=0, width=0, percent=0, elements=[],
**kwargs):
"""Display a mixed gauge dialog box.
:param str text: text to display in the middle of the box,
between the elements list and the progress
bar
:param int height: height of the box
:param int width: width of the box
:param int percent: integer giving the percentage for the global
progress bar
:param elements: an iterable of :samp:`({tag}, {item})`
tuples, the meaning of which is explained
below
:return: a :term:`Dialog exit code`
:rtype: str
A :meth:`!mixedgauge` box displays a list of "elements" with
status indication for each of them, followed by a text and
finally a global progress bar along the bottom of the box.
The top part ("elements") is suitable for displaying a task
list. One element is displayed per line, with its *tag* part on
the left and its *item* part on the right. The *item* part is a
string that is displayed on the right of the same line.
The *item* part of an element can be an arbitrary string.
Special values listed in the :manpage:`dialog(3)` manual page
are translated into a status indication for the corresponding
task (*tag*), such as: "Succeeded", "Failed", "Passed",
"Completed", "Done", "Skipped", "In Progress", "Checked", "N/A"
or a progress bar.
A progress bar for an element is obtained by supplying a
negative number for the *item*. For instance, ``"-75"`` will
cause a progress bar indicating 75% to be displayed on the
corresponding line.
For your convenience, if an *item* appears to be an integer or a
float, it will be converted to a string before being passed to
the :program:`dialog`-like program.
*text* is shown as a sort of caption between the list and the
global progress bar. The latter displays *percent* as the
percentage of completion.
Contrary to the regular :ref:`gauge widget <gauge-widget>`,
:meth:`!mixedgauge` is completely static. You have to call
:meth:`!mixedgauge` several times in order to display different
percentages in the global progress bar or various status
indicators for a given task.
.. note::
Calling :meth:`!mixedgauge` several times is likely to cause
unwanted flickering because of the screen initializations
performed by :program:`dialog` on every run.
Notable exceptions:
any exception raised by :meth:`Dialog._perform`
"""
cmd = ["--mixedgauge", text, unicode(height), unicode(width), unicode(percent)]
for t in elements:
cmd.extend( (t[0], unicode(t[1])) )
return self._widget_with_no_output("mixedgauge", cmd, kwargs)
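# Illustrative call (assumes a Dialog instance 'd'); "-40" renders a 40% bar
# for the "Copying files" element, as described in the docstring above:
#     d.mixedgauge("Installation progress", percent=55,
#                  elements=[("Checking deps", "Completed"),
#                            ("Copying files", "-40"),
#                            ("Configuring", "Pending")])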
@widget
@retval_is_code
def msgbox(self, text, height=None, width=None, **kwargs):
"""Display a message dialog box, with scrolling and line wrapping.
:param str text: text to display in the box
:param height: height of the box
:type height: int or ``None``
:param width: width of the box
:type width: int or ``None``
:return: a :term:`Dialog exit code`
:rtype: str
Display *text* in a message box, with a scrollbar and percentage
indication if *text* is too long to fit in a single "screen".
An :meth:`!msgbox` is very similar to a :meth:`yesno` box. The
only difference between an :meth:`!msgbox` and a :meth:`!yesno`
box is that the former only has a single :guilabel:`OK` button.
You can use :meth:`!msgbox` to display any message you like.
After reading the message, the user can press the :kbd:`Enter`
key so that :program:`dialog` will exit and the calling program
can continue its operation.
:meth:`!msgbox` performs automatic line wrapping. If you want to
force a newline at some point, simply insert it in *text*. In
other words (with the default settings), newline characters in
*text* **are** respected; the line wrapping process performed by
:program:`dialog` only inserts **additional** newlines when
needed. If you want no automatic line wrapping, consider using
:meth:`scrollbox`.
Default values for the size parameters when the
:ref:`autowidgetsize <autowidgetsize>` option is disabled:
``height=10, width=30``.
Notable exceptions:
any exception raised by :meth:`Dialog._perform`
"""
height, width = self._default_size((height, width), (10, 30))
return self._widget_with_no_output(
"msgbox",
["--msgbox", text, unicode(height), unicode(width)],
kwargs)
@widget
@retval_is_code
def pause(self, text, height=None, width=None, seconds=5, **kwargs):
"""Display a pause dialog box.
:param str text: text to display in the box
:param height: height of the box
:type height: int or ``None``
:param width: width of the box
:type width: int or ``None``
:param int seconds: number of seconds to pause for
:return:
a :term:`Dialog exit code` (which is :attr:`Dialog.OK` if the
widget ended automatically after *seconds* seconds or if the
user pressed the :guilabel:`OK` button)
:rtype: str
A :meth:`!pause` box displays a text and a meter along the
bottom of the box, during a specified amount of time
(*seconds*). The meter indicates how many seconds remain until
the end of the pause. The widget exits when the specified number
of seconds is elapsed, or immediately if the user presses the
:guilabel:`OK` button, the :guilabel:`Cancel` button or the
:kbd:`Esc` key.
Default values for the size parameters when the
:ref:`autowidgetsize <autowidgetsize>` option is disabled:
``height=15, width=60``.
Notable exceptions:
any exception raised by :meth:`Dialog._perform`
"""
height, width = self._default_size((height, width), (15, 60))
return self._widget_with_no_output(
"pause",
["--pause", text, unicode(height), unicode(width), unicode(seconds)],
kwargs)
@widget
def passwordbox(self, text, height=None, width=None, init='', **kwargs):
"""Display a password input dialog box.
:param str text: text to display in the box
:param height: height of the box
:type height: int or ``None``
:param width: width of the box
:type width: int or ``None``
:param str init: default input password
:return: a tuple of the form :samp:`({code}, {password})` where:
- *code* is a :term:`Dialog exit code`;
- *password* is the password entered by the user.
:rtype: tuple
A :meth:`!passwordbox` is similar to an :meth:`inputbox`, except
that the text the user enters is not displayed. This is useful
when prompting for passwords or other sensitive information. Be
aware that if anything is passed in *init*, it will be visible
in the system's process table to casual snoopers. Also, it is
very confusing to the user to provide them with a default
password they cannot see. For these reasons, using *init* is
highly discouraged.
By default (as in :program:`dialog`), nothing is echoed to the
terminal as the user enters the sensitive text. This can be
confusing to users. Use ``insecure=True`` (keyword argument) if
you want an asterisk to be echoed for each character entered by
the user.
Default values for the size parameters when the
:ref:`autowidgetsize <autowidgetsize>` option is disabled:
``height=10, width=60``.
Notable exceptions:
any exception raised by :meth:`Dialog._perform`
"""
height, width = self._default_size((height, width), (10, 60))
# The help output does not depend on whether --help-status was passed
# (dialog 1.2-20130902).
return self._widget_with_string_output(
["--passwordbox", text, unicode(height), unicode(width), init],
kwargs, strip_xdialog_newline=True, raw_help=True)
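# Illustrative usage sketch (assumes a Dialog instance 'd'); insecure=True makes
# dialog echo one asterisk per typed character, as noted in the docstring:
#     code, password = d.passwordbox("Enter your passphrase:", insecure=True)
#     if code != d.OK:
#         password = None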
def _progressboxoid(self, widget, file_path=None, file_flags=os.O_RDONLY,
fd=None, text=None, height=20, width=78, **kwargs):
if (file_path is None and fd is None) or \
(file_path is not None and fd is not None):
raise BadPythonDialogUsage(
"{0}.{1}.{2}: either 'file_path' or 'fd' must be provided, and "
"not both at the same time".format(
__name__, self.__class__.__name__, widget))
with _OSErrorHandling():
if file_path is not None:
if fd is not None:
raise PythonDialogBug(
"unexpected non-None value for 'fd': {0!r}".format(fd))
# No need to pass 'mode', as the file is not going to be
# created here.
fd = os.open(file_path, file_flags)
try:
args = [ "--{0}".format(widget) ]
if text is not None:
args.append(text)
args.extend([unicode(height), unicode(width)])
kwargs["redir_child_stdin_from_fd"] = fd
code = self._widget_with_no_output(widget, args, kwargs)
finally:
with _OSErrorHandling():
if file_path is not None:
# We open()ed file_path ourselves, let's close it now.
os.close(fd)
return code
@widget
@retval_is_code
def progressbox(self, file_path=None, file_flags=os.O_RDONLY,
fd=None, text=None, height=None, width=None, **kwargs):
"""
Display a possibly growing stream in a dialog box, as with ``tail -f``.
A file, or more generally a stream that can be read from, must
be specified with either:
:param str file_path: path to the file that is going to be displayed
:param file_flags:
flags used when opening *file_path*; those are passed to
:func:`os.open` (not the built-in :func:`open` function!). By
default, only one flag is set: :data:`os.O_RDONLY`.
or
:param int fd: file descriptor for the stream to be displayed
Remaining parameters:
:param text: caption continuously displayed at the top, above
the stream text, or ``None`` to disable the
caption
:param height: height of the box
:type height: int or ``None``
:param width: width of the box
:type width: int or ``None``
:return: a :term:`Dialog exit code`
:rtype: str
Display the contents of the specified file, updating the dialog
box whenever the file grows, as with the ``tail -f`` command.
The file can be specified in two ways:
- either by giving its path (and optionally :func:`os.open`
flags) with parameters *file_path* and *file_flags*;
- or by passing its file descriptor with parameter *fd* (in
which case it may not even be a file; for instance, it could
be an anonymous pipe created with :func:`os.pipe`).
Default values for the size parameters when the
:ref:`autowidgetsize <autowidgetsize>` option is disabled:
``height=20, width=78``.
Notable exceptions:
- :exc:`PythonDialogOSError` (:exc:`PythonDialogIOError` if
the Python version is < 3.3)
- any exception raised by :meth:`Dialog._perform`
"""
height, width = self._default_size((height, width), (20, 78))
return self._progressboxoid(
"progressbox", file_path=file_path, file_flags=file_flags,
fd=fd, text=text, height=height, width=width, **kwargs)
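# Illustrative usage sketch (assumes a Dialog instance 'd'): feeding the widget
# from an anonymous pipe, as mentioned in the docstring above.
#     read_fd, write_fd = os.pipe()
#     # ... have a child process write log lines to 'write_fd' ...
#     d.progressbox(fd=read_fd, text="Build log")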
@widget
@retval_is_code
def programbox(self, file_path=None, file_flags=os.O_RDONLY,
fd=None, text=None, height=None, width=None, **kwargs):
"""
Display a possibly growing stream in a dialog box, as with ``tail -f``.
A :meth:`!programbox` is very similar to a :meth:`progressbox`.
The only difference between a :meth:`!programbox` and a
:meth:`!progressbox` is that a :meth:`!programbox` displays an
:guilabel:`OK` button, but only after the input stream has been
exhausted (i.e., *End Of File* has been reached).
This dialog box can be used to display the piped output of an
external program. After the program completes, the user can
press the :kbd:`Enter` key to close the dialog and resume
execution of the calling program.
The parameters and exceptions are the same as for
:meth:`progressbox`. Please refer to the corresponding
documentation.
Default values for the size parameters when the
:ref:`autowidgetsize <autowidgetsize>` option is disabled:
``height=20, width=78``.
This widget requires :program:`dialog` >= 1.1-20110302.
.. versionadded:: 2.14
"""
self._dialog_version_check("1.1-20110302", "the programbox widget")
height, width = self._default_size((height, width), (20, 78))
return self._progressboxoid(
"programbox", file_path=file_path, file_flags=file_flags,
fd=fd, text=text, height=height, width=width, **kwargs)
@widget
def radiolist(self, text, height=None, width=None, list_height=None,
choices=[], **kwargs):
"""Display a radiolist box.
:param str text: text to display in the box
:param height: height of the box
:type height: int or ``None``
:param width: width of the box
:type width: int or ``None``
:param list_height: number of entries displayed in the box
(which can be scrolled) at a given time
:type list_height: int or ``None``
:param choices:
an iterable of :samp:`({tag}, {item}, {status})` tuples
where *status* specifies the initial selected/unselected
state of each entry; can be ``True`` or ``False``, ``1`` or
``0``, ``"on"`` or ``"off"`` (``True``, ``1`` and ``"on"``
meaning selected), or any case variation of these two
strings. No more than one entry should be set to ``True``.
:return: a tuple of the form :samp:`({code}, {tag})` where:
- *code* is a :term:`Dialog exit code`;
- *tag* is the tag string corresponding to the entry that was
chosen by the user.
:rtype: tuple
A :meth:`!radiolist` box is similar to a :meth:`menu` box. The
main differences are presentation and that the
:meth:`!radiolist` allows you to indicate which entry is
initially selected, by setting its status to ``True``.
If the user exits with :kbd:`Esc` or :guilabel:`Cancel`, or if
all entries were initially set to ``False`` and not altered
before the user chose :guilabel:`OK`, the returned tag is the
empty string.
Default values for the size parameters when the
:ref:`autowidgetsize <autowidgetsize>` option is disabled:
``height=15, width=54, list_height=7``.
Notable exceptions:
any exception raised by :meth:`Dialog._perform` or :func:`_to_onoff`
"""
height, width, list_height = self._default_size(
(height, width, list_height), (15, 54, 7))
cmd = ["--radiolist", text, unicode(height), unicode(width), unicode(list_height)]
for t in choices:
cmd.extend([ t[0], t[1], _to_onoff(t[2]) ] + list(t[3:]))
(code, output) = self._perform(cmd, **kwargs)
output = self._strip_xdialog_newline(output)
if code == self.HELP:
help_data = self._parse_help(output, kwargs)
if self._help_status_on(kwargs):
help_id, selected_tag = help_data
# Reconstruct 'choices' with the selected item inferred from
# 'selected_tag'.
updated_choices = []
for elt in choices:
tag, item, status = elt[:3]
rest = elt[3:]
updated_choices.append([ tag, item, tag == selected_tag ]
+ list(rest))
return (code, (help_id, selected_tag, updated_choices))
else:
return (code, help_data)
else:
return (code, output)
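# Illustrative usage sketch (assumes a Dialog instance 'd'); exactly one entry
# is pre-selected by passing True as its status:
#     code, tag = d.radiolist("Default editor:",
#                             choices=[("vi", "visual editor", False),
#                                      ("nano", "simple editor", True)])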
@widget
def rangebox(self, text, height=0, width=0, min=None, max=None, init=None,
**kwargs):
"""Display a range dialog box.
:param str text: text to display above the actual range control
:param int height: height of the box
:param int width: width of the box
:param int min: minimum value for the range control
:param int max: maximum value for the range control
:param int init: initial value for the range control
:return: a tuple of the form :samp:`({code}, {val})` where:
- *code* is a :term:`Dialog exit code`;
- *val* is an integer: the value chosen by the user.
:rtype: tuple
The :meth:`!rangebox` dialog allows the user to select from a
range of integers using a kind of slider. The range control
shows the current value as a bar (like the :ref:`gauge dialog
<gauge-widget>`).
The :kbd:`Tab` and arrow keys move the cursor between the
buttons and the range control. When the cursor is on the latter,
you can change the value with the following keys:
+-----------------------+----------------------------+
| Key | Action |
+=======================+============================+
| :kbd:`Left` and | select a digit to modify |
| :kbd:`Right` arrows | |
+-----------------------+----------------------------+
| :kbd:`+` / :kbd:`-` | increment/decrement the |
| | selected digit by one unit |
+-----------------------+----------------------------+
| :kbd:`0`–:kbd:`9` | set the selected digit to |
| | the given value |
+-----------------------+----------------------------+
Some keys are also recognized in all cursor positions:
+------------------+--------------------------------------+
| Key | Action |
+==================+======================================+
| :kbd:`Home` / | set the value to its minimum or |
| :kbd:`End` | maximum |
+------------------+--------------------------------------+
| :kbd:`Page Up` / | decrement/increment the value so |
| :kbd:`Page Down` | that the slider moves by one column |
+------------------+--------------------------------------+
This widget requires :program:`dialog` >= 1.2-20121230.
Notable exceptions:
any exception raised by :meth:`Dialog._perform`
.. versionadded:: 2.14
"""
self._dialog_version_check("1.2-20121230", "the rangebox widget")
for name in ("min", "max", "init"):
if not isinstance(locals()[name], int):
raise BadPythonDialogUsage(
"'{0}' argument not an int: {1!r}".format(name,
locals()[name]))
(code, output) = self._perform(
["--rangebox", text] + [ unicode(i) for i in
(height, width, min, max, init) ],
**kwargs)
if code == self.HELP:
help_data = self._parse_help(output, kwargs, raw_format=True)
# The help output does not depend on whether --help-status was
# passed (dialog 1.2-20130902).
return (code, int(help_data))
elif code in (self.OK, self.EXTRA):
return (code, int(output))
else:
return (code, None)
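# Illustrative usage sketch (assumes a Dialog instance 'd' and
# dialog >= 1.2-20121230, per the version check above):
#     code, value = d.rangebox("Volume:", min=0, max=100, init=42)
#     # 'value' is an int between 0 and 100 when code is d.OK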
@widget
@retval_is_code
def scrollbox(self, text, height=None, width=None, **kwargs):
"""Display a string in a scrollable box, with no line wrapping.
:param str text: string to display in the box
:param height: height of the box
:type height: int or ``None``
:param width: width of the box
:type width: int or ``None``
:return: a :term:`Dialog exit code`
:rtype: str
This method is a layer on top of :meth:`textbox`. The
:meth:`!textbox` widget in :program:`dialog` allows one to
display file contents only. This method can be used to display
any text in a scrollable box. This is simply done by creating a
temporary file, calling :meth:`!textbox` and deleting the
temporary file afterwards.
The text is not automatically wrapped. New lines in the
scrollable box will be placed exactly as in *text*. If you want
automatic line wrapping, you should use the :meth:`msgbox`
widget instead (the :mod:`textwrap` module from the Python
standard library is also worth knowing about).
Default values for the size parameters when the
:ref:`autowidgetsize <autowidgetsize>` option is disabled:
``height=20, width=78``.
Notable exceptions:
:exc:`PythonDialogOSError` (:exc:`PythonDialogIOError` if the
Python version is < 3.3)
.. versionchanged:: 3.1
:exc:`UnableToCreateTemporaryDirectory` exception can't be
raised anymore. The equivalent condition now raises
:exc:`PythonDialogOSError`.
"""
height, width = self._default_size((height, width), (20, 78))
with _OSErrorHandling():
# There is currently no 'encoding' parameter in
# tempfile.NamedTemporaryFile(), and the encoding for text files
# defaults to ASCII in Python 2 → let's use binary mode.
tmpfile = tempfile.NamedTemporaryFile(
mode="wb", prefix="pythondialog.tmp", delete=False)
try:
with tmpfile as f:
f.write(text.encode(locale.getpreferredencoding(False)))
# The temporary file is now closed. According to the tempfile
# module documentation, this is necessary if we want to be able
# to reopen it reliably regardless of the platform.
# Ask for an empty title unless otherwise specified
if kwargs.get("title", None) is None:
kwargs["title"] = ""
return self._widget_with_no_output(
"textbox",
["--textbox", tmpfile.name, unicode(height), unicode(width)],
kwargs)
finally:
# The test should always succeed, but I prefer being on the
# safe side.
if os.path.exists(tmpfile.name):
os.unlink(tmpfile.name)
@widget
@retval_is_code
def tailbox(self, filepath, height=None, width=None, **kwargs):
"""Display the contents of a file in a dialog box, as with ``tail -f``.
:param str filepath: path to a file, the contents of which is to
be displayed in the box
:param height: height of the box
:type height: int or ``None``
:param width: width of the box
:type width: int or ``None``
:return: a :term:`Dialog exit code`
:rtype: str
Display the contents of the file specified with *filepath*,
updating the dialog box whenever the file grows, as with the
``tail -f`` command.
Default values for the size parameters when the
:ref:`autowidgetsize <autowidgetsize>` option is disabled:
``height=20, width=60``.
Notable exceptions:
any exception raised by :meth:`Dialog._perform`
"""
height, width = self._default_size((height, width), (20, 60))
return self._widget_with_no_output(
"tailbox",
["--tailbox", filepath, unicode(height), unicode(width)],
kwargs)
# No tailboxbg widget, at least for now.
@widget
@retval_is_code
def textbox(self, filepath, height=None, width=None, **kwargs):
"""Display the contents of a file in a dialog box.
:param str filepath: path to a file, the contents of which is to
be displayed in the box
:param height: height of the box
:type height: int or ``None``
:param width: width of the box
:type width: int or ``None``
:return: a :term:`Dialog exit code`
:rtype: str
A :meth:`!textbox` lets you display the contents of a text file
in a dialog box. It is like a simple text file viewer. The user
can move through the file using the :kbd:`Up` and :kbd:`Down`
arrow keys, :kbd:`Page Up` and :kbd:`Page Down` as well as the
:kbd:`Home` and :kbd:`End` keys available on most keyboards. If
the lines are too long to be displayed in the box, the
:kbd:`Left` and :kbd:`Right` arrow keys can be used to scroll
the text region horizontally. For more convenience, forward and
backward search functions are also provided.
Default values for the size parameters when the
:ref:`autowidgetsize <autowidgetsize>` option is disabled:
``height=20, width=60``.
Notable exceptions:
any exception raised by :meth:`Dialog._perform`
"""
height, width = self._default_size((height, width), (20, 60))
# This is for backward compatibility... not that it is
# stupid, but I prefer explicit programming.
if kwargs.get("title", None) is None:
kwargs["title"] = filepath
return self._widget_with_no_output(
"textbox",
["--textbox", filepath, unicode(height), unicode(width)],
kwargs)
def _timebox_parse_time(self, time_str):
try:
mo = _timebox_time_cre.match(time_str)
except re.error, e:
raise PythonDialogReModuleError(unicode(e))
if not mo:
raise UnexpectedDialogOutput(
"the dialog-like program returned the following "
"unexpected output (a time string was expected) with the "
"--timebox option: {0!r}".format(time_str))
return [ int(s) for s in mo.group("hour", "minute", "second") ]
@widget
def timebox(self, text, height=None, width=None, hour=-1, minute=-1,
second=-1, **kwargs):
"""Display a time dialog box.
:param str text: text to display in the box
:param height: height of the box
:type height: int or ``None``
:param width: width of the box
:type width: int or ``None``
:param int hour: initial hour selected
:param int minute: initial minute selected
:param int second: initial second selected
:return: a tuple of the form :samp:`({code}, {time})` where:
- *code* is a :term:`Dialog exit code`;
- *time* is a list of the form :samp:`[{hour}, {minute},
{second}]`, where *hour*, *minute* and *second* are integers
corresponding to the time chosen by the user.
:rtype: tuple
:meth:`timebox` is a dialog box which allows one to select an
hour, minute and second. If any of the values for *hour*,
*minute* and *second* is negative, the current time's
corresponding value is used. You can increment or decrement any
of those using the :kbd:`Left`, :kbd:`Up`, :kbd:`Right` and
:kbd:`Down` arrows. Use :kbd:`Tab` or :kbd:`Backtab` to move
between windows.
Default values for the size parameters when the
:ref:`autowidgetsize <autowidgetsize>` option is disabled:
``height=3, width=30``.
Notable exceptions:
- any exception raised by :meth:`Dialog._perform`
- :exc:`PythonDialogReModuleError`
- :exc:`UnexpectedDialogOutput`
"""
height, width = self._default_size((height, width), (3, 30))
(code, output) = self._perform(
["--timebox", text, unicode(height), unicode(width),
unicode(hour), unicode(minute), unicode(second)],
**kwargs)
if code == self.HELP:
help_data = self._parse_help(output, kwargs, raw_format=True)
# The help output does not depend on whether --help-status was
# passed (dialog 1.2-20130902).
return (code, self._timebox_parse_time(help_data))
elif code in (self.OK, self.EXTRA):
return (code, self._timebox_parse_time(output))
else:
return (code, None)
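# Illustrative usage sketch (assumes a Dialog instance 'd'); negative defaults
# mean "use the current time", as explained in the docstring:
#     code, time_ = d.timebox("Alarm time:", hour=-1, minute=-1, second=0)
#     if code == d.OK:
#         hour, minute, second = time_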
@widget
def treeview(self, text, height=0, width=0, list_height=0,
nodes=[], **kwargs):
"""Display a treeview box.
:param str text: text to display at the top of the box
:param int height: height of the box
:param int width: width of the box
:param int list_height:
number of lines reserved for the main part of the box,
where the tree is displayed
:param nodes:
an iterable of :samp:`({tag}, {item}, {status}, {depth})` tuples
describing nodes, where:
- *tag* is used to indicate which node was selected by
the user on exit;
- *item* is the text displayed for the node;
- *status* specifies the initial selected/unselected
state of each entry; can be ``True`` or ``False``,
``1`` or ``0``, ``"on"`` or ``"off"`` (``True``, ``1``
and ``"on"`` meaning selected), or any case variation
of these two strings;
- *depth* is a non-negative integer indicating the depth
of the node in the tree (``0`` for the root node).
:return: a tuple of the form :samp:`({code}, {tag})` where:
- *code* is a :term:`Dialog exit code`;
- *tag* is the tag of the selected node.
Display nodes organized in a tree structure. Each node has a
*tag*, an *item* text, a selected *status*, and a *depth* in
the tree. Only the *item* texts are displayed in the widget;
*tag*\s are only used for the return value. Only one node can
be selected at a given time, as for the :meth:`radiolist`
widget.
This widget requires :program:`dialog` >= 1.2-20121230.
Notable exceptions:
any exception raised by :meth:`Dialog._perform` or :func:`_to_onoff`
.. versionadded:: 2.14
"""
self._dialog_version_check("1.2-20121230", "the treeview widget")
cmd = ["--treeview", text, unicode(height), unicode(width), unicode(list_height)]
nselected = 0
for i, t in enumerate(nodes):
if not isinstance(t[3], int):
raise BadPythonDialogUsage(
"fourth element of node {0} not an int: {1!r}".format(
i, t[3]))
status = _to_onoff(t[2])
if status == "on":
nselected += 1
cmd.extend([ t[0], t[1], status, unicode(t[3]) ] + list(t[4:]))
if nselected != 1:
raise BadPythonDialogUsage(
"exactly one node must be selected, not {0}".format(nselected))
(code, output) = self._perform(cmd, **kwargs)
if code == self.HELP:
help_data = self._parse_help(output, kwargs)
if self._help_status_on(kwargs):
help_id, selected_tag = help_data
# Reconstruct 'nodes' with the selected item inferred from
# 'selected_tag'.
updated_nodes = []
for elt in nodes:
tag, item, status = elt[:3]
rest = elt[3:]
updated_nodes.append([ tag, item, tag == selected_tag ]
+ list(rest))
return (code, (help_id, selected_tag, updated_nodes))
else:
return (code, help_data)
elif code in (self.OK, self.EXTRA):
return (code, output)
else:
return (code, None)
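# Illustrative usage sketch (assumes a Dialog instance 'd'); 'depth' controls
# the indentation of each node and exactly one node must start selected:
#     code, tag = d.treeview("Select a directory:",
#                            nodes=[("root", "/", True, 0),
#                                   ("etc", "etc", False, 1),
#                                   ("usr", "usr", False, 1)])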
@widget
@retval_is_code
def yesno(self, text, height=None, width=None, **kwargs):
"""Display a yes/no dialog box.
:param str text: text to display in the box
:param height: height of the box
:type height: int or ``None``
:param width: width of the box
:type width: int or ``None``
:return: a :term:`Dialog exit code`
:rtype: str
Display a dialog box containing *text* and two buttons labelled
:guilabel:`Yes` and :guilabel:`No` by default.
The box size is *height* rows by *width* columns. If *text* is
too long to fit in one line, it will be automatically divided
into multiple lines at appropriate places. *text* may also
contain the substring ``"\\n"`` or newline characters to control
line breaking explicitly.
This :meth:`!yesno` dialog box is useful for asking questions
that require the user to answer either "yes" or "no". These are
the default button labels, however they can be freely set with
the ``yes_label`` and ``no_label`` keyword arguments. The user
can switch between the buttons by pressing the :kbd:`Tab` key.
Default values for the size parameters when the
:ref:`autowidgetsize <autowidgetsize>` option is disabled:
``height=10, width=30``.
Notable exceptions:
any exception raised by :meth:`Dialog._perform`
"""
height, width = self._default_size((height, width), (10, 30))
return self._widget_with_no_output(
"yesno",
["--yesno", text, unicode(height), unicode(width)],
kwargs)
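# Illustrative usage sketch (assumes a Dialog instance 'd'); yesno() returns a
# Dialog exit code rather than a boolean, so compare against d.OK:
#     if d.yesno("Proceed with the upgrade?") == d.OK:
#         pass   # the user pressed the "Yes" button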
|
skalkoto/pythondialog
|
dialog.py
|
Python
|
lgpl-2.1
| 150,922
|
[
"VisIt"
] |
3391ea81d69fc5e6b5af8d773f7b8f8b345af83ebcfb49798e96f0931a237d35
|
import argparse
import os
import subprocess
from pocket_api import Pocket, PocketException
from pocket import refresh_list
from workflow import Workflow
import config
WF = Workflow()
POCKET_URL = 'http://getpocket.com/a/read/%s'
def execute():
args = parse_args(WF.args)
if args.query is None:
print "No argument provided"
return 0
url = args.query
if args.visit_archive:
subprocess.call(['open', url])
refresh_list()
print archive_item(url)
elif args.archive:
refresh_list()
print archive_item(url)
open_alfred()
elif args.favorite:
refresh_list()
print favorite_item(url)
open_alfred()
elif args.delete:
refresh_list()
print delete_item(url)
open_alfred()
elif args.website:
subprocess.call(['open', POCKET_URL % get_id(url)])
else:
print "An error occured"
def get_id(url):
links = WF.cached_data('pocket_list', max_age=0)
if links is None:
return None
for link in links.values():
if url == link['given_url']:
return link['item_id']
return None
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('--visit-and-archive', dest='visit_archive',
action='store_true', default=None)
parser.add_argument('--archive', dest='archive', action='store_true',
default=None)
parser.add_argument('--favorite', dest='favorite', action='store_true',
default=None)
parser.add_argument('--delete', dest='delete', action='store_true',
default=None)
parser.add_argument('--website', dest='website', action='store_true',
default=None)
parser.add_argument('query', nargs='?', default=None)
return parser.parse_args(args)
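# Illustrative check (not part of the original script): parse_args(['--archive',
# 'http://example.com']) yields a Namespace with archive=True, the other flags
# left at None, and query='http://example.com'.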
def archive_item(url):
item_id = get_id(url)
if not item_id:
return '"item_id" not found'
access_token = WF.get_password('pocket_access_token')
pocket_instance = Pocket(config.CONSUMER_KEY, access_token)
try:
pocket_instance.archive(item_id, wait=False)
remove_from_cache(item_id)
return 'Link archived'
except PocketException:
return 'Connection error'
def favorite_item(url):
item_id = get_id(url)
if not item_id:
return '"item_id" not found'
access_token = WF.get_password('pocket_access_token')
pocket_instance = Pocket(config.CONSUMER_KEY, access_token)
try:
pocket_instance.favorite(item_id, wait=False)
return 'Link favorited'
except PocketException:
return 'Connection error'
def delete_item(url):
item_id = get_id(url)
if not item_id:
return '"item_id" not found'
access_token = WF.get_password('pocket_access_token')
pocket_instance = Pocket(config.CONSUMER_KEY, access_token)
try:
pocket_instance.delete(item_id, wait=False)
remove_from_cache(item_id)
return 'Link deleted'
except PocketException:
return 'Connection error'
def remove_from_cache(item_id):
# remove entry in cache
links = WF.cached_data('pocket_list', max_age=0)
if type(links) is dict and item_id in links:
del links[item_id]
WF.cache_data('pocket_list', links)
def open_alfred():
os.system("osascript -e 'tell application \"Alfred 3\" to run trigger "
"\"open\" in workflow \"com.fniephaus.pocket\"'")
if __name__ == '__main__':
execute()
|
danielma/dotfiles
|
Alfred/Alfred.alfredpreferences/workflows/user.workflow.8EEBA440-B5C0-4FCB-B302-C69767A65CB6/pocket_launcher.py
|
Python
|
mit
| 3,561
|
[
"VisIt"
] |
75f99eb4fa367d35d5dfaa866896bd2db8f1d94c7385fd9ed0209cbacac5f3cd
|
'''Transform operations on globally scoped symbols into
operations on the symbol_cell mapping.
'''
from __future__ import absolute_import
from __future__ import with_statement
from ..runtime.symbol import get_symbol_cells_map, gensym
from ..compiler import ir as I
from ..compiler import bind
from ..compiler.walk import IRWalker, propigate_location
from ..compiler.translate import state as translation_state
class GlobalSymbolTransformer(IRWalker):
def __init__(self, symbol_map_sym, top_scope):
IRWalker.__init__(self)
self.symbol_map_sym = symbol_map_sym
self.current_scope = top_scope
@staticmethod
def is_global(binding):
return bind.get_binding_use_type(binding) == bind.BND_GLOBAL
@staticmethod
def replace(old, new, skips=[]):
propigate_location(old, new, skips)
I.replace_child(old, new)
def visit_function(self, func):
for child in func.defaults:
self.visit(child)
old_scope = self.current_scope
self.current_scope = func.scope
self.visit(func.body)
self.current_scope = old_scope
def make_read_map(self):
return I.make_read_binding(self.current_scope.use_symbol(self.symbol_map_sym))
def visit_read_binding(self, rb):
if not self.is_global(rb.binding):
return
self.replace(rb, I.make_getitem(self.make_read_map(),
I.make_constant(rb.binding.symbol)))
def make_set(self, binding, value_ir):
return I.make_setitem(self.make_read_map(),
I.make_constant(binding.symbol),
value_ir)
def visit_write_binding(self, wb):
value = wb.value
if self.is_global(wb.binding):
del value.continuation
self.replace(wb, self.make_set(wb.binding, value),
skips=[value])
self.visit(value)
def visit_delete_binding(self, db):
if not self.is_global(db.binding):
return
self.replace(db, I.make_delitem(self.make_read_map(),
I.make_constant(db.binding.symbol)))
def visit_foriter(self, fi):
itr = fi.iter
if self.is_global(fi.binding):
old_binding = fi.binding
del fi.binding
sym = gensym('foriter-tmp')
self.current_scope.register_local(sym)
del itr.continuation
self.replace(fi, I.make_progn([
I.make_foriter(tag=fi.tag,
binding=self.current_scope.use_symbol(sym),
iter=itr),
self.make_set(old_binding, I.make_read_binding(self.current_scope.use_symbol(sym)))
]),
skips=[itr])
del fi.tag
self.visit(itr)
def visit_unpack_seq(self, us):
new_bindings = []
copies = []
for binding in us.places:
if not self.is_global(binding):
new_bindings.append(binding)
else:
gs = gensym('unpack-tmp')
new_bindings.append(self.current_scope.register_and_use_local(gs))
copies.append([gs, binding])
seq = us.seq
if copies:
del seq.continuation
del us.places
self.replace(us, I.make_progn([
I.make_unpack_seq(seq, new_bindings)
] + [self.make_set(binding, I.make_read_binding(self.current_scope.use_symbol(gs)))
for gs,binding in copies]),
skips=[seq])
self.visit(seq)
def transform_global_symbol_use(top):
assert isinstance(top, I.toplevel)
top_scope = top.scope
assert not top_scope.parent
symbol_map_sym = gensym('symbol-cells-map')
symbol_map_binding = top_scope.register_local(symbol_map_sym)
GlobalSymbolTransformer(symbol_map_sym, top_scope).visit(top.expression)
if not len(symbol_map_binding.uses):
top_scope.unregister_binding(symbol_map_binding)
return top
expression = top.expression
del expression.continuation
when = None
if isinstance(expression, I.evalwhen):
when = expression.when
expression = expression.expression
del expression.continuation
new_ir = I.make_progn([I.make_write_binding(
top_scope.use_symbol(symbol_map_sym),
I.make_call(callee=I.make_constant(get_symbol_cells_map),
args=[], kwd_names=[], kwd_values=[],
star_args=None, star_kwds=None)),
expression])
if when is not None:
new_ir = I.make_evalwhen(when=when, expression=new_ir)
new_top = I.make_toplevel(new_ir, top_scope)
propigate_location(top, new_top, [expression])
return new_top
|
matthagy/Jamenson
|
jamenson/transform/globals.py
|
Python
|
apache-2.0
| 4,950
|
[
"VisIt"
] |
3c661e5ebfc4a00ba36f20f3671a81b0c8a2f475eb1f9d3ae6b7d13a0c21e823
|
'''
Simple catalog
'''
import numpy as np
from hvs import HVSsample, Contigiani2018
'''
Create ejection catalog
'''
# Initialize an ejection model, in this case the default Contigiani2018 with a minor name customization
ejectionmodel = Contigiani2018(name_modifier='TEST') # The name will appear in the final catalog
print ejectionmodel._name
# Print the allowed ranges of HVS mass and initial velocity for this model -- these variables can be changed
print ejectionmodel.v_range
print ejectionmodel.m_range
# Create a sample of n HVSs
mysample = HVSsample(ejectionmodel, name='My test sample', n=1e5, verbose=True)
# Save it for later!
mysample.save('myfirstcatalog.fits')
'''
Propagate it through the Galaxy
'''
# Take the default MW galactic potential
from hvs.utils.mwpotential import MWPotential
from astropy import units as u
default_potential = MWPotential() # This potential can be personalized, check the documentation using help()
mysample.propagate(potential = default_potential, dt=1*u.Myr, threshold = 1e-7) # See documentation
mysample.save('myfirstcatalog_propagated.fits')
|
contigiani/hvs
|
examples/myfirstcatalog.py
|
Python
|
gpl-3.0
| 1,114
|
[
"Galaxy"
] |
d2f6c6a394625d254d309967106b2474a0d64b33efbd2e972aea6b9b623421d7
|
import numpy as np
### Defining the single-peak Gaussian function
def Single_Gaussian(params, x):
x0 = params[0]
intensity = params[1]
FWHM = params[2]
bkgnd = params[3]
return float(intensity)*np.exp(- 0.5*((x0-x)/(FWHM/2.355))**2) + bkgnd
def Single_Gaussian_integrand(x, x0, intensity, FWHM, bkgnd):
return float(intensity)*np.exp(- 0.5*((x0-x)/(FWHM/2.355))**2) #+ bkgnd
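# Quick sanity check (illustrative, not in the original file): at x == x0 the
# exponential term equals 1, so the peak value is intensity + bkgnd.
#     Single_Gaussian([5.0, 10.0, 2.0, 1.0], np.array([5.0]))  # -> array([11.])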
### Defining the double-peak Gaussian function
def Double_Gaussian(params, x):
x1 = params[0]
intensity1 = params[1]
FWHM1 = params[2]
bkgnd = params[3]
x2 = params[4]
intensity2 = params[5]
FWHM2 = params[6]
return float(intensity1)*np.exp(- 0.5*((x1-x)/(FWHM1/2.355))**2) + float(intensity2)*np.exp(- 0.5*((x2-x)/(FWHM2/2.355))**2) + bkgnd
### Defining the single-peak Crystalball function
def Crystalball(x_array, x0, sigma, alpha, n):
x = (x_array-x0)/sigma*np.sign(alpha)
def bigger(x):
return np.exp(-0.5 * x*x)
def smaller(x):
alph = np.abs(alpha)
b = n / np.abs(alpha) - np.abs(alpha)
a = ((n / alph)**n) * np.exp(-0.5*alph*alph)
return a / (b - x) ** n
y = np.piecewise(x, x >= -np.abs(alpha), [bigger, smaller])
return y
def Single_Crystalball(params, x_array):
x0 = params[0]
N = params[1]
sigma = params[2]
bkgnd = params[3]
alpha = params[4]
n = params[5]
y = N * Crystalball(x_array, x0, sigma, alpha, n) + bkgnd
return y
def Single_Crystalball_integrand(x, x0, N, sigma, alpha, n, bkgnd):
t = (x-x0)/sigma
if (alpha < 0):
t = -t
if (t >= -abs(alpha)):
y = np.exp(-0.5*t*t)
else:
a = ((n/abs(alpha))**n)*np.exp(-0.5*abs(alpha)*abs(alpha))
b = n/abs(alpha) - abs(alpha)
y = a/(b - t)**n
return N*y
### Defining the double-peak Crystalball function
def Double_Crystalball(params, x_array):
x1 = params[0]
N1 = params[1]
sigma1 = params[2]
bkgnd = params[3]
x2 = params[4]
N2 = params[5]
sigma2 = params[6]
alpha = params[7]
n = params[8]
y1 = N1 * Crystalball(x_array, x1, sigma1, alpha, n)
y2 = N2 * Crystalball(x_array, x2, sigma2, alpha, n)
y = y1 + y2 + bkgnd
return y
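# Quick sanity check (illustrative, not in the original file): at the peak
# position the Crystalball core takes its Gaussian branch with value 1, so
# Single_Crystalball returns N + bkgnd there.
#     Single_Crystalball([0.0, 4.0, 1.0, 0.5, 1.5, 3.0], np.array([0.0]))  # -> array([4.5])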
|
karamarielynch/dss-df
|
lineshape.py
|
Python
|
mit
| 2,359
|
[
"Gaussian"
] |
1d4cafc5787206bc96caf41380509d8c8998bd4aba9f2ce4c0625368d5bf6220
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import sys
from spack import *
class Lbann(CMakePackage, CudaPackage):
"""LBANN: Livermore Big Artificial Neural Network Toolkit. A distributed
memory, HPC-optimized, model and data parallel training toolkit for deep
neural networks."""
homepage = "http://software.llnl.gov/lbann/"
url = "https://github.com/LLNL/lbann/archive/v0.91.tar.gz"
git = "https://github.com/LLNL/lbann.git"
maintainers = ['bvanessen']
version('develop', branch='develop')
version('0.100', sha256='d1bab4fb6f1b80ae83a7286cc536a32830890f6e5b0c3107a17c2600d0796912')
version('0.99', sha256='3358d44f1bc894321ce07d733afdf6cb7de39c33e3852d73c9f31f530175b7cd')
version('0.98.1', sha256='9a2da8f41cd8bf17d1845edf9de6d60f781204ebd37bffba96d8872036c10c66')
version('0.98', sha256='8d64b9ac0f1d60db553efa4e657f5ea87e790afe65336117267e9c7ae6f68239')
version('0.97.1', sha256='2f2756126ac8bb993202cf532d72c4d4044e877f4d52de9fdf70d0babd500ce4')
version('0.97', sha256='9794a706fc7ac151926231efdf74564c39fbaa99edca4acb745ee7d20c32dae7')
version('0.96', sha256='97af78e9d3c405e963361d0db96ee5425ee0766fa52b43c75b8a5670d48e4b4a')
version('0.95', sha256='d310b986948b5ee2bedec36383a7fe79403721c8dc2663a280676b4e431f83c2')
version('0.94', sha256='567e99b488ebe6294933c98a212281bffd5220fc13a0a5cd8441f9a3761ceccf')
version('0.93', sha256='77bfd7fe52ee7495050f49bcdd0e353ba1730e3ad15042c678faa5eeed55fb8c')
version('0.92', sha256='9187c5bcbc562c2828fe619d53884ab80afb1bcd627a817edb935b80affe7b84')
version('0.91', sha256='b69f470829f434f266119a33695592f74802cff4b76b37022db00ab32de322f5')
variant('nccl', default=False, description='Builds with support for NCCL communication lib')
variant('opencv', default=True, description='Builds with support for image processing routines with OpenCV')
variant('seq_init', default=False, description='Force serial initialization of weight matrices.')
variant('dtype', default='float',
description='Type for floating point representation of weights',
values=('float', 'double'))
variant('build_type', default='Release',
description='The build type to build',
values=('Debug', 'Release'))
variant('al', default=True, description='Builds with support for Aluminum Library')
variant('conduit', default=True,
description='Builds with support for Conduit Library '
'(note that for v0.99 conduit is required)')
variant('vtune', default=False, description='Builds with support for Intel VTune')
variant('docs', default=False, description='Builds with support for building documentation')
variant('extras', default=False, description='Add python modules for LBANN related tools')
conflicts('@:0.90,0.99:', when='~conduit')
depends_on('cmake@3.16.0:', type='build')
# It seems that there is a need for one statement per version bounds
depends_on('hydrogen +openmp_blas +shared +int64', when='@:0.90,0.95: ~al')
depends_on('hydrogen +openmp_blas +shared +int64 +al', when='@:0.90,0.95: +al')
depends_on('hydrogen +openmp_blas +shared +int64 build_type=Debug',
when='build_type=Debug @:0.90,0.95: ~al')
depends_on('hydrogen +openmp_blas +shared +int64 build_type=Debug +al',
when='build_type=Debug @:0.90,0.95: +al')
depends_on('hydrogen +openmp_blas +shared +int64 +cuda',
when='+gpu @:0.90,0.95: ~al')
depends_on('hydrogen +openmp_blas +shared +int64 +cuda +al',
when='+gpu @:0.90,0.95: +al')
depends_on('hydrogen +openmp_blas +shared +int64 +cuda build_type=Debug',
when='build_type=Debug @:0.90,0.95: +gpu')
depends_on('hydrogen +openmp_blas +shared +int64 +cuda build_type=Debug +al',
when='build_type=Debug @:0.90,0.95: +gpu +al')
# Older versions depended on Elemental not Hydrogen
depends_on('elemental +openmp_blas +shared +int64', when='@0.91:0.94')
depends_on('elemental +openmp_blas +shared +int64 build_type=Debug',
when='build_type=Debug @0.91:0.94')
depends_on('aluminum', when='@:0.90,0.95: +al ~gpu')
depends_on('aluminum +cuda +ht', when='@:0.90,0.95: +al +cuda ~nccl')
depends_on('aluminum +cuda +nccl +ht', when='@:0.90,0.95: +al +cuda +nccl')
depends_on('cudnn', when='+cuda')
depends_on('cub', when='@0.94:0.98.2 +cuda')
depends_on('mpi')
depends_on('hwloc')
# LBANN wraps OpenCV calls in OpenMP parallel loops, build without OpenMP
# Additionally disable video related options, they incorrectly link in a
# bad OpenMP library when building with clang or Intel compilers
# Note that for Power systems we want the environment to add +powerpc +vsx
depends_on('opencv@3.2.0: +core +highgui +imgproc +jpeg +png +tiff +zlib '
'+fast-math ~calib3d ~cuda ~dnn ~eigen '
'~features2d ~flann ~gtk ~ipp ~ipp_iw ~jasper ~java ~lapack ~ml '
'~openmp ~opencl ~opencl_svm ~openclamdblas ~openclamdfft '
'~pthreads_pf ~python ~qt ~stitching ~superres ~ts ~video '
'~videostab ~videoio ~vtk', when='+opencv')
depends_on('cnpy')
depends_on('nccl', when='@0.94:0.98.2 +cuda +nccl')
depends_on('conduit@0.4.0: +hdf5', when='@0.94:0.99 +conduit')
depends_on('conduit@0.4.0: +hdf5', when='@:0.90,0.99:')
depends_on('python@3: +shared', type=('build', 'run'), when='@:0.90,0.99:')
extends("python")
depends_on('py-setuptools', type='build')
depends_on('py-argparse', type='run', when='@:0.90,0.99: ^python@:2.6')
depends_on('py-configparser', type='run', when='@:0.90,0.99: +extras')
depends_on('py-graphviz@0.10.1:', type='run', when='@:0.90,0.99: +extras')
depends_on('py-matplotlib@3.0.0:', type='run', when='@:0.90,0.99: +extras')
depends_on('py-numpy@1.16.0:', type=('build', 'run'), when='@:0.90,0.99: +extras')
depends_on('py-onnx@1.3.0:', type='run', when='@:0.90,0.99: +extras')
depends_on('py-pandas@0.24.1:', type='run', when='@:0.90,0.99: +extras')
depends_on('py-texttable@1.4.0:', type='run', when='@:0.90,0.99: +extras')
depends_on('py-pytest', type='test', when='@:0.90,0.99:')
depends_on('py-protobuf+cpp@3.6.1:', type=('build', 'run'), when='@:0.90,0.99:')
depends_on('py-breathe', type='build', when='+docs')
depends_on('doxygen', type='build', when='+docs')
depends_on('py-m2r', type='build', when='+docs')
depends_on('cereal')
depends_on('catch2', type='test')
depends_on('clara')
generator = 'Ninja'
depends_on('ninja', type='build')
@property
def common_config_args(self):
spec = self.spec
# Environment variables
cppflags = []
cppflags.append('-DLBANN_SET_EL_RNG -ldl')
return [
'-DCMAKE_CXX_FLAGS=%s' % ' '.join(cppflags),
'-DLBANN_VERSION=spack',
'-DCNPY_DIR={0}'.format(spec['cnpy'].prefix),
]
# Get any recent versions or non-numeric version
# Note that develop > numeric and non-develop < numeric
@when('@:0.90,0.94:')
def cmake_args(self):
spec = self.spec
args = self.common_config_args
args.extend([
'-DLBANN_WITH_TOPO_AWARE:BOOL=%s' % ('+cuda +nccl' in spec),
'-DLBANN_WITH_ALUMINUM:BOOL=%s' % ('+al' in spec),
'-DLBANN_WITH_CONDUIT:BOOL=%s' % ('+conduit' in spec),
'-DLBANN_WITH_CUDA:BOOL=%s' % ('+cuda' in spec),
'-DLBANN_WITH_CUDNN:BOOL=%s' % ('+cuda' in spec),
'-DLBANN_WITH_SOFTMAX_CUDA:BOOL=%s' % ('+cuda' in spec),
'-DLBANN_SEQUENTIAL_INITIALIZATION:BOOL=%s' %
('+seq_init' in spec),
'-DLBANN_WITH_TBINF=OFF',
'-DLBANN_WITH_VTUNE:BOOL=%s' % ('+vtune' in spec),
'-DLBANN_DATATYPE={0}'.format(spec.variants['dtype'].value),
'-DLBANN_VERBOSE=0',
'-DCEREAL_DIR={0}'.format(spec['cereal'].prefix),
# protobuf is included by py-protobuf+cpp
'-DProtobuf_DIR={0}'.format(spec['protobuf'].prefix)])
if spec.satisfies('@:0.90') or spec.satisfies('@0.95:'):
args.extend([
'-DHydrogen_DIR={0}/CMake/hydrogen'.format(
spec['hydrogen'].prefix)])
elif spec.satisfies('@0.94'):
args.extend([
'-DElemental_DIR={0}/CMake/elemental'.format(
spec['elemental'].prefix)])
if spec.satisfies('@0.94:0.98.2'):
args.extend(['-DLBANN_WITH_NCCL:BOOL=%s' %
('+cuda +nccl' in spec)])
if '+vtune' in spec:
args.extend(['-DVTUNE_DIR={0}'.format(spec['vtune'].prefix)])
if '+al' in spec:
args.extend(['-DAluminum_DIR={0}'.format(spec['aluminum'].prefix)])
if '+conduit' in spec:
args.extend([
'-DLBANN_CONDUIT_DIR={0}'.format(spec['conduit'].prefix),
'-DConduit_DIR={0}'.format(spec['conduit'].prefix)])
# Add support for OpenMP
if spec.satisfies('%clang') or spec.satisfies('%apple-clang'):
if sys.platform == 'darwin':
clang = self.compiler.cc
clang_bin = os.path.dirname(clang)
clang_root = os.path.dirname(clang_bin)
args.extend([
'-DOpenMP_CXX_FLAGS=-fopenmp=libomp',
'-DOpenMP_CXX_LIB_NAMES=libomp',
'-DOpenMP_libomp_LIBRARY={0}/lib/libomp.dylib'.format(
clang_root)])
if '+opencv' in spec:
args.extend(['-DOpenCV_DIR:STRING={0}'.format(
spec['opencv'].prefix)])
if '+cuda' in spec:
args.extend([
'-DCUDA_TOOLKIT_ROOT_DIR={0}'.format(
spec['cuda'].prefix)])
args.extend([
'-DcuDNN_DIR={0}'.format(
spec['cudnn'].prefix)])
if spec.satisfies('@0.94:0.98.2'):
args.extend(['-DCUB_DIR={0}'.format(
spec['cub'].prefix)])
if '+nccl' in spec:
args.extend([
'-DNCCL_DIR={0}'.format(
spec['nccl'].prefix)])
return args
@when('@0.91:0.93')
def cmake_args(self):
spec = self.spec
args = self.common_config_args
args.extend([
'-DWITH_CUDA:BOOL=%s' % ('+cuda' in spec),
'-DWITH_CUDNN:BOOL=%s' % ('+cuda' in spec),
'-DELEMENTAL_USE_CUBLAS:BOOL=%s' % (
'+cublas' in spec['elemental']),
'-DWITH_TBINF=OFF',
'-DWITH_VTUNE=OFF',
'-DElemental_DIR={0}'.format(spec['elemental'].prefix),
'-DELEMENTAL_MATH_LIBS={0}'.format(
spec['elemental'].libs),
'-DSEQ_INIT:BOOL=%s' % ('+seq_init' in spec),
'-DVERBOSE=0',
'-DLBANN_HOME=.'])
if spec.variants['dtype'].value == 'float':
args.extend(['-DDATATYPE=4'])
elif spec.variants['dtype'].value == 'double':
args.extend(['-DDATATYPE=8'])
if '+opencv' in spec:
args.extend(['-DOpenCV_DIR:STRING={0}'.format(
spec['opencv'].prefix)])
if '+cudnn' in spec:
args.extend(['-DcuDNN_DIR={0}'.format(
spec['cudnn'].prefix)])
if '+cub' in spec:
args.extend(['-DCUB_DIR={0}'.format(
spec['cub'].prefix)])
return args
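# Illustrative note (assumption, not part of the original package file): with
# this recipe available in a Spack repository, a CUDA-enabled build could be
# requested on the command line with something like
#     spack install lbann@0.100 +cuda ~nccl dtype=float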
|
rspavel/spack
|
var/spack/repos/builtin/packages/lbann/package.py
|
Python
|
lgpl-2.1
| 11,820
|
[
"VTK"
] |
ced4f315c9294ff08559504d86fa48c01281aa1358c709bd8d72fc984713c0ee
|
""" GOCDBClient module is a client for the GOC DB, looking for Downtimes.
"""
__RCSID__ = "$Id$"
import urllib2
import time
import socket
from datetime import datetime, timedelta
from xml.dom import minidom
from DIRAC import S_OK, S_ERROR, gLogger
def _parseSingleElement( element, attributes = None ):
"""
Given a DOM Element, return a dictionary of its
child elements and values (as strings).
"""
handler = {}
for child in element.childNodes:
attrName = str( child.nodeName )
if attributes is not None:
if attrName not in attributes:
continue
try:
attrValue = str( child.childNodes[0].nodeValue )
except IndexError:
continue
handler[attrName] = attrValue
return handler
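# Illustrative example (not part of the original module): given
#   doc = minidom.parseString( '<DOWNTIME><SEVERITY>OUTAGE</SEVERITY></DOWNTIME>' )
# the call _parseSingleElement( doc.documentElement ) returns {'SEVERITY': 'OUTAGE'}.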
#############################################################################
class GOCDBClient( object ):
""" Class for dealing with GOCDB. Class because of easier use from RSS
"""
#############################################################################
def getStatus( self, granularity, name = None, startDate = None,
startingInHours = None, timeout = None ):
"""
Return actual GOCDB status of entity in `name`
:params:
:attr:`granularity`: string: should be a ValidRes, e.g. "Resource"
:attr:`name`: should be the name(s) of the ValidRes.
Could be a list of basestring or simply one basestring.
If not given, fetches the complete list.
:attr:`startDate`: if not given, takes only ongoing DownTimes.
If given, it can be a datetime or a string ("YYYY-MM-DD"); DownTimes
starting after that date are downloaded.
:attr:`startingInHours`: optional integer. If given, download
DownTimes starting in the next given hours (startDate is then ignored)
:return: (example)
{'OK': True,
'Value': {'92569G0': {'DESCRIPTION': 'Annual site downtime for various major tasks in the area of network, storage, etc.',
'FORMATED_END_DATE': '2014-05-27 15:21',
'FORMATED_START_DATE': '2014-05-26 04:00',
'GOCDB_PORTAL_URL': 'https://goc.egi.eu/portal/index.php?Page_Type=Downtime&id=14051',
'HOSTED_BY': 'FZK-LCG2',
'HOSTNAME': 'lhcbsrm-kit.gridka.de',
'SERVICE_TYPE': 'SRM.nearline',
'SEVERITY': 'OUTAGE'},
'93293G0': {'DESCRIPTION': 'Maintenance on KIT campus border routers. In the unlikely event that redundancy should fail, FZK-LCG2 connection to the GPN will be down. LHCOPN/LHCONE will stay up.',
'FORMATED_END_DATE': '2014-07-12 14:00',
'FORMATED_START_DATE': '2014-07-12 06:00',
'GOCDB_PORTAL_URL': 'https://goc.egi.eu/portal/index.php?Page_Type=Downtime&id=14771',
'HOSTED_BY': 'FZK-LCG2',
'HOSTNAME': 'lhcbsrm-kit.gridka.de',
'SERVICE_TYPE': 'SRM.nearline',
'SEVERITY': 'WARNING'}
}
}
"""
startDate_STR = None
startDateMax = None
if startingInHours is not None:
startDate = datetime.utcnow()
startDateMax = startDate + timedelta( hours = startingInHours )
if startDate is not None:
if isinstance( startDate, basestring ):
startDate_STR = startDate
startDate = datetime( *time.strptime( startDate, "%Y-%m-%d" )[0:3] )
elif isinstance( startDate, datetime ):
startDate_STR = startDate.isoformat( ' ' )[0:10]
if timeout is not None:
socket.setdefaulttimeout( timeout )
if startingInHours is not None:
# make 2 queries and later merge the results
# first call: pass the startDate argument as None,
# so the curlDownload method will search for only ongoing DTs
resXML_ongoing = self._downTimeCurlDownload( name )
if resXML_ongoing is None:
res_ongoing = {}
else:
res_ongoing = self._downTimeXMLParsing( resXML_ongoing, granularity, name )
# second call: pass the startDate argument
resXML_startDate = self._downTimeCurlDownload( name, startDate_STR )
if resXML_startDate is None:
res_startDate = {}
else:
res_startDate = self._downTimeXMLParsing( resXML_startDate, granularity,
name, startDateMax )
# merge the results of the 2 queries:
res = res_ongoing
for k in res_startDate.keys():
if k not in res.keys():
res[k] = res_startDate[k]
else:
#just query for onGoing downtimes
resXML = self._downTimeCurlDownload( name, startDate_STR )
if resXML is None:
return S_OK( None )
res = self._downTimeXMLParsing( resXML, granularity, name, startDateMax )
# Common: build URL
# if res is None or res == []:
# return S_OK(None)
#
# self.buildURL(res)
if res == {}:
res = None
return S_OK( res )
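# Illustrative usage sketch (assumes a working DIRAC environment and network
# access to the GOC DB; the hostname below is hypothetical):
#   client = GOCDBClient()
#   result = client.getStatus( 'Resource', name = 'srm.example.org', startingInHours = 24 )
#   if result['OK'] and result['Value']:
#     for dtID, dt in result['Value'].items():
#       gLogger.info( '%s %s %s' % ( dtID, dt['SEVERITY'], dt['FORMATED_START_DATE'] ) )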
#############################################################################
def getServiceEndpointInfo( self, granularity, entity ):
"""
Get service endpoint info (in a dictionary)
:params:
:attr:`granularity` : a string. Could be in ('hostname', 'sitename', 'roc',
'country', 'service_type', 'monitored')
:attr:`entity` : a string. Actual name of the entity.
"""
assert( type( granularity ) == str and type( entity ) == str )
try:
serviceXML = self._getServiceEndpointCurlDownload( granularity, entity )
return S_OK( self._serviceEndpointXMLParsing( serviceXML ) )
except Exception, e:
_msg = 'Exception getting information for %s %s: %s' % ( granularity, entity, e )
gLogger.exception( _msg )
return S_ERROR( _msg )
#############################################################################
# def getSiteInfo(self, site):
# """
# Get site info (in a dictionary)
#
# :params:
# :attr:`entity` : a string. Actual name of the site.
# """
#
# siteXML = self._getSiteCurlDownload(site)
# return S_OK(self._siteXMLParsing(siteXML))
#############################################################################
# def buildURL(self, DTList):
# '''build the URL relative to the DT '''
# baseURL = "https://goc.egi.eu/downtime/list?id="
# for dt in DTList:
# id = str(dt['id'])
# url = baseURL + id
# dt['URL'] = url
#############################################################################
def _downTimeCurlDownload( self, entity = None, startDate = None ):
""" Download ongoing downtimes for entity using the GOC DB programmatic interface
"""
#GOCDB-PI url and method settings
#
# Set the GOCDB URL
gocdbpi_url = "https://goc.egi.eu/gocdbpi_v4/public/?method=get_downtime"
# Set the desired start date
if startDate is None:
when = "&ongoing_only=yes"
gocdbpi_startDate = ""
else:
when = "&startdate="
gocdbpi_startDate = startDate
# GOCDB-PI to query
gocdb_ep = gocdbpi_url
if entity is not None:
if isinstance( entity, basestring ):
gocdb_ep = gocdb_ep + "&topentity=" + entity
gocdb_ep = gocdb_ep + when + gocdbpi_startDate
req = urllib2.Request( gocdb_ep )
dtPage = urllib2.urlopen( req )
dt = dtPage.read()
return dt
#############################################################################
def _getServiceEndpointCurlDownload( self, granularity, entity ):
"""
Calls method `get_service_endpoint` from the GOC DB programmatic interface.
:params:
:attr:`granularity` : a string. Could be in ('hostname', 'sitename', 'roc',
'country', 'service_type', 'monitored')
:attr:`entity` : a string. Actual name of the entity.
"""
if type( granularity ) != str or type( entity ) != str:
raise ValueError, "Arguments must be strings."
# GOCDB-PI query
gocdb_ep = "https://goc.egi.eu/gocdbpi_v4/public/?method=get_service_endpoint&" \
+ granularity + '=' + entity
service_endpoint_page = urllib2.urlopen( gocdb_ep )
return service_endpoint_page.read()
#############################################################################
# def _getSiteCurlDownload(self, site):
# """
# Calls method `get_site` from the GOC DB programmatic interface.
#
# :params:
# :attr:`site` : a string. Actual name of the site.
# """
#
# # GOCDB-PI query
# gocdb_ep = "https://goc.egi.eu/gocdbpi_v4/public/?method=get_site&sitename="+site
#
# req = urllib2.Request(gocdb_ep)
# site_page = urllib2.urlopen(req)
#
# return site_page.read()
#############################################################################
def _downTimeXMLParsing( self, dt, siteOrRes, entities = None, startDateMax = None ):
""" Performs xml parsing from the dt string (returns a dictionary)
"""
doc = minidom.parseString( dt )
downtimeElements = doc.getElementsByTagName( "DOWNTIME" )
dtDict = {}
for dtElement in downtimeElements:
elements = _parseSingleElement( dtElement, ['SEVERITY', 'SITENAME', 'HOSTNAME',
'HOSTED_BY', 'FORMATED_START_DATE',
'FORMATED_END_DATE', 'DESCRIPTION',
'GOCDB_PORTAL_URL', 'SERVICE_TYPE' ] )
dtDict[ str( dtElement.getAttributeNode( "PRIMARY_KEY" ).nodeValue ) ] = elements
for dt_ID in dtDict.keys():
if siteOrRes in ( 'Site', 'Sites' ):
if not ( 'SITENAME' in dtDict[dt_ID].keys() ):
dtDict.pop( dt_ID )
continue
if entities is not None:
if not isinstance( entities, list ):
entities = [entities]
if not ( dtDict[dt_ID]['SITENAME'] in entities ):
dtDict.pop( dt_ID )
elif siteOrRes in ( 'Resource', 'Resources' ):
if not ( 'HOSTNAME' in dtDict[dt_ID].keys() ):
dtDict.pop( dt_ID )
continue
if entities is not None:
if not isinstance( entities, list ):
entities = [entities]
if not ( dtDict[dt_ID]['HOSTNAME'] in entities ):
dtDict.pop( dt_ID )
if startDateMax is not None:
for dt_ID in dtDict.keys():
startDateMaxFromKeys = datetime( *time.strptime( dtDict[dt_ID]['FORMATED_START_DATE'],
"%Y-%m-%d %H:%M" )[0:5] )
if startDateMaxFromKeys > startDateMax:
dtDict.pop( dt_ID )
return dtDict
#############################################################################
def _serviceEndpointXMLParsing( self, serviceXML ):
""" Performs xml parsing from the service endpoint string
Returns a list.
"""
doc = minidom.parseString( serviceXML )
services = doc.getElementsByTagName( "SERVICE_ENDPOINT" )
services = [_parseSingleElement( s ) for s in services]
return services
|
sposs/DIRAC
|
Core/LCG/GOCDBClient.py
|
Python
|
gpl-3.0
| 11,129
|
[
"DIRAC"
] |
320420c1d2c1905d1e5479eba454f29eb00177ae59fd08689e99ddfce6c5fa7e
|
# Orca
#
# Copyright 2010 Joanmarie Diggs.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom speech generator for gnome-panel."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs."
__license__ = "LGPL"
import pyatspi
import orca.orca as orca
import orca.speech_generator as speech_generator
_settingsManager = getattr(orca, '_settingsManager')
class SpeechGenerator(speech_generator.SpeechGenerator):
def __init__(self, script):
speech_generator.SpeechGenerator.__init__(self, script)
def _generateName(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the name of the object. If the object is directly
displaying any text, that text will be treated as the name.
Otherwise, the accessible name of the object will be used. If
there is no accessible name, then the description of the
object will be used. This method will return an empty array
if nothing can be found.
"""
acss = self.voice(speech_generator.DEFAULT)
role = args.get('role', obj.getRole())
if role == pyatspi.ROLE_FRAME:
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
else:
acss = self.voice(speech_generator.SYSTEM)
result = speech_generator.SpeechGenerator.\
_generateName(self, obj, **args)
if result:
result.extend(acss)
return result
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/orca/scripts/apps/gnome-panel/speech_generator.py
|
Python
|
gpl-3.0
| 2,264
|
[
"ORCA"
] |
07d5c2879ff7c55fb900325005067fe4449352301680384da031f145776b9822
|
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the ExecuteProcess Action."""
import asyncio
import os
import platform
import signal
import sys
from launch import LaunchContext
from launch import LaunchDescription
from launch import LaunchService
from launch.actions import SetLaunchConfiguration
from launch.actions.emit_event import EmitEvent
from launch.actions.execute_process import ExecuteProcess
from launch.actions.opaque_function import OpaqueFunction
from launch.actions.register_event_handler import RegisterEventHandler
from launch.actions.shutdown_action import Shutdown
from launch.actions.timer_action import TimerAction
from launch.event_handlers.on_process_start import OnProcessStart
from launch.events.shutdown import Shutdown as ShutdownEvent
import pytest
@pytest.mark.parametrize('test_input,expected', [
(None, [True, False]),
({'TEST_NEW_ENV': '2'}, [False, True])
])
def test_execute_process_with_env(test_input, expected):
"""Test launching a process with an environment variable."""
os.environ['TEST_CHANGE_CURRENT_ENV'] = '1'
additional_env = {'TEST_PROCESS_WITH_ENV': 'Hello World'}
executable = ExecuteProcess(
cmd=[sys.executable, 'TEST_PROCESS_WITH_ENV'],
output='screen',
env=test_input,
additional_env=additional_env
)
ld = LaunchDescription([executable])
ls = LaunchService()
ls.include_launch_description(ld)
assert 0 == ls.run()
env = executable.process_details['env']
assert env['TEST_PROCESS_WITH_ENV'] == 'Hello World'
assert ('TEST_CHANGE_CURRENT_ENV' in env) is expected[0]
if expected[0]:
assert env['TEST_CHANGE_CURRENT_ENV'] == '1'
assert ('TEST_NEW_ENV' in env) is expected[1]
if expected[1]:
assert env['TEST_NEW_ENV'] == '2'
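# In short: env=None inherits the launching environment (so TEST_CHANGE_CURRENT_ENV is
# visible), while an explicit env dict replaces it, leaving only TEST_NEW_ENV plus
# additional_env; this matches the two expectation lists parametrized above.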
def test_execute_process_with_on_exit_behavior():
"""Test a process' on_exit callback and actions are processed."""
def on_exit_callback(event, context):
on_exit_callback.called = True
on_exit_callback.called = False
executable_with_on_exit_callback = ExecuteProcess(
cmd=[sys.executable, '-c', "print('callback')"],
output='screen', on_exit=on_exit_callback
)
assert len(executable_with_on_exit_callback.get_sub_entities()) == 0
def on_exit_function(context):
on_exit_function.called = True
on_exit_function.called = False
on_exit_action = OpaqueFunction(function=on_exit_function)
executable_with_on_exit_action = ExecuteProcess(
cmd=[sys.executable, '-c', "print('action')"],
output='screen', on_exit=[on_exit_action]
)
assert executable_with_on_exit_action.get_sub_entities() == [on_exit_action]
ld = LaunchDescription([
executable_with_on_exit_callback,
executable_with_on_exit_action
])
ls = LaunchService()
ls.include_launch_description(ld)
assert 0 == ls.run()
assert on_exit_callback.called
assert on_exit_function.called
def test_execute_process_shutdown():
"""Test shutting down a process in (non)interactive settings."""
def on_exit(event, ctx):
on_exit.returncode = event.returncode
def generate_launch_description():
process_action = ExecuteProcess(
cmd=[sys.executable, '-c', 'import signal; signal.pause()'],
sigterm_timeout='1', # shorten timeouts
on_exit=on_exit
)
# Launch process and emit shutdown event as if
# launch had received a SIGINT
return LaunchDescription([
process_action,
RegisterEventHandler(event_handler=OnProcessStart(
target_action=process_action,
on_start=[
EmitEvent(event=ShutdownEvent(
reason='none',
due_to_sigint=True
))
]
))
])
ls = LaunchService(noninteractive=True)
ls.include_launch_description(generate_launch_description())
assert 0 == ls.run()
if platform.system() != 'Windows':
assert on_exit.returncode == -signal.SIGINT # Got SIGINT
else:
assert on_exit.returncode != 0 # Process terminated
ls = LaunchService() # interactive
ls.include_launch_description(generate_launch_description())
assert 0 == ls.run()
if platform.system() != 'Windows':
# Assume interactive Ctrl+C (i.e. SIGINT to process group)
assert on_exit.returncode == -signal.SIGTERM # Got SIGTERM
else:
assert on_exit.returncode != 0 # Process terminated
def test_execute_process_with_respawn():
"""Test launching a process with a respawn and respawn_delay attribute."""
def on_exit_callback(event, context):
on_exit_callback.called_count = on_exit_callback.called_count + 1
on_exit_callback.called_count = 0
respawn_delay = 2.0
shutdown_time = 3.0 # to shutdown the launch service, so that the process only respawn once
expected_called_count = 2 # normal exit and respawn exit
def generate_launch_description():
return LaunchDescription([
ExecuteProcess(
cmd=[sys.executable, '-c', "print('action')"],
respawn=True, respawn_delay=respawn_delay, on_exit=on_exit_callback
),
TimerAction(
period=shutdown_time,
actions=[
Shutdown(reason='Timer expired')
]
)
])
ls = LaunchService()
ls.include_launch_description(generate_launch_description())
assert 0 == ls.run()
assert expected_called_count == on_exit_callback.called_count
def test_execute_process_prefix_filter_match():
lc = LaunchContext()
lc._set_asyncio_loop(asyncio.get_event_loop())
SetLaunchConfiguration('launch-prefix', 'time').visit(lc)
assert len(lc.launch_configurations) == 1
SetLaunchConfiguration(
'launch-prefix-filter',
f'{os.path.basename(sys.executable)}').visit(lc)
assert len(lc.launch_configurations) == 2
test_process = ExecuteProcess(
cmd=[sys.executable, '-c', "print('action')"],
output='screen'
)
test_process.execute(lc)
assert 'time' in test_process.process_details['cmd']
def test_execute_process_prefix_filter_no_match():
lc = LaunchContext()
lc._set_asyncio_loop(asyncio.get_event_loop())
SetLaunchConfiguration('launch-prefix', 'time').visit(lc)
assert len(lc.launch_configurations) == 1
SetLaunchConfiguration(
'launch-prefix-filter', 'no-match').visit(lc)
assert len(lc.launch_configurations) == 2
test_process = ExecuteProcess(
cmd=[sys.executable, '-c', "print('action')"],
output='screen'
)
test_process.execute(lc)
assert 'time' not in test_process.process_details['cmd']
def test_execute_process_prefix_filter_override_in_launch_file():
lc = LaunchContext()
lc._set_asyncio_loop(asyncio.get_event_loop())
SetLaunchConfiguration('launch-prefix', 'time').visit(lc)
assert len(lc.launch_configurations) == 1
SetLaunchConfiguration(
'launch-prefix-filter', 'no-match').visit(lc)
assert len(lc.launch_configurations) == 2
test_process = ExecuteProcess(
prefix='echo',
cmd=[sys.executable, '-c', "print('action')"],
output='screen'
)
test_process.execute(lc)
assert 'echo' in test_process.process_details['cmd'] and \
'time' not in test_process.process_details['cmd']
|
ros2/launch
|
launch/test/launch/test_execute_process.py
|
Python
|
apache-2.0
| 8,098
|
[
"VisIt"
] |
66ecd44b1fbef167b6dd26ad661bb264a62dca283cc67ebffceb2a95e9d01319
|
import json
import operator
import pytest
from flask import Blueprint, Flask, g, jsonify, request
from flask.views import MethodView
from flask_allows import (
Additional,
Allows,
And,
C,
Not,
Or,
Override,
Permission,
Requirement,
exempt_from_requirements,
guard_entire,
requires,
)
from flask_allows.additional import _additional_ctx_stack
from flask_allows.overrides import _override_ctx_stack
pytestmark = pytest.mark.integration
# This is a whole bunch of setup for the integration tests
# route registrations, user setup, etc
# if you're not interested in this skip to the comment
# THIS IS WHERE THE TESTS BEGIN
class User(object):
def __init__(self, username, userlevel, *permissions):
self.username = username
self.permissions = frozenset(permissions)
self.userlevel = userlevel
def has_permission(self, permission):
return permission in self.permissions
def __repr__(self):
return "User({}, {}, {!r})".format(
self.username, self.userlevel, self.permissions
)
class AuthLevels:
banned = "banned"
guest = "guest"
user = "user"
admin = "admin"
staff = "staff"
users = {
"banned": User("Brian", AuthLevels.banned),
"guest": User("George", AuthLevels.guest, "view"),
"user": User("Ulric", AuthLevels.user, "view", "reply"),
"admin": User("Adam", AuthLevels.admin, "view", "reply", "edit", "ban"),
"staff": User("Seth", AuthLevels.staff, "view", "reply", "edit", "ban", "promote"),
}
app = Flask(__name__)
# explicitly turn these off, act like we're the real thing
app.testing = False
app.debug = False
# register this first so we sandwich the extension's before/after
# wouldn't check this in a real application, but we're trying to
# surface corner cases
@app.after_request
@app.before_request
def ensure_empty_stacks(resp=None):
assert _override_ctx_stack.top is None
assert _additional_ctx_stack.top is None
return resp
allows = Allows(
app=app, identity_loader=lambda: g.user, on_fail=lambda *a, **k: ("nope", 403)
)
@app.before_request
def load_user():
username = request.headers.get("Authorization")
user = users.get(username, users["guest"])
g.user = user
@app.before_request
def block_banned():
allows.additional.current.add(Not(HasLevel(AuthLevels.banned)))
@app.before_request
def add_override():
override = request.headers.get("Override")
if override:
allows.overrides.current.add(HasPermission(override))
class HasPermission(Requirement):
def __init__(self, name):
self.name = name
def fulfill(self, user):
return user.has_permission(self.name)
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
return isinstance(other, HasPermission) and self.name == other.name
def __repr__(self):
return "HasPermission({})".format(self.name)
class HasLevel(Requirement):
def __init__(self, userlevel):
self.userlevel = userlevel
def fulfill(self, user):
return self.userlevel == user.userlevel
def __hash__(self):
return hash(self.userlevel)
def __eq__(self, other):
return isinstance(other, HasLevel) and self.userlevel == other.userlevel
def __repr__(self):
return "HasLevel({})".format(self.userlevel)
@app.route("/")
# empty, run any ambient additional requirements
# should add this as a feature...?
@requires()
def index():
return "Welcome", 200
@app.route("/promote")
@requires(HasPermission("promote"))
def promote():
return "promoted", 200
class ItemsView(MethodView):
decorators = [requires(HasPermission("view"))]
def get(self):
return "the things", 200
@requires(HasPermission("reply"))
def post(self):
return "posted reply", 200
@requires(HasPermission("edit"))
def patch(self):
return "updated", 200
app.add_url_rule("/items", view_func=ItemsView.as_view(name="items"))
@app.route("/raise")
def raiser():
allows.overrides.current.add(lambda u: True)
raise Exception()
@app.route("/use-permission")
def use_permission():
with Permission(
Or(
HasLevel(AuthLevels.admin),
HasLevel(AuthLevels.staff),
And(HasLevel(AuthLevels.user), HasPermission("promote")),
)
):
pass
return "thumbs up"
@app.route("/odd-perm")
@requires(C(HasLevel("admin"), HasPermission("ban"), op=operator.xor))
def odd_perm():
return "thumbsup"
@app.route("/misbehave")
def misbehave():
# push our own contexts and forget to remove them;
# the after_request handler would complain if the extension didn't
# clean them up
allows.overrides.push(Override())
allows.additional.push(Additional())
bp = Blueprint("test_integration_bp", "bp")
bp.before_request(guard_entire([HasPermission("promote")]))
@bp.route("/")
def bp_index():
return "hello from permissioned endpoint"
@bp.route("/exempt")
@exempt_from_requirements
def exempt():
return "hello from exempt endpoint"
failure = Blueprint("test_integration_failure", "failure")
failure.before_request(
guard_entire([HasPermission("promote")], on_fail=lambda **k: ("bp nope", 403))
)
@failure.route("/")
def failure_index():
return None
cbv = Blueprint("test_integration_cbv", "cbv")
cbv.before_request(guard_entire([HasPermission("promote")]))
class SomeCBV(MethodView):
decorators = [exempt_from_requirements]
def get(self):
return "hello"
cbv.add_url_rule("/", view_func=SomeCBV.as_view(name="exempt"))
multi = Blueprint("test_integration_multi", "multi")
multi.before_request(
guard_entire(
[HasPermission("ban")], on_fail=lambda *a, **k: ("must be able to ban", 403)
)
)
multi.before_request(
guard_entire(
[HasLevel(AuthLevels.staff)], on_fail=lambda *a, **k: ("must be staff", 403)
)
)
@multi.route("/")
def multi_index():
return "hello"
def view_args_to_resp(*a, **k):
return jsonify(k)
has_view_args = Blueprint("test_integration_args", "args")
has_view_args.before_request(
guard_entire([HasPermission("noone")], on_fail=view_args_to_resp)
)
@has_view_args.route("/<foo>/<bar>/")
def has_view_args_index():
return ""
app.register_blueprint(bp, url_prefix="/bp")
app.register_blueprint(failure, url_prefix="/failure")
app.register_blueprint(cbv, url_prefix="/cbv")
app.register_blueprint(multi, url_prefix="/multi")
app.register_blueprint(has_view_args, url_prefix="/args")
guarded_app = Flask(__name__)
guarded_app.testing = False
guarded_app.debug = False
all_app_allows = Allows(
app=guarded_app,
identity_loader=lambda: g.user,
on_fail=lambda *a, **k: ("nope", 403),
)
@guarded_app.before_request
def guarded_app_load_user():
username = request.headers.get("Authorization")
user = users.get(username, users["guest"])
g.user = user
guarded_app.before_request(
guard_entire(
[HasLevel(AuthLevels.staff)], on_fail=lambda *a, **k: ("must be staff", 403)
)
)
@guarded_app.route("/")
@exempt_from_requirements
def unguarded_index():
return "welcome", 200
@guarded_app.route("/staff")
def guarded_staff():
return "welcome to staff", 200
@pytest.fixture
def client():
with app.test_client() as client:
yield client
@pytest.fixture
def guarded_app_client():
with guarded_app.test_client() as client:
yield client
# THIS IS WHERE THE TESTS BEGIN
def test_blocks_guests_from_entering(client):
rv = client.get("/", headers={"Authorization": "banned"})
assert rv.data == b"nope"
assert rv.status_code == 403
def test_must_have_view_to_access_items(client):
rv = client.get("/items", headers={"Authorization": "banned"})
assert rv.data == b"nope"
assert rv.status_code == 403
@pytest.mark.parametrize("user", ["guest", "user", "admin", "staff"])
def test_can_view_items(user, client):
rv = client.get("/items", headers={"Authorization": user})
assert rv.data == b"the things"
assert rv.status_code == 200
@pytest.mark.parametrize("user", ["banned", "guest"])
def test_must_have_reply_to_access_reply(user, client):
rv = client.post("/items", headers={"Authorization": user})
assert rv.data == b"nope"
assert rv.status_code == 403
@pytest.mark.parametrize("user", ["user", "admin", "staff"])
def test_can_post_item_reply(user, client):
rv = client.post("/items", headers={"Authorization": user})
assert rv.data == b"posted reply"
assert rv.status_code == 200
def test_can_post_reply_with_override(client):
rv = client.post("/items", headers={"Authorization": "guest", "Override": "reply"})
assert rv.data == b"posted reply"
assert rv.status_code == 200
def test_recovers_from_endpoint_that_raises(client):
rv = client.get("/raise")
assert rv.status_code == 500
@pytest.mark.parametrize("user", ["admin", "staff"])
def test_can_access_permissioned_endpoint(user, client):
rv = client.get("/use-permission", headers={"Authorization": user})
assert rv.status_code == 200
assert rv.data == b"thumbs up"
def test_can_access_permissioned_endpoint_with_override(client):
rv = client.get(
"/use-permission", headers={"Authorization": "user", "Override": "promote"}
)
assert rv.status_code == 200
assert rv.data == b"thumbs up"
# python2 dict.keys returns a list, python3 returns a view
@pytest.mark.parametrize("user", set(users.keys()) - {"admin", "staff"})
def test_cant_access_permissioned_endpoint(user, client):
rv = client.get("/use-permission", headers={"Authorization": user})
assert rv.status_code == 403
def test_odd_permission(client):
# has neither, xor should be false
rv = client.get("/odd-perm", headers={"Authorization": "guest"})
assert rv.status_code == 403
# has both, xor should be false
rv = client.get("/odd-perm", headers={"Authorization": "admin"})
assert rv.status_code == 403
# has one, xor should be true
rv = client.get("/odd-perm", headers={"Authorization": "staff"})
assert rv.status_code == 200
# has one because of override, xor should be True
rv = client.get("/odd-perm", headers={"Authorization": "admin", "Override": "ban"})
assert rv.status_code == 200
def test_cleans_up_lingering_contexts(client):
# endpoint pushes its own contexts but doesn't clean them up
# asserts the extension object does clean them up
client.get("/misbehave")
assert not allows.additional.current
assert not allows.overrides.current
def test_exempts_from_blueprint_requirements(client):
rv = client.get("/bp/exempt", headers={"Authorization": "guest"})
assert rv.status_code == 200
@pytest.mark.parametrize("user", ["guest", "user", "admin"])
def test_blocks_unpermissioned_from_accessing_blueprint(user, client):
rv = client.get("/bp/", headers={"Authorization": user})
assert rv.status_code == 403
def test_allows_user_to_access_blueprint(client):
rv = client.get("/bp/", headers={"Authorization": "staff"})
assert rv.status_code == 200
assert b"permissioned" in rv.data
@pytest.mark.parametrize("user", ["guest", "user", "admin"])
def test_override_works_with_permission_blueprint(user, client):
rv = client.get("/bp/", headers={"Authorization": user, "Override": "promote"})
assert rv.status_code == 200
assert b"permissioned" in rv.data
def test_blueprint_guard_can_return_early_response(client):
rv = client.get("/failure/", headers={"Authorization": "user"})
assert rv.status_code == 403
assert b"bp nope" in rv.data
def test_exempts_cbv_when_class_decorated(client):
rv = client.get("/cbv/", headers={"Authorization": "user"})
assert rv.status_code == 200
def test_multi_tiered_guard_triggers_separately(client):
rv = client.get("/multi/", headers={"Authorization": "user"})
assert rv.status_code == 403
assert b"ban" in rv.data
rv = client.get("/multi/", headers={"Authorization": "admin"})
assert rv.status_code == 403
assert b"staff" in rv.data
rv = client.get("/multi/", headers={"Authorization": "staff"})
assert rv.status_code == 200
assert b"hello" == rv.data
def test_guard_entire_passes_view_args_to_on_fail(client):
rv = client.get("/args/foo/bar/")
data = json.loads(rv.data.decode("utf-8"))
assert data == {"foo": "foo", "bar": "bar"}
@pytest.mark.parametrize("user", users.keys())
def test_guarded_application_allows_everyone_to_index(guarded_app_client, user):
rv = guarded_app_client.get("/", headers={"Authorization": user})
assert rv.status_code == 200
def test_guarded_application_allows_staff_into_protected_route(guarded_app_client):
rv = guarded_app_client.get("/staff", headers={"Authorization": "staff"})
assert rv.status_code == 200
@pytest.mark.parametrize("user", set(users.keys()) - {"staff"})
def test_guarded_application_denies_nonstaff_into_protected_route(
guarded_app_client, user
):
rv = guarded_app_client.get("/staff", headers={"Authorization": user})
assert rv.status_code == 403
def test_guard_entire_doesnt_explode_with_no_populated_endpoint(client):
rv = client.get("/totally/made/up", headers={"Authorization": "staff"})
assert rv.status_code == 404
|
justanr/flask-allows
|
test/test_integration.py
|
Python
|
mit
| 13,379
|
[
"Brian"
] |
b992f6de17fa511ae827783250e55454d35aabe0cbd28244f84e5e9b53ddb8d1
|
"""Demonstrates molecular dynamics with constant energy."""
from ase.lattice.cubic import FaceCenteredCubic
from ase.md.velocitydistribution import MaxwellBoltzmannDistribution
from ase.md.verlet import VelocityVerlet
from ase import units
# Use Asap for a huge performance increase if it is installed
use_asap = True
if use_asap:
from asap3 import EMT
size = 10
else:
from ase.calculators.emt import EMT
size = 3
# Set up a crystal
atoms = FaceCenteredCubic(directions=[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
symbol="Cu",
size=(size, size, size),
pbc=True)
# Describe the interatomic interactions with the Effective Medium Theory
atoms.set_calculator(EMT())
# Set the momenta corresponding to T=300K
MaxwellBoltzmannDistribution(atoms, 300 * units.kB)
# We want to run MD with constant energy using the VelocityVerlet algorithm.
dyn = VelocityVerlet(atoms, 5 * units.fs) # 5 fs time step.
def printenergy(a=atoms): # store a reference to atoms in the definition.
"""Function to print the potential, kinetic and total energy."""
epot = a.get_potential_energy() / len(a)
ekin = a.get_kinetic_energy() / len(a)
print('Energy per atom: Epot = %.3feV Ekin = %.3feV (T=%3.0fK) '
'Etot = %.3feV' % (epot, ekin, ekin / (1.5 * units.kB), epot + ekin))
# Now run the dynamics
dyn.attach(printenergy, interval=10)
printenergy()
dyn.run(200)
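# With the 5 fs time step above, dyn.run(200) covers 200 * 5 fs = 1 ps of simulated time,
# printing the energies every 10 steps.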
|
misdoro/python-ase
|
doc/tutorials/md/moldyn2.py
|
Python
|
gpl-2.0
| 1,464
|
[
"ASE",
"CRYSTAL"
] |
15502fe9798de9c8e6013a853e0f8544d0c81aebcfcd8be982b1021d6881c690
|
import ast
import networkx as nx
import os
import pandas as pd
import numpy as np
MASK_TOKEN='_mask_'
class OGB_ASTWalker(ast.NodeVisitor):
def __init__(self):
self.node_id = 0
self.stack = []
self.graph = nx.Graph()
self.nodes = {}
def generic_visit(self, node):
node_name = self.node_id
self.node_id += 1
# if available, extract AST node attributes
name = getattr(node, 'name', None)
arg = getattr(node, 'arg', None)
s = getattr(node, 's', None)
n = getattr(node, 'n', None)
id_ = getattr(node, 'id', None)
attr = getattr(node, 'attr', None)
values = [name, arg, s, n, id_, attr]
node_value = next((str(value) for value in values if value is not None), None)
if isinstance(node_value, str):
node_value = node_value.encode('utf-8', errors='surrogatepass')
# encapsulate all node features in a dict
self.nodes[node_name] = {'type': type(node).__name__,
'attribute': node_value.decode('UTF-8') if node_value is not None else node_value,
'attributed': True if node_value != None else False,
'depth': len(self.stack),
'dfs_order': node_name}
# DFS traversal logic
parent_name = None
if self.stack:
parent_name = self.stack[-1]
self.stack.append(node_name)
self.graph.add_node(node_name)
if parent_name != None:
# replicate AST as NetworkX object
self.graph.add_edge(node_name, parent_name)
super().generic_visit(node)
self.stack.pop()
def py2graph_helper(code, attr2idx, type2idx, mask=True):
'''
Input:
code: code snippet
Mappers:
- attr_mapping: mapping from attribute to integer idx
- type_mapping: mapping from type to integer idx
- mask (bool): whether to mask the method name or not.
If we do method naming, we need to set it to True. Otherwise, there is data leakage.
Output: OGB graph object
'''
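# The dict assembled below mirrors the OGB graph object layout: 'edge_index'
# (2 x num_edges), 'node_feat' (num_nodes x 2: [type idx, attr idx]), per-node
# 'node_dfs_order', 'node_depth', 'node_is_attributed', plus 'num_nodes' and 'num_edges'.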
tree = ast.parse(code)
walker = OGB_ASTWalker()
walker.visit(tree)
ast_nodes, ast_edges = walker.nodes, walker.graph.edges()
if mask:
assert 'FunctionDef' in ast_nodes[1]['type'], 'To mask method name, 1st node in AST must be of type FunctionDef'
method_name = ast_nodes[1]['attribute']
for idx, ast_node in ast_nodes.items():
if 'FunctionDef' in ast_node['type'] and ast_node['attribute'] == method_name:
ast_nodes[idx]['attribute'] = MASK_TOKEN
print(ast_nodes)
data = dict()
data['edge_index'] = np.array([[i, j] for i, j in ast_edges]).transpose()
# first dim: type
# second dim: attr
# meta-info
# dfs_order: integer
# attributed: 0 or 1
node_feat = []
dfs_order = []
depth = []
attributed = []
for i in range(len(ast_nodes)):
typ = ast_nodes[i]['type'] if ast_nodes[i]['type'] in type2idx else '__UNK__'
if ast_nodes[i]['attributed']:
attr = ast_nodes[i]['attribute'] if ast_nodes[i]['attribute'] in attr2idx else '__UNK__'
else:
attr = '__NONE__'
node_feat.append([type2idx[typ], attr2idx[attr]])
dfs_order.append(ast_nodes[i]['dfs_order'])
depth.append(ast_nodes[i]['depth'])
attributed.append(ast_nodes[i]['attributed'])
### meta-information
data['node_feat'] = np.array(node_feat, dtype = np.int64)
data['node_dfs_order'] = np.array(dfs_order, dtype = np.int64).reshape(-1,1)
data['node_depth'] = np.array(depth, dtype = np.int64).reshape(-1,1)
data['node_is_attributed'] = np.array(attributed, dtype = np.int64).reshape(-1,1)
data['num_nodes'] = len(data['node_feat'])
data['num_edges'] = len(data['edge_index'][0])
return data
def test_transform(py2graph):
code = '''
def train(model, device, loader, optimizer):
model.train()
loss_accum = 0
for step, batch in enumerate(tqdm(loader, desc="Iteration")):
batch = batch.to(device)
if batch.x.shape[0] == 1 or batch.batch[-1] == 0:
pass
else:
pred_list = model(batch)
optimizer.zero_grad()
loss = 0
for i in range(len(pred_list)):
loss += multicls_criterion(pred_list[i].to(torch.float32), batch.y_arr[:,i])
loss = loss / len(pred_list)
loss.backward()
optimizer.step()
loss_accum += loss.item()
print('Average training loss: {}'.format(loss_accum / (step + 1)))
'''
graph = py2graph(code)
print(graph)
invalid_code = '''
import antigravity
xkcd loves Python
'''
try:
graph = py2graph(invalid_code)
except SyntaxError:
print('Successfully caught syntax error')
if __name__ == "__main__":
mapping_dir = 'dataset/ogbg_code2/mapping'
attr_mapping = dict()
type_mapping = dict()
for line in pd.read_csv(os.path.join(mapping_dir, 'attridx2attr.csv.gz')).values:
attr_mapping[line[1]] = int(line[0])
for line in pd.read_csv(os.path.join(mapping_dir, 'typeidx2type.csv.gz')).values:
type_mapping[line[1]] = int(line[0])
py2graph = lambda py: py2graph_helper(py, attr_mapping, type_mapping)
test_transform(py2graph)
|
snap-stanford/ogb
|
examples/graphproppred/code2/py2graph.py
|
Python
|
mit
| 5,537
|
[
"VisIt"
] |
635d5484608e415bfa5b95cecacd80c23ff1e45ca6868410013463135ed34ecf
|
# -*- coding: utf-8 -*-
# Copyright 2014 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from __future__ import with_statement
import os.path
import re
import time
import sys
import lxml.html
from lxml.cssselect import CSSSelector
from splinter.cookie_manager import CookieManagerAPI
from splinter.driver import DriverAPI, ElementAPI
from splinter.element_list import ElementList
from splinter.exceptions import ElementDoesNotExist
from splinter.request_handler.status_code import StatusCode
class CookieManager(CookieManagerAPI):
def __init__(self, browser_cookies):
self._cookies = browser_cookies
def add(self, cookies):
if isinstance(cookies, list):
for cookie in cookies:
for key, value in cookie.items():
self._cookies.set_cookie('localhost', key, value)
return
for key, value in cookies.items():
self._cookies.set_cookie('localhost', key, value)
def delete(self, *cookies):
if cookies:
for cookie in cookies:
try:
self._cookies.delete_cookie('localhost', cookie)
except KeyError:
pass
else:
self._cookies.cookie_jar.clear()
def all(self, verbose=False):
cookies = {}
for cookie in self._cookies.cookie_jar:
cookies[cookie.name] = cookie.value
return cookies
def __getitem__(self, item):
cookies = dict([(c.name, c) for c in self._cookies.cookie_jar])
return cookies[item].value
def __eq__(self, other_object):
if isinstance(other_object, dict):
cookies_dict = dict([(c.name, c.value)
for c in self._cookies.cookie_jar])
return cookies_dict == other_object
class FlaskClient(DriverAPI):
driver_name = "flask"
def __init__(self, app, user_agent=None, wait_time=2):
self.wait_time = wait_time
app.config['TESTING'] = True
self._browser = app.test_client()
self._history = []
self._cookie_manager = CookieManager(self._browser)
self._last_urls = []
self._forms = {}
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def _post_load(self):
self._forms = {}
try:
del self._html
except AttributeError:
pass
self.status_code = StatusCode(self._response.status_code, '')
def visit(self, url):
self._url = url
self._response = self._browser.get(url, follow_redirects=True)
self._last_urls.append(url)
self._post_load()
def submit(self, form):
method = form.attrib['method']
func_method = getattr(self._browser, method.lower())
action = form.attrib['action']
if action.strip() != '.':
url = os.path.join(self._url, form.attrib['action'])
else:
url = self._url
self._url = url
data = dict(((k, v) for k, v in form.fields.items() if v is not None))
for key in form.inputs.keys():
input = form.inputs[key]
if getattr(input, 'type', '') == 'file' and key in data:
data[key] = open(data[key], 'rb')
self._response = func_method(url, data=data, follow_redirects=True)
self._post_load()
return self._response
def back(self):
self._last_urls.insert(0, self.url)
self.visit(self._last_urls[1])
def forward(self):
try:
self.visit(self._last_urls.pop())
except IndexError:
pass
def reload(self):
self.visit(self._url)
def quit(self):
pass
@property
def htmltree(self):
try:
return self._html
except AttributeError:
self._html = lxml.html.fromstring(self.html)
return self._html
@property
def title(self):
html = self.htmltree
return html.xpath('//title')[0].text_content().strip()
@property
def html(self):
return self._response.get_data(as_text=True)
@property
def url(self):
return self._url
def find_option_by_value(self, value):
html = self.htmltree
element = html.xpath('//option[@value="%s"]' % value)[0]
control = FlaskClientControlElement(element.getparent(), self)
return ElementList([FlaskClientOptionElement(element, control)], find_by="value", query=value)
def find_option_by_text(self, text):
html = self.htmltree
element = html.xpath('//option[normalize-space(text())="%s"]' % text)[0]
control = FlaskClientControlElement(element.getparent(), self)
return ElementList([FlaskClientOptionElement(element, control)], find_by="text", query=text)
def find_by_css(self, selector):
xpath = CSSSelector(selector).path
return self.find_by_xpath(xpath, original_find="css", original_selector=selector)
def find_by_xpath(self, xpath, original_find=None, original_selector=None):
html = self.htmltree
elements = []
for xpath_element in html.xpath(xpath):
if self._element_is_link(xpath_element):
return self._find_links_by_xpath(xpath)
elif self._element_is_control(xpath_element):
elements.append((FlaskClientControlElement, xpath_element))
else:
elements.append((FlaskClientElement, xpath_element))
find_by = original_find or "xpath"
query = original_selector or xpath
return ElementList(
[element_class(element, self) for element_class, element in elements],
find_by=find_by, query=query)
def find_by_tag(self, tag):
return self.find_by_xpath('//%s' % tag, original_find="tag", original_selector=tag)
def find_by_value(self, value):
return self.find_by_xpath('//*[@value="%s"]' % value, original_find="value", original_selector=value)
def find_by_id(self, id_value):
return self.find_by_xpath(
'//*[@id="%s"][1]' % id_value, original_find="id", original_selector=id_value)
def find_by_name(self, name):
html = self.htmltree
xpath = '//*[@name="%s"]' % name
elements = []
for xpath_element in html.xpath(xpath):
elements.append(xpath_element)
find_by = "name"
query = xpath
return ElementList(
[FlaskClientControlElement(element, self) for element in elements],
find_by=find_by, query=query)
def find_link_by_text(self, text):
return self._find_links_by_xpath("//a[text()='%s']" % text)
def find_link_by_href(self, href):
return self._find_links_by_xpath("//a[@href='%s']" % href)
def find_link_by_partial_href(self, partial_href):
return self._find_links_by_xpath("//a[contains(@href, '%s')]" % partial_href)
def find_link_by_partial_text(self, partial_text):
return self._find_links_by_xpath("//a[contains(normalize-space(.), '%s')]" % partial_text)
def fill(self, name, value):
self.find_by_name(name=name).first.fill(value)
def fill_form(self, field_values):
for name, value in field_values.items():
element = self.find_by_name(name)
control = element.first._control
control_type = control.get('type')
if control_type == 'checkbox':
if value:
control.value = value # control.options
else:
control.value = []
elif control_type == 'radio':
control.value = value # [option for option in control.options if option == value]
elif control_type == 'select':
control.value = [value]
else:
# text, textarea, password, tel
control.value = value
def choose(self, name, value):
self.find_by_name(name).first._control.value = value
def check(self, name):
control = self.find_by_name(name).first._control
control.value = ['checked']
def uncheck(self, name):
control = self.find_by_name(name).first._control
control.value = []
def attach_file(self, name, file_path):
control = self.find_by_name(name).first._control
control.value = file_path
def _find_links_by_xpath(self, xpath):
html = self.htmltree
links = html.xpath(xpath)
return ElementList(
[FlaskClientLinkElement(link, self) for link in links], find_by="xpath", query=xpath)
def select(self, name, value):
self.find_by_name(name).first._control.value = value
def is_text_present(self, text, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
if self._is_text_present(text):
return True
return False
def _is_text_present(self, text):
try:
body = self.find_by_tag('body').first
return text in body.text
except ElementDoesNotExist:
# This exception will be thrown if the body tag isn't present
# This has occasionally been observed. Assume that the
# page isn't fully loaded yet
return False
def is_text_not_present(self, text, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
if not self._is_text_present(text):
return True
return False
def _element_is_link(self, element):
return element.tag == 'a'
def _element_is_control(self, element):
return hasattr(element, 'type')
@property
def cookies(self):
return self._cookie_manager
re_extract_inner_html = re.compile(r'^<[^<>]+>(.*)</[^<>]+>$')
class FlaskClientElement(ElementAPI):
def __init__(self, element, parent):
self._element = element
self.parent = parent
def __getitem__(self, attr):
return self._element.attrib[attr]
def find_by_css(self, selector):
elements = self._element.cssselect(selector)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_xpath(self, selector):
elements = self._element.xpath(selector)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_name(self, name):
elements = self._element.cssselect('[name="%s"]' % name)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_tag(self, name):
elements = self._element.cssselect(name)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_value(self, value):
elements = self._element.cssselect('[value="%s"]' % value)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_id(self, id):
elements = self._element.cssselect('#%s' % id)
return ElementList([self.__class__(element, self) for element in elements])
@property
def value(self):
return self._element.text_content()
@property
def text(self):
return self.value
@property
def outer_html(self):
return lxml.html.tostring(self._element, encoding='unicode').strip()
@property
def html(self):
return re_extract_inner_html.match(self.outer_html).group(1)
def has_class(self, class_name):
return len(self._element.find_class(class_name)) > 0
class FlaskClientLinkElement(FlaskClientElement):
def __init__(self, element, parent):
super(FlaskClientLinkElement, self).__init__(element, parent)
self._browser = parent
def __getitem__(self, attr):
return super(FlaskClientLinkElement, self).__getitem__(attr)
def click(self):
return self._browser.visit(self["href"])
class FlaskClientControlElement(FlaskClientElement):
def __init__(self, control, parent):
self._control = control
self.parent = parent
def __getitem__(self, attr):
return self._control.attrib[attr]
@property
def value(self):
return self._control.value
@property
def checked(self):
return bool(self._control.value)
def click(self):
parent_form = self._get_parent_form()
return self.parent.submit(parent_form).data
def fill(self, value):
parent_form = self._get_parent_form()
if sys.version_info[0] > 2:
parent_form.fields[self['name']] = value
else:
parent_form.fields[self['name']] = value.decode('utf-8')
def select(self, value):
self._control.value = value
def _get_parent_form(self):
parent_form = next(self._control.iterancestors('form'))
return self.parent._forms.setdefault(parent_form._name(), parent_form)
class FlaskClientOptionElement(FlaskClientElement):
def __init__(self, control, parent):
self._control = control
self.parent = parent
def __getitem__(self, attr):
return self._control.attrib[attr]
@property
def text(self):
return self._control.text
@property
def value(self):
return self._control.attrib['value']
@property
def selected(self):
return self.parent.value == self.value
|
harish0507/GMapsScrapper
|
lib/splinter-0.7.2/splinter/driver/flaskclient.py
|
Python
|
mit
| 13,708
|
[
"VisIt"
] |
0f782b7327fcba28296527fe947b7dc4cbe2d2479540ba53d22fa3762748a556
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""MMTF trajectory reader --- :mod:`MDAnalysis.coordinates.MMTF`
================================================================
Reads coordinate data from the `Macromolecular Transmission Format
(MMTF) format`_. This should generally be a quicker alternative to PDB.
.. versionadded:: 0.16.0
Classes
-------
.. autoclass:: MMTFReader
:members:
.. autofunction:: fetch_mmtf
.. _Macromolecular Transmission Format (MMTF) format: https://mmtf.rcsb.org/
"""
import mmtf
from . import base
from ..core.universe import Universe
def _parse_mmtf(fn):
if fn.endswith('gz'):
return mmtf.parse_gzip(fn)
else:
return mmtf.parse(fn)
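# e.g. _parse_mmtf('1abc.mmtf.gz') is routed to mmtf.parse_gzip, while
# _parse_mmtf('1abc.mmtf') uses mmtf.parse (filenames here are hypothetical).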
class MMTFReader(base.SingleFrameReaderBase):
"""Topology parser for the MMTF_ format.
.. _Macromolecular Transmission Format (MMTF) format:
https://mmtf.rcsb.org/
"""
format = 'MMTF'
def _read_first_frame(self):
# TODO: Check units?
if isinstance(self.filename, mmtf.MMTFDecoder):
top = self.filename
else:
top = _parse_mmtf(self.filename)
self.n_atoms = top.num_atoms
self.ts = ts = self._Timestep(self.n_atoms,
**self._ts_kwargs)
ts._pos[:, 0] = top.x_coord_list
ts._pos[:, 1] = top.y_coord_list
ts._pos[:, 2] = top.z_coord_list
ts.dimensions = top.unit_cell
return ts
def fetch_mmtf(pdb_id):
"""Create a Universe from the RCSB Protein Data Bank using mmtf format
Parameters
----------
pdb_id : string
PDB code of the desired data, eg '4UCP'
Returns
-------
MDAnalysis Universe of the corresponding PDB system
See Also
--------
mmtf.fetch : Function for fetching raw mmtf data
.. versionadded:: 0.16.0
"""
return Universe(mmtf.fetch(pdb_id))
|
alejob/mdanalysis
|
package/MDAnalysis/coordinates/MMTF.py
|
Python
|
gpl-2.0
| 2,868
|
[
"MDAnalysis"
] |
034e7ed33f51556e7e95c16221a25a503953bc5d2b0c17f03b1b803c94925bf6
|
'''
PathwayGenie (c) University of Manchester 2017
PathwayGenie is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=signature-differs
# pylint: disable=too-few-public-methods
# pylint: disable=too-many-arguments
from collections import defaultdict
from random import randint, sample
import re
from synbiochem.utils import plate_utils
from sbclearn.optimisation import gen_alg
def get_dest_comps(designs=12, rows=8, columns=12, all_comps=12, max_comps=6):
'''Gets dest_comps.'''
dest_comps = []
for idx in range(designs):
dest_well = plate_utils.get_well(idx, rows, columns)
comps = sorted(sample(range(all_comps), randint(1, max_comps)))
dest_comps.append([dest_well, comps])
return dest_comps
def get_perfect_comps(rows=8, columns=12):
'''Gets perfect (one component per row) dest_comps.'''
dest_comps = []
for idx in range(rows):
dest_well = plate_utils.get_well(idx, rows, columns)
comps = [idx]
dest_comps.append([dest_well, comps])
return dest_comps
def optimise(comps_dest, rows=8, columns=12):
'''Optimise component layout.'''
alg = AssemblyGeneticAlgorithm(100, comps_dest, rows, columns)
return alg.run(1000)
class AssemblyGeneticAlgorithm(gen_alg.GeneticAlgorithm):
'''Class to run a genetic algorithm to optimise LCR assembly.'''
def __init__(self, pop_size, comps_dest, rows=8, cols=12,
retain=0.1, random_select=0.5, mutate=0.8, verbose=True):
self.__comps_dest = comps_dest
self.__rows = rows
self.__cols = cols
args = {key: None for key in comps_dest}
super(AssemblyGeneticAlgorithm, self).__init__(
pop_size, args, retain=retain, random_select=random_select,
mutate=mutate, verbose=verbose)
def _get_individual(self):
'''Create a member of the population.'''
rnd = [plate_utils.get_well(idx, self.__rows, self.__cols)
for idx in sample(range(self.__rows * self.__cols),
len(self._args))]
return dict(zip(self._args, rnd))
def _get_arg(self, key, individual):
'''Gets a random argument.'''
curr_well = individual[key]
curr_ords = _get_ords(curr_well)
while True:
new_ords = [(curr_ords[0] + randint(1, self.__rows)) %
self.__rows,
(curr_ords[1] + randint(1, self.__cols)) %
self.__cols + 1]
new_well = chr(new_ords[0] + ord('A')) + str(new_ords[1])
curr_comp = None
for comp, well in individual.iteritems():
if well == new_well:
curr_comp = comp
break
if curr_comp is not None:
individual[curr_comp] = curr_well
return new_well
def _procreate(self, male, female):
'''Procreate.'''
pos = randint(0, len(male))
child = male.copy()
for idx, fem_comp in enumerate(female.keys()):
if idx >= pos:
if female[fem_comp] not in child.values():
child[fem_comp] = female[fem_comp]
else:
for child_comp, child_well in child.iteritems():
if child_well == female[fem_comp]:
child[child_comp] = child[idx]
child[fem_comp] = female[fem_comp]
return child
def _fitness(self, individual):
'''Determine the fitness of an individual.'''
dists = []
for comp, well in individual.iteritems():
for dest_well in self.__comps_dest[comp]:
dists.append(sum(abs(e - s)
for s, e in zip(_get_ords(well),
_get_ords(dest_well))))
return sum(dists)
def _get_ords(well):
vals = re.split(r'(\d+)', well)
return [ord(vals[0]) - ord('A'), int(vals[1])]
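# e.g. _get_ords('B3') == [1, 3]: a zero-based row index and the numeric column as written.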
def main():
'''main method.'''
comps_dest = defaultdict(list)
dest_comps = get_perfect_comps()
for dest_comp in dest_comps:
for comp in dest_comp[1]:
comps_dest[comp].append(dest_comp[0])
result, _ = optimise(comps_dest)
print
for dest_comp in dest_comps:
print dest_comp[0] + ': ' + str(dest_comp[1])
print
for comp, dest_wells in comps_dest.iteritems():
print str(comp) + ': ' + str(dest_wells)
print
for comp, well in result.iteritems():
print str(comp) + ': ' + str(well)
if __name__ == '__main__':
main()
|
neilswainston/development-py
|
synbiochemdev/assembly/optimise.py
|
Python
|
mit
| 4,724
|
[
"VisIt"
] |
068a93d25e0352efeb7a3dbd5b9578a62ef99603ae79f86b6d99797de531a214
|
from __future__ import print_function, division
from sympy import factorial, sqrt, exp, S, assoc_laguerre, Float
def R_nl(n, l, r, Z=1):
"""
Returns the Hydrogen radial wavefunction R_{nl}.
n, l
quantum numbers 'n' and 'l'
r
radial coordinate
Z
atomic number (1 for Hydrogen, 2 for Helium, ...)
Everything is in Hartree atomic units.
Examples
========
>>> from sympy.physics.hydrogen import R_nl
>>> from sympy import var
>>> var("r Z")
(r, Z)
>>> R_nl(1, 0, r, Z)
2*sqrt(Z**3)*exp(-Z*r)
>>> R_nl(2, 0, r, Z)
sqrt(2)*(-Z*r + 2)*sqrt(Z**3)*exp(-Z*r/2)/4
>>> R_nl(2, 1, r, Z)
sqrt(6)*Z*r*sqrt(Z**3)*exp(-Z*r/2)/12
For Hydrogen atom, you can just use the default value of Z=1:
>>> R_nl(1, 0, r)
2*exp(-r)
>>> R_nl(2, 0, r)
sqrt(2)*(-r + 2)*exp(-r/2)/4
>>> R_nl(3, 0, r)
2*sqrt(3)*(2*r**2/9 - 2*r + 3)*exp(-r/3)/27
For Silver atom, you would use Z=47:
>>> R_nl(1, 0, r, Z=47)
94*sqrt(47)*exp(-47*r)
>>> R_nl(2, 0, r, Z=47)
47*sqrt(94)*(-47*r + 2)*exp(-47*r/2)/4
>>> R_nl(3, 0, r, Z=47)
94*sqrt(141)*(4418*r**2/9 - 94*r + 3)*exp(-47*r/3)/27
The normalization of the radial wavefunction is:
>>> from sympy import integrate, oo
>>> integrate(R_nl(1, 0, r)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 0, r)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 1, r)**2 * r**2, (r, 0, oo))
1
It holds for any atomic number:
>>> integrate(R_nl(1, 0, r, Z=2)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 0, r, Z=3)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 1, r, Z=4)**2 * r**2, (r, 0, oo))
1
"""
# sympify arguments
n, l, r, Z = S(n), S(l), S(r), S(Z)
# radial quantum number
n_r = n - l - 1
# rescaled "r"
a = 1/Z # Bohr radius
r0 = 2 * r / (n * a)
# normalization coefficient
C = sqrt((S(2)/(n*a))**3 * factorial(n_r) / (2*n*factorial(n + l)))
# This is an equivalent normalization coefficient, that can be found in
# some books. Both coefficients seem to be the same fast:
# C = S(2)/n**2 * sqrt(1/a**3 * factorial(n_r) / (factorial(n+l)))
return C * r0**l * assoc_laguerre(n_r, 2*l + 1, r0).expand() * exp(-r0/2)
def E_nl(n, Z=1):
"""
Returns the energy of the state (n, l) in Hartree atomic units.
The energy doesn't depend on "l".
Examples
========
>>> from sympy import var
>>> from sympy.physics.hydrogen import E_nl
>>> var("n Z")
(n, Z)
>>> E_nl(n, Z)
-Z**2/(2*n**2)
>>> E_nl(1)
-1/2
>>> E_nl(2)
-1/8
>>> E_nl(3)
-1/18
>>> E_nl(3, 47)
-2209/18
"""
n, Z = S(n), S(Z)
if n.is_integer and (n < 1):
raise ValueError("'n' must be positive integer")
return -Z**2/(2*n**2)
def E_nl_dirac(n, l, spin_up=True, Z=1, c=Float("137.035999037")):
"""
Returns the relativistic energy of the state (n, l, spin) in Hartree atomic
units.
The energy is calculated from the Dirac equation. The rest mass energy is
*not* included.
n, l
quantum numbers 'n' and 'l'
spin_up
True if the electron spin is up (default), otherwise down
Z
atomic number (1 for Hydrogen, 2 for Helium, ...)
c
speed of light in atomic units. Default value is 137.035999037,
taken from: http://arxiv.org/abs/1012.3627
Examples
========
>>> from sympy.physics.hydrogen import E_nl_dirac
>>> E_nl_dirac(1, 0)
-0.500006656595360
>>> E_nl_dirac(2, 0)
-0.125002080189006
>>> E_nl_dirac(2, 1)
-0.125000416028342
>>> E_nl_dirac(2, 1, False)
-0.125002080189006
>>> E_nl_dirac(3, 0)
-0.0555562951740285
>>> E_nl_dirac(3, 1)
-0.0555558020932949
>>> E_nl_dirac(3, 1, False)
-0.0555562951740285
>>> E_nl_dirac(3, 2)
-0.0555556377366884
>>> E_nl_dirac(3, 2, False)
-0.0555558020932949
"""
if not (l >= 0):
raise ValueError("'l' must be positive or zero")
if not (n > l):
raise ValueError("'n' must be greater than 'l'")
if (l == 0 and spin_up is False):
raise ValueError("Spin must be up for l==0.")
# skappa is sign*kappa, where sign contains the correct sign
if spin_up:
skappa = -l - 1
else:
skappa = -l
c = S(c)
beta = sqrt(skappa**2 - Z**2/c**2)
return c**2/sqrt(1 + Z**2/(n + skappa + beta)**2/c**2) - c**2
|
wolfram74/numerical_methods_iserles_notes
|
venv/lib/python2.7/site-packages/sympy/physics/hydrogen.py
|
Python
|
mit
| 4,497
|
[
"DIRAC"
] |
eb001789f4908a3b3bdb9b00c5ded02f0b5ffa2e947998beff1c1f143538cee8
|
#!/usr/bin/env python
# encoding: utf-8
#--------- non-linear em --------------------------
import numpy as np
import pylab as plt
# -------- GLOBAL SCALAR DEFINITIONS -----------------------------
# ======== all definitions are in m,s,g unit system.
save_folder = '_test_ato_1'
n_frames = 10
x_lower = 0.
x_upper = 10e-9 # length [m]
# ........ material properties ...................................
# vacuum
eo = 8.854187817e-12 # vacuum permittivity - [F/m]
mo = 4e-7*np.pi # vacuum permeability - [V.s/A.m]
co = 1.0/np.sqrt(eo*mo) # vacuum speed of light - [m/s]
zo = np.sqrt(mo/eo)
# material
mat_shape = 'atomic' # material definition: homogeneous, interface, gaussian, moving_gauss (moving perturbation), tanh, custom, multilayer, atomic
# background refractive index
bkg_er = 1.0 #1.5 #2.4
bkg_mr = 1.0 #1.5 #2.4
bkg_n = np.sqrt(bkg_er*bkg_mr)
bkg_e = eo*bkg_er
bkg_m = mo*bkg_mr
# if interface declare position
x_change = (x_upper-x_lower)/2
# atomic refractive index characteristics
r_a = 1.0e-10
eta_a = 1.0
alpha = 1.0
r_b = alpha*r_a
eta_b = 0.5
# set moving refractive index parameters
rip_vx_e = 0.61*co #0.0*co # replace here the value of x
rip_vx_m = rip_vx_e
rip_xoff_e = 15e-6
rip_xoff_m = rip_xoff_e
rip_xsig_e = 1e-6
rip_xsig_m = rip_xsig_e
s_x_e = rip_xsig_e**2
s_x_m = rip_xsig_m**2
prip = 0.1
deltan = prip*(bkg_n) # assumes epsilon = mu
d_e = deltan #*(2.0*1.5+deltan)
d_m = deltan #*(2.0*1.5+deltan)
# set multilayer parameters
# multilayered definition
n_layers = 2
layers = np.zeros([n_layers,7]) # _layer: eps mu N t chi2e chi2m chi3e chi3m
layers[0,0] = 1.0
layers[0,1] = 1.0
layers[0,2] = 11
layers[0,3] = 1.0
layers[1,0] = 0.5
layers[1,1] = 0.5
layers[1,2] = layers[0,2] - 1
layers[1,3] = 1.0
N_layers = int(np.sum(layers[:,2]))
print N_layers, n_layers
if mat_shape=='multilayer':
x_upper = N_layers*np.sum(layers[:,3])+layers[0,3]
tlp = np.sum(layers[:,3])
mlp = np.floor(tlp/.5e-10)
# set non-linear parameters of the material
chi2_e = 0.0 #0.01 #1e-2
chi3_e = 0.0 #0.001 #1e-4
chi2_m = 0.0 #0.01 #1e-2
chi3_m = 0.0 #0.001 #1e-4
# ........ excitation - initial conditoons .......................
ex_type = 'off'
alambda = 1e-6 # wavelength
ex_t_sig = 4.0e-14#1.0*alambda # width in time (pulse only)
ex_x_sig = 2.0*alambda # width in the x-direction (pulse)
ex_toff = 0.0 # offset in time
ex_xoff = 0.0 # offset in the x-direction
omega = 2.0*np.pi*co/alambda # frequency
k = 2.0*np.pi/alambda
amp_Ey = zo
amp_Hz = 1.0
# ........ pre-calculations for wave propagation .................
v_r = 1.0/(bkg_n-d_e)
v = co*v_r
ex_vx = v
ex_kx = k
# Grid - mesh settings
if mat_shape=='multilayer':
mx = np.floor((x_upper-x_lower)/.05e-10)
elif mat_shape=='atomic':
mx = np.floor(20.0*(x_upper-x_lower)/(r_a+r_b))+1
else:
mx = np.floor(250*(x_upper-x_lower)/alambda)
ddx = (x_upper-x_lower)/mx
ddt = 0.4/(co*np.sqrt(1.0/(ddx**2)))
max_steps = 1000000
t_final = (x_upper-x_lower)/v
print ddt
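# Rough orders of magnitude for the default 'atomic' configuration above
# (editor's note, computed from r_a = r_b = 1e-10 m and x_upper = 10e-9 m):
#   mx      = floor(20*(10e-9)/(2e-10)) + 1 = 1001 cells
#   ddx     = (10e-9)/1001                 ~ 1e-11 m
#   ddt     = 0.4*ddx/co                   ~ 1.3e-20 s
#   t_final = (x_upper-x_lower)/v          ~ 3e-17 s, i.e. a few thousand time steps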
# -------- GLOBAL FUNCTION DEFINITIONS --------------
# refractive index map definition function
def etar(t,x):
"""
eta = etar(t,x)
This function returns the refractive index map based on general definitions set earlier,
Gaussian cases support moving RIPs.
x are the coordinate of the grid centers state.grid.e_j.centers, e_j = x
aux holds:
0: epsilon
1: mu
2: epsilon_t
3: mu_t
"""
eta = np.empty( [4,len(x)], order='F')
if mat_shape=='moving_gauss':
u_x_e = x - rip_vx_e*t - rip_xoff_e
u_x_m = x - rip_vx_m*t - rip_xoff_m
u_e = (u_x_e/rip_xsig_e)**2
u_m = (u_x_m/rip_xsig_m)**2
u_e_t = 2*((rip_vx_e*u_x_e)/(rip_xsig_e**2))
u_m_t = 2*((rip_vx_m*u_x_m)/(rip_xsig_m**2))
eta[0,:] = d_e*np.exp(-u_e) + bkg_er
eta[1,:] = d_m*np.exp(-u_m) + bkg_mr
eta[2,:] = u_e_t*d_e*np.exp(-u_e)
eta[3,:] = u_m_t*d_m*np.exp(-u_m)
elif mat_shape=='gaussian':
u_x_e = x - rip_xoff_e
u_x_m = x - rip_xoff_m
u_e = (u_x_e/rip_xsig_e)**2
u_m = (u_x_m/rip_xsig_m)**2
eta[0,:] = d_e*np.exp(-u_e) + bkg_er
eta[1,:] = d_m*np.exp(-u_m) + bkg_mr
eta[2,:] = 0.
eta[3,:] = 0.
elif mat_shape=='homogeneous':
eta[0,:] = bkg_er
eta[1,:] = bkg_mr
eta[2,:] = 0.
eta[3,:] = 0.
elif mat_shape=='interface':
eta[0,:] = 1*(x<x_change) + 4*(x>=x_change)
eta[1,:] = 1*(x<x_change) + 4*(x>=x_change)
eta[2,:] = 0.
eta[3,:] = 0.
elif mat_shape=='multilayer':
for n in range(0,N_layers):
xi = n*tlp
for m in range(0,n_layers):
if m==0:
eta[0,:] = eta[0,:] + layers[m,0]*(xi<x)*(x<=xi+layers[m,3])
eta[1,:] = eta[1,:] + layers[m,1]*(xi<x)*(x<=xi+layers[m,3])
else:
eta[0,:] = eta[0,:] + layers[m,0]*((xi+layers[m-1,3])<x)*(x<=(xi+layers[m,3]))
eta[1,:] = eta[1,:] + layers[m,1]*((xi+layers[m-1,3])<x)*(x<=(xi+layers[m,3]))
eta[0,:] = layers[0,0]*(N_layers*tlp<x)*(x<=N_layers*tlp+layers[0,3])
eta[1,:] = layers[0,1]*(N_layers*tlp<x)*(x<=N_layers*tlp+layers[0,3])
eta[2,:] = 0.0
eta[3,:] = 0.0
elif mat_shape=='tanh':
#-((2 A v)/(1+Cosh[2 t v-2 x+2 xo]))
u_x_e = x - rip_vx_e*t - rip_xoff_e
u_x_m = x - rip_vx_m*t - rip_xoff_m
u_e_t = -2.0*(u_x_e)
u_m_t = -2.0*(u_x_m)
eta[0,:] = (d_e/2.0)*np.tanh(u_x_e) + bkg_er + (d_e/2.0)
eta[1,:] = (d_m/2.0)*np.tanh(u_x_m) + bkg_mr + (d_m/2.0)
eta[2,:] = -(d_e*rip_vx_e)/(1+np.cosh(u_e_t))
eta[3,:] = -(d_m*rip_vx_m)/(1+np.cosh(u_m_t))
elif mat_shape=='custom':
dd = x_upper-x_lower
eta[0,:] = d_e*np.cos(0.3e6*(x-dd/2.0)) + bkg_er
eta[1,:] = d_m*np.cos(0.3e6*(x-dd/2.0)) + bkg_mr
eta[2,:] = 0.0
eta[3,:] = 0.0
elif mat_shape=='atomic':
        # initialise all four aux rows; epsilon is modulated cell by cell below,
        # while mu and the time derivatives are assumed to stay at their background values
        eta[0,:] = 1.0
        eta[1,:] = 1.0
        eta[2,:] = 0.0
        eta[3,:] = 0.0
beta = 1.0
for j,xj in enumerate(x):
xj = xj - ddx
if xj>=((beta-1)*(r_a+r_b)) and xj<=(beta*(r_a+r_b)-r_b):
eta[0,j] = eta_a
# print j,xj
if xj>(beta*(r_a+r_b)-r_b) and xj<=(beta*(r_a+r_b)):
eta[0,j] = eta_b
# print j,xj
if xj>(beta*(r_a+r_b)):
beta = beta+1
# print beta
return eta
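# --- quick inspection sketch (editor's addition) ---
# The aux map returned by etar can be inspected on its own before running the
# solver, e.g. to verify the atomic epsilon profile. Kept commented out so the
# script's behaviour is unchanged:
#
#     x_test = np.linspace(x_lower, x_upper, int(mx))
#     eta_test = etar(0.0, x_test)
#     plt.plot(x_test, eta_test[0, :])   # row 0 = relative permittivity
#     plt.show()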
def update_aux(solver,state):
x = state.grid.x.centers
t = state.t
state.aux = setaux(t,x)
return state
# the next function may be redundant: setaux simply wraps etar
def setaux(t,x):
aux = np.empty( [4,len(x)], order='F')
aux[:,:] = etar(t,x)
return aux
def setaux_lower(state,dim,t,auxbc,num_ghost):
x = state.grid.x.centers_with_ghost(num_ghost)[:num_ghost]
auxbc[:,:num_ghost] = etar(t,x)
return auxbc
def setaux_upper(state,dim,t,auxbc,num_ghost):
x = state.grid.x.centers_with_ghost(num_ghost)[-num_ghost:]
auxbc[:,-num_ghost:] = etar(t,x)
return auxbc
def scattering_bc(state,dim,t,qbc,num_ghost):
"""
    EM scattering boundary conditions for the two field components Ey, Hz.
"""
grid = state.grid
x = grid.x.centers_with_ghost(num_ghost)[:num_ghost]
ts = state.t
t0 = 2.0e-14
if ex_type=='plane':
pulseshape = 1.0
harmonic = np.sin(ex_kx*x - omega*ts)
elif ex_type=='simple_pulse':
if t<=ex_t_sig:
pulseshape = 1.0
harmonic = np.sin(ex_kx*x - omega*ts)
else:
pulseshape = 0.0
harmonic = 0.0
elif ex_type=='off':
pulseshape = 0.0
harmonic = 0.0
else:
pulseshape = 0.0
harmonic = 0.0
qbc[0,:num_ghost] = amp_Ey*pulseshape*harmonic
qbc[1,:num_ghost] = amp_Hz*pulseshape*harmonic
return qbc
def qinit(state):
"""
Initial conditions in simulation grid for electromagnetic components q
"""
grid = state.grid
x = grid.x.centers
if ex_type=='off':
dd = (x_upper-x_lower)/2.0
# state.q[0,:] = zo*np.sin(k*x)
# state.q[1,:] = np.sin(k*x)
state.q[0,:] = zo*np.exp(-(x-dd/2.0)**2/((ex_x_sig)**2))
state.q[1,:] = np.exp(-(x-dd/2.0)**2/((ex_x_sig)**2))
elif ex_type=='gauss_cosine_pulse':
state.q[0,:] = zo*np.exp(-(x-3*ex_x_sig)**2/(ex_x_sig**2))*np.cos(ex_kx*(x-3*ex_x_sig))
state.q[1,:] = np.exp(-(x-3*ex_x_sig)**2/(ex_x_sig**2))*np.cos(ex_kx*(x-3*ex_x_sig))
elif ex_type=='gauss_pulse':
state.q[0,:] = zo*np.exp(-(x-3*ex_x_sig)**2/(ex_x_sig**2))
state.q[1,:] = np.exp(-(x-3*ex_x_sig)**2/(ex_x_sig**2))
else:
state.q[0,:] = 0.0
state.q[1,:] = 0.0
return state
# -------- MAIN SCRIPT --------------
def em1D(kernel_language='Fortran',iplot=False,htmlplot=False,use_petsc=True,save_outdir=save_folder,solver_type='sharpclaw',save_p='./_calculations',before_step=False,limiter=4,limiter_order=4):
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
# Solver settings
if solver_type=='classic':
solver=pyclaw.ClawSolver1D()
solver.dimensional_split=False
solver.limiters = pyclaw.limiters.tvd.MC
elif solver_type=='sharpclaw':
solver=pyclaw.SharpClawSolver1D()
solver.num_waves = 2
solver.weno_order = 5
solver.lim_type = 4
solver.interpolation_order = 4
solver.dt_initial = ddt/2
solver.dt_max = ddt
solver.max_steps = max_steps
import maxwell_1d_nl
solver.rp = maxwell_1d_nl
solver.fwave = True
solver.cfl_max = 0.55
solver.cfl_desired = 0.4
solver.dt_variable = True
print 'setup information:'
print 'v_wave=',v
print 'x_lim=',x_upper,' t_f=',t_final
print 'mx=',mx,'dx=',ddx,'dt=',ddt,'N_max=',max_steps
print 'lambda=',alambda,'freq=',omega
if before_step:
print 'update aux'
solver.call_before_step_each_stage = 1
solver.before_step = update_aux
# define number of waves (eqn) and aux (eps,mu)
num_eqn = 2
num_aux = 4
# print mx
# abstract domain and state setup
x_dime = pyclaw.Dimension('x',x_lower,x_upper,mx)
domain = pyclaw.Domain([x_dime])
state = pyclaw.State(domain,num_eqn,num_aux)
state.mp = 2
grid = state.grid
x = grid.x.centers
tini = state.t
state.aux = etar(tini,x)
plt.plot(state.aux[0,:])
plt.show()
state.problem_data['dx'] = x_dime.delta
state.problem_data['chi2_e_r'] = chi2_e
state.problem_data['chi3_e_r'] = chi3_e
state.problem_data['chi2_m_r'] = chi2_m
state.problem_data['chi3_m_r'] = chi3_m
state.problem_data['eo'] = eo
state.problem_data['mo'] = mo
state.problem_data['co'] = co
state.problem_data['zo'] = zo
# Boundary conditions
solver.bc_lower[0] = pyclaw.BC.custom
solver.bc_upper[0] = pyclaw.BC.extrap
solver.aux_bc_lower[0]=pyclaw.BC.custom
solver.aux_bc_upper[0]=pyclaw.BC.custom
solver.user_bc_lower = scattering_bc
solver.user_aux_bc_lower = setaux_lower
solver.user_aux_bc_upper = setaux_upper
print mx
#Initial condition
qinit(state)
#controller
claw = pyclaw.Controller()
claw.tfinal = t_final
claw.num_output_times = n_frames
claw.solver = solver
claw.solution = pyclaw.Solution(state,domain)
claw.write_aux_always = True
claw.outdir = save_outdir
# claw.output_style = 3
# claw.nstepout = 1
# claw.compute_p = ffields
# claw.outdir_p = save_p
status = claw.run()
if htmlplot: pyclaw.plot.html_plot(outdir=save_outdir)
if iplot: pyclaw.plot.interactive_plot(outdir=save_outdir)
return claw
if __name__=="__main__":
import sys
from clawpack.pyclaw.util import run_app_from_main
output = run_app_from_main(em1D)
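# --- usage note (editor's addition) ---
# run_app_from_main typically forwards key=value command-line arguments to em1D,
# so (assuming a standard clawpack/pyclaw installation and the compiled
# maxwell_1d_nl Riemann solver on the PYTHONPATH) a serial run without PETSc
# might look like:
#
#     python maxwell_ato.py use_petsc=0 iplot=1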
|
nthakkar/emclaw
|
maxwell_1d_source/maxwell_ato.py
|
Python
|
gpl-2.0
| 12,127
|
[
"Gaussian"
] |
af5d7e9af5c8042c6c06c0aaf5e935351f213deb779405c6e2c0d06ead194733
|
"""
This is the XROOTD StorageClass
"""
__RCSID__ = "$Id$"
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Resources.Utilities.Utils import checkArgumentFormat
from DIRAC.Resources.Storage.StorageBase import StorageBase
from DIRAC.Core.Utilities.Pfn import pfnparse, pfnunparse
from DIRAC.Core.Utilities.File import getSize
import os
from types import StringType, ListType, DictType
from XRootD import client
from XRootD.client.flags import DirListFlags, OpenFlags, MkDirFlags, QueryCode, StatInfoFlags
class XROOTStorage( StorageBase ):
""" .. class:: XROOTStorage
Xroot interface to StorageElement using pyxrootd
"""
def __init__( self, storageName, protocol, rootdir, host, port, spaceToken, wspath ):
""" c'tor
:param self: self reference
:param str storageName: SE name
:param str protocol: protocol to use
:param str rootdir: base path for vo files
:param str host: SE host
:param int port: port to use to communicate with :host:
:param str spaceToken: space token
:param str wspath: location of SRM on :host:
"""
# # init base class
StorageBase.__init__( self, storageName, rootdir )
self.log = gLogger.getSubLogger( "XROOTStorage", True )
# self.log.setLevel( "DEBUG" )
self.protocolName = 'XROOT'
self.protocol = protocol
self.host = host
self.port = port
self.wspath = wspath
self.spaceToken = spaceToken
    # Awful hack to cope, for the moment, with the inability of RSS to deal with anything other than SRM
self.port = ""
self.wspath = ""
self.spaceToken = ""
# The API instance to be used
self.xrootClient = client.FileSystem( host )
def exists( self, path ):
"""Check if the given path exists. The 'path' variable can be a string or a list of strings.
:param self: self reference
:param path: path (or list of path) on storage (it's a pfn root://blablabla)
:returns Failed dictionary: {pfn : errorMsg}
Successful dictionary: {pfn : bool}
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "XROOTStorage.exists: Checking the existence of %s path(s)" % len( urls ) )
successful = {}
failed = {}
for url in urls:
res = self.__singleExists( url )
# Check if there was a fatal error
if not res['OK']:
return res
      # No fatal error, let's check whether we could verify the existence
res = res['Value']
if res['OK']:
successful[url] = res['Value']
else: # something went wrong with the query
failed[url] = res['Message']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def __singleExists( self, path ):
"""Check if the given path exists. The 'path' variable can be a string or a list of strings.
:param self: self reference
:param path: path (only 1) on storage (it's a pfn root://blablabla)
:returns: A 2 level nested S_ structure :
S_ERROR if there is a fatal error
S_OK (S_OK (boolean exists)) a boolean whether it exists or not
S_OK (S_ERROR (errorMsg)) if there was a problem getting the information
"""
self.log.debug( "XROOTStorage.__singleExists: Determining whether %s exists." % path )
res = pfnparse( path )
if not res['OK']:
return res
pfnDict = res['Value']
xFilePath = '/'.join( [pfnDict['Path'], pfnDict['FileName']] )
status, statInfo = self.xrootClient.stat( xFilePath )
if status.ok:
self.log.debug( "XROOTStorage.__singleExists: Path exists." )
return S_OK( S_OK( True ) )
else:
# I don't know when the fatal flag is set, or if it is even ever set
if status.fatal:
errStr = "XROOTStorage.__singleExists: Completely failed to determine the existence of file."
self.log.fatal( errStr, "%s %s" % ( self.name, status.message ) )
return S_ERROR( errStr )
elif status.error:
# errno 3011 corresponds to the file not existing
if status.errno == 3011:
errStr = "XROOTStorage.__singleExists: Path does not exists"
self.log.debug( errStr, path )
return S_OK( S_OK( False ) )
else:
errStr = "XROOTStorage.__singleExists: Failed to determine the existence of file"
self.log.debug( errStr, status.message )
return S_OK( S_ERROR( errStr ) )
errStr = "XROOTStorage.__singleExists : reached end of method, should not happen"
self.log.error( errStr )
return S_ERROR ( errStr )
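  # --- usage sketch (editor's addition) ---
  # The two-level S_OK / S_ERROR convention used by the __single* helpers is
  # typically unwrapped by callers the way exists() does above:
  #
  #     res = self.__singleExists( url )
  #     if not res['OK']:        # first level: fatal error, abort the whole operation
  #       return res
  #     res = res['Value']       # second level: per-path outcome
  #     if res['OK']:
  #       successful[url] = res['Value']
  #     else:
  #       failed[url] = res['Message']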
#############################################################
#
# These are the methods for file manipulation
#
def isFile( self, path ):
"""Check if the given path exists and it is a file
:param self: self reference
:param path: path (or list of path) on storage
:returns: Successful dict {path : boolean}
Failed dict {path : error message }
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "XROOTStorage.isFile: Determining whether %s paths are files." % len( urls ) )
successful = {}
failed = {}
for url in urls:
res = self.__isSingleFile( url )
if not res['OK']:
return res
# No fatal error, nested structure
res = res['Value']
if res['OK']:
successful[url] = res['Value']
else: # something went wrong with the query
failed[url] = res['Message']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def __isSingleFile( self, path ):
"""Check if the given path exists and it is a file
:param self: self reference
:param path: single path on storage (pfn : root://...)
:returns: A 2 level nested S_ structure :
S_ERROR if there is a fatal error
S_OK (S_OK (boolean)) if it is a file or not
S_OK (S_ERROR (errorMsg)) if there was a problem getting the info
"""
self.log.debug( "XROOTStorage.__isSingleFile: Determining whether %s is a file." % path )
return self.__getSingleMetadata( path, 'File' )
def getFile( self, path, localPath = False ):
""" make a local copy of a storage :path:
:param self: self reference
:param str path: path (pfn root://) on storage
:param mixed localPath: if not specified, self.cwd
:returns Successful dict {path : size}
Failed dict {path : error message }
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "XROOTStorage.getFile: Trying to download %s files." % len( urls ) )
failed = {}
successful = {}
for src_url in urls:
fileName = os.path.basename( src_url )
if localPath:
dest_file = "%s/%s" % ( localPath, fileName )
else:
        # other plugins use os.getcwd() instead of self.cwd
        # -> that is an error there: self.cwd is the remote cwd, os.getcwd() is the right one here
dest_file = "%s/%s" % ( os.getcwd(), fileName )
res = self.__getSingleFile( src_url, dest_file )
if res['OK']:
successful[src_url] = res['Value']
else:
failed[src_url] = res['Message']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __getSingleFile( self, src_url, dest_file ):
""" do a real copy of storage file :src_url: to local fs under :dest_file:
:param self: self reference
:param str src_url: SE url to cp (root://...)
:param str dest_file: local fs path
:returns: S_ERROR(errorMsg) in case of any problem
S_OK(size of file) if all goes well
"""
self.log.info( "XROOTStorage.__getSingleFile: Trying to download %s to %s" % ( src_url, dest_file ) )
if not os.path.exists( os.path.dirname( dest_file ) ):
self.log.debug( "XROOTStorage.__getSingleFile: Local directory does not yet exist. Creating...", os.path.dirname( dest_file ) )
try:
os.makedirs( os.path.dirname( dest_file ) )
except OSError, error:
errStr = "XROOTStorage.__getSingleFile: Exception creation the destination directory"
self.log.exception( errStr, error )
return S_ERROR( errStr )
# Fetch the remote file size
    # Logically the local path should be created first,
    # but doing it this way gives a more coherent error in case the source is a directory
    # ("not a file" rather than "cannot delete local directory")
res = self.__getSingleFileSize( src_url )
if not res['OK']:
return res
# No fatal error, nested structure
res = res['Value']
# Error getting the size
if not res['OK']:
errStr = "XROOTStorage.__getSingleFile: Error getting the file size."
self.log.exception( errStr, res['Message'] )
return S_ERROR( errStr )
remoteSize = res['Value']
# I could also just use the Force option of the copy method of the API...
if os.path.exists( dest_file ):
self.log.debug( "XROOTStorage.__getSingleFile: Local file already exists. Removing...", dest_file )
try:
os.remove( dest_file )
except OSError, error:
errStr = "XROOTStorage.__getSingleFile: Exception removing the file."
self.log.exception( errStr, "%s" % error )
return S_ERROR( errStr )
status = self.xrootClient.copy( src_url, dest_file )
# For some reason, the copy method returns a tuple (status,None)
status = status[0]
if status.ok:
self.log.debug( 'XROOTStorage.__getSingleFile: Got a file from storage.' )
localSize = getSize( dest_file )
if localSize == remoteSize:
self.log.debug( "XROOTStorage.__getSingleFile: Post transfer check successful." )
return S_OK( localSize )
errorMessage = "XROOTStorage.__getSingleFile: Source and destination file sizes do not match (%s vs %s)." % ( remoteSize, localSize )
self.log.error( errorMessage, src_url )
else:
errorMessage = "XROOTStorage.__getSingleFile: Failed to get file from storage."
errStr = "%s %s" % ( status.message, status.errno )
self.log.error( errorMessage, errStr )
if os.path.exists( dest_file ):
self.log.debug( "XROOTStorage.__getSingleFile: Removing local file %s." % dest_file )
try:
os.remove( dest_file )
except OSError, error:
errorMessage = "XROOTStorage.__getSingleFile: Exception removing local file "
self.log.exception( errorMessage, error )
return S_ERROR( errorMessage )
def putFile( self, path, sourceSize = 0 ):
"""Put a copy of the local file to the current directory on the
physical storage
    :param path: dictionary {pfn (root://...) : localFile}
:param sourceSize : size in B (NOT USED)
:returns Successful dict {path : size}
Failed dict {path : error message }
S_ERROR(errMsg) in case of arguments problems
"""
if type( path ) is StringType:
return S_ERROR ( "XROOTStorage.putFile: path argument must be a dictionary (or a list of dictionary) { url : local path}" )
elif type( path ) is ListType:
if not len( path ):
return S_OK( { 'Failed' : {}, 'Successful' : {} } )
else:
urls = dict( [( url, False ) for url in path] )
elif type( path ) is DictType:
if len( path ) != 1:
return S_ERROR ( "XROOTStorage.putFile: path argument must be a dictionary (or a list of dictionary) { url : local path}" )
urls = path
failed = {}
successful = {}
for dest_url, src_file in urls.items():
res = self.__putSingleFile( src_file, dest_url, sourceSize )
if res['OK']:
successful[dest_url] = res['Value']
else:
failed[dest_url] = res['Message']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __putSingleFile( self, src_file, dest_url, sourceSize = 0 ):
"""Put a copy of the local file to the current directory on the
physical storage
:param str dest_file: pfn (root://...)
:param str src_file: local file to copy
:param int sourceSize: size in B (NOT USED)
:returns: S_OK( file size ) if everything went fine, S_ERROR otherwise
"""
self.log.debug( "XROOTStorage.__putSingleFile: trying to upload %s to %s" % ( src_file, dest_url ) )
# We create the folder first
res = pfnparse( dest_url )
if not res['OK']:
return res
pfnDict = res['Value']
    # There is a bug in xrootd-python-0.1.2-1 (fixed in the master branch) which
    # prevents the MAKEPATH flag from working.
status = self.xrootClient.mkdir( pfnDict['Path'], MkDirFlags.MAKEPATH )
# the API returns (status,None...)
status = status[0]
if status.fatal:
errStr = "XROOTStorage.__putSingleFile: Completely failed to create the destination folder."
gLogger.error( errStr, status.message )
return S_ERROR( errStr )
# if it is only an error(Folder exists...), we try to keep going
if status.error:
errStr = "XROOTStorage.__putSingleFile: failed to create the destination folder."
gLogger.debug( errStr, status.message )
# Now we check if there is already a remote file. If yes, we remove it
res = self.__singleExists( dest_url )
if not res['OK']:
return res
# No fatal error, nested structure
res = res['Value']
if not res['OK']:
errStr = "XROOTStorage.__putSingleFile: failed to determine pre-existance of remote file."
gLogger.debug( errStr, res['Message'] )
# This is true only if the file exists. Then we remove it
if res['Value']:
self.log.debug( "XROOTStorage.__putSingleFile: Remote file exists and needs to be removed" )
res = self.__removeSingleFile( dest_url )
# Fatal error during removal
if not res['OK']:
return res
else:
res = res['Value']
if not res['OK']:
self.log.debug( "XROOTStorage.__putSingleFile: Failed to remove remote file", res['Message'] )
else:
self.log.debug( "XROOTStorage.__putSingleFile: Successfully removed remote file" )
# get the absolute path needed by the xroot api
src_file = os.path.abspath( src_file )
if not os.path.exists( src_file ):
errStr = "XROOTStorage.__putSingleFile: The local source file does not exist."
gLogger.error( errStr, src_file )
return S_ERROR( errStr )
sourceSize = getSize( src_file )
if sourceSize == -1:
errStr = "XROOTStorage.__putSingleFile: Failed to get file size."
gLogger.error( errStr, src_file )
return S_ERROR( errStr )
# Perform the copy with the API
status = self.xrootClient.copy( src_file, dest_url )
# For some reason, the copy method returns a tuple (status,None)
status = status[0]
if status.ok:
self.log.debug( 'XROOTStorage.__putSingleFile: Put file on storage.' )
res = self.__getSingleFileSize( dest_url )
# There was a fatal error
if not res['OK']:
return res
# No fatal error, let see if we could get the size
res = res['Value']
if res['OK']: # we could get the size for that url
remoteSize = res['Value']
else:
errMsg = "XROOTStorage.__putSingleFile: Could not get remote file size"
        self.log.error( errMsg, res['Message'] )
return S_ERROR( "Could not get remote file size" )
if sourceSize == remoteSize:
self.log.debug( "XROOTStorage.__putSingleFile: Post transfer check successful." )
return S_OK( sourceSize )
errorMessage = "XROOTStorage.__putSingleFile: Source and destination file sizes do not match (%s vs %s)." % ( sourceSize, remoteSize )
self.log.error( errorMessage, src_file )
else:
errorMessage = "XROOTStorage.__putSingleFile: Failed to put file on storage."
errStr = "%s %s" % ( status.message, status.errno )
self.log.error( errorMessage, errStr )
res = self.__singleExists( dest_url )
if not res['OK']:
return res
# This is true only if the file exists. Then we remove it
if res['Value'] == True:
self.log.debug( "XROOTStorage.__putSingleFile: Removing remote residual file.", dest_url )
res = self.__removeSingleFile( dest_url )
# Fatal error during removal
if not res['OK']:
return res
else:
res = res['Value']
          if res['OK']:
            self.log.debug( "XROOTStorage.__putSingleFile: Successfully removed remote file.", dest_url )
          else:
            self.log.debug( "XROOTStorage.__putSingleFile: Failed to remove remote file.", dest_url )
return S_ERROR( errorMessage )
def removeFile( self, path ):
"""Remove physically the file specified by its path
A non existing file will be considered as successfully removed.
:param path: path (or list of path) on storage (pfn : root://...)
:returns Successful dict {path : True}
Failed dict {path : error message }
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
gLogger.debug( "RFIOStorage.removeFile: Attempting to remove %s files." % len( urls ) )
failed = {}
successful = {}
for url in urls:
res = self.__removeSingleFile( url )
# The removal did not have a big problem
if res['OK']:
res = res['Value']
# We could perform the removal
if res['OK']:
successful[url] = res['Value']
else:
failed [url] = res['Message']
else:
return res
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __removeSingleFile( self, path ):
"""Remove physically the file specified by its path
:param path: path on storage (pfn : root://...)
:returns: A 2 level nested S_ structure :
S_ERROR if there is a fatal error
S_OK (S_OK (True)) if the file is not present anymore (deleted or did not exist)
S_OK (S_ERROR (errorMsg)) if there was a problem removing the file
"""
res = pfnparse( path )
if not res['OK']:
return res
pfnDict = res['Value']
xFilePath = '/'.join( [pfnDict['Path'], pfnDict['FileName']] )
status = self.xrootClient.rm( xFilePath )
# For some reason, the rm method returns a tuple (status,None)
status = status[0]
if status.ok:
self.log.debug( "XROOTStorage.__removeSingleFile: Successfully removed file: %s" % path )
return S_OK( S_OK( True ) )
else:
# I don't know when the fatal flag is set, or if it is even ever set
if status.fatal:
errStr = "XROOTStorage.__removeSingleFile: Completely failed to remove the file."
self.log.fatal( errStr, "%s %s" % ( self.name, status.message ) )
return S_ERROR( errStr )
elif status.error:
# errno 3011 corresponds to the file not existing
if status.errno == 3011:
self.log.debug( "XROOTStorage.__removeSingleFile: File does not exist" )
return S_OK( S_OK( True ) )
else:
errStr = "XROOTStorage.__removeSingleFile: Failed to remove the file"
self.log.debug( errStr, status.message )
return S_OK( S_ERROR( errStr ) )
return S_ERROR ( "XROOTStorage.__removeSingleFile: reached the end of the method, should not happen" )
def getFileMetadata( self, path ):
""" Get metadata associated to the file(s)
:param self: self reference
:param path: path (or list of path) on storage (pfn : root://...)
:returns Successful dict {path : metadata}
Failed dict {path : error message }
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
failed = {}
successful = {}
for url in urls:
res = self.__getSingleFileMetadata( url )
if not res['OK']:
errStr = "XROOTStorage.getPathMetadata: Completely failed to get path metadata."
gLogger.error( errStr, res['Message'] )
return S_ERROR( errStr )
# There were no fatal errors, so now we see if there were any other errors
res = res['Value']
if not res['OK']:
failed[url] = res['Message']
else:
successful[url] = res['Value']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __getSingleMetadata( self, path, expectedType = None ):
""" Fetches the metadata of a single file or directory
If expectedType is None (default), then we fetch the metadata and return them.
If it is set, then we return a S_OK(boolean) depending on whether the type matches or not
:param self: self reference
:param path: path (only 1) on storage (pfn : root://...)
:param: expectedType : type that we expect the path to be ('File' or 'Directory')
:returns: A 2 level nested S_ structure :
S_ERROR if there is a fatal error
S_OK (S_OK (MetadataDict)) if we could get the metadata
S_OK (S_OK (Bool)) if we could get the metadata and is of type expectedType
               S_OK (S_ERROR (errorMsg)) if there was a problem getting the metadata
"""
if expectedType and expectedType not in ['File', 'Directory']:
return S_ERROR( "XROOTStorage.__getSingleMetadata : the 'expectedType' argument must be either 'File' or 'Directory'" )
res = pfnparse( path )
if not res['OK']:
return res
pfnDict = res['Value']
xFilePath = '/'.join( [pfnDict['Path'], pfnDict['FileName']] )
status, statInfo = self.xrootClient.stat( xFilePath )
if status.ok:
# Transform the api output into a dictionary
metadataDict = self.__parseStatInfoFromApiOutput( statInfo )
# If we expect a given type, we return a boolean
if expectedType:
isExpectedType = metadataDict[expectedType]
return S_OK( S_OK( isExpectedType ) )
      # otherwise we return the metadata dictionary
return S_OK( S_OK( metadataDict ) )
else:
# I don't know when the fatal flag is set, or if it is even ever set
if status.fatal:
errStr = "XROOTStorage.__getSingleMetadata: Completely failed to get path metadata."
self.log.fatal( errStr, "%s %s" % ( self.name, status.message ) )
return S_ERROR( errStr )
elif status.error:
# errno 3011 corresponds to the file not existing
if status.errno == 3011:
errStr = "XROOTStorage.__getSingleMetadata: Path does not exist"
else:
errStr = "XROOTStorage.__getSingleMetadata: Error in querying: %s" % status.message
self.log.debug( errStr )
return S_OK( S_ERROR( errStr ) )
return S_ERROR( "XROOTStorage.__getSingeFileMetadata : reached end of method. Should not happen" )
def __getSingleFileMetadata( self, path ):
""" Fetch the metadata associated to the file
:param self: self reference
:param path: path (only 1) on storage (pfn : root://...)
:returns: A 2 level nested S_ structure :
S_ERROR if there is a fatal error
S_OK (S_OK (MetadataDict)) if we could get the metadata
S_OK (S_ERROR (errorMsg)) if there was a problem getting the metadata or if it is not a file
"""
res = self.__getSingleMetadata( path )
if not res['OK']:
return res
# No fatal error, nested structure
res = res['Value']
if not res['OK']:
return S_OK( res )
metadataDic = res['Value']
# If it is not a file
if not metadataDic['File']:
errStr = "XROOTStorage.__getSingleFileMetadata: Supplied path is not a file."
self.log.error( errStr, path )
return S_OK( S_ERROR( errStr ) )
return S_OK( S_OK( metadataDic ) )
def __parseStatInfoFromApiOutput( self, statInfo ):
""" Split the content of the statInfo object into a dictionary
:param self: self reference
:param statInfo: XRootD.client.responses.StatInfo returned by the API
:returns: a dictionary. List of keys :
ModTime (str)
ModTimeStr (str)
Id (int)
Size (int)
Executable (bool)
Directory (bool)
Other (bool)
File (bool)
Offline (bool)
PoscPending (bool)
Readable (bool)
Writable (bool)
"""
metadataDict = {'File' : False, 'Directory' : False}
metadataDict['ModTime'] = statInfo.modtime
metadataDict['ModTimeStr'] = statInfo.modtimestr
metadataDict['Id'] = statInfo.id
metadataDict['Size'] = statInfo.size
statFlags = statInfo.flags
metadataDict['Executable'] = bool( statFlags & StatInfoFlags.X_BIT_SET )
metadataDict['Directory'] = bool( statFlags & StatInfoFlags.IS_DIR )
metadataDict['Other'] = bool( statFlags & StatInfoFlags.OTHER )
metadataDict['File'] = ( not metadataDict['Other'] and not metadataDict['Directory'] )
metadataDict['Offline'] = bool( statFlags & StatInfoFlags.OFFLINE )
metadataDict['PoscPending'] = bool( statFlags & StatInfoFlags.POSC_PENDING )
metadataDict['Readable'] = bool( statFlags & StatInfoFlags.IS_READABLE )
metadataDict['Writable'] = bool( statFlags & StatInfoFlags.IS_WRITABLE )
return metadataDict
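  # --- illustrative note (editor's addition) ---
  # For a plain file the parsed dictionary looks schematically like:
  #   { 'File': True, 'Directory': False, 'Other': False,
  #     'Size': <size in bytes>, 'ModTime': <epoch>, 'ModTimeStr': <string>,
  #     'Id': <id>, 'Executable': <bool>, 'Offline': <bool>,
  #     'PoscPending': <bool>, 'Readable': <bool>, 'Writable': <bool> }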
def getFileSize( self, path ):
"""Get the physical size of the given file
:param self: self reference
:param path: path (or list of path) on storage (pfn : root://...)
:returns Successful dict {path : size}
Failed dict {path : error message }
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
failed = {}
successful = {}
for url in urls:
res = self.__getSingleFileSize( url )
# if there is a fatal error getting the size
if not res['OK']:
errStr = "XROOTStorage.getFileSize: Completely failed to get file size."
gLogger.error( errStr, res['Message'] )
return S_ERROR( errStr )
# There was no fatal error, so we see if we could get the size
res = res['Value']
if not res['OK']:
failed[url] = res['Message']
else:
successful[url] = res['Value']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __getSingleFileSize( self, path ):
"""Get the physical size of the given file
:param self: self reference
:param path: single path on storage (pfn : root://...)
:returns: A 2 level nested S_ structure :
S_ERROR if there is a fatal error
S_OK (S_OK (size)) if we could get the size
               S_OK (S_ERROR (errorMsg)) if there was a problem getting the size
"""
# We fetch all the metadata
res = self.__getSingleFileMetadata( path )
# If there was a fatal error
if not res['OK']:
errStr = "XROOTStorage.__getSingleFileSize: Completely failed to get file size."
gLogger.error( errStr, res['Message'] )
return S_ERROR( errStr )
# No fatal error, so we check if the api called succeded
res = res['Value']
# We could not get the metadata
if not res['OK']:
return S_OK( S_ERROR( res['Message'] ) )
else:
return S_OK( S_OK( res['Value']['Size'] ) )
def getTransportURL( self, path, protocols = False ):
""" obtain the tURLs for the supplied path and protocols
:param self: self reference
:param str path: path on storage (pfn : root://...)
:param mixed protocols: protocols to use (must be or include 'root')
:returns Successful dict {path : path}
Failed dict {path : error message }
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
if protocols:
if type( protocols ) is StringType:
if protocols != self.protocol:
return S_ERROR( "getTransportURL: Must supply desired protocols to this plug-in (%s)." % self.protocol )
elif type( protocols ) is ListType:
if self.protocol not in protocols:
return S_ERROR( "getTransportURL: Must supply desired protocols to this plug-in (%s)." % self.protocol )
# For the time being, I assume I should not check whether the file exists or not
# So I just return the list of urls keys
successful = dict( [rootUrl, rootUrl] for rootUrl in urls )
failed = {}
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
##################################################################
#
# DO NOT REALLY MAKE SENSE FOR XROOT
#
##################################################################
def prestageFile( self, *parms, **kws ):
""" Issue prestage request for file
"""
return S_ERROR( "XROOTStorage.prestageFile: not implemented!" )
def prestageFileStatus( self, *parms, **kws ):
""" Obtain the status of the prestage request
"""
return S_ERROR( "XROOTStorage.prestageFile: not implemented!" )
def pinFile( self, *parms, **kws ):
""" Pin the file on the destination storage element
"""
return S_ERROR( "XROOTStorage.prestageFile: not implemented!" )
def releaseFile( self, *parms, **kws ):
""" Release the file on the destination storage element
"""
return S_ERROR( "XROOTStorage.prestageFile: not implemented!" )
#
##################################################################
#############################################################
#
# These are the methods for directory manipulation
#
def isDirectory( self, path ):
"""Check if the given path exists and it is a directory
:param self: self reference
:param path: path (or list of path) on storage (pfn : root://...)
:returns: Successful dict {path : boolean}
Failed dict {path : error message }
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "XROOTStorage.isDirectory: Determining whether %s paths are directories." % len( urls ) )
successful = {}
failed = {}
for url in urls:
res = self.__isSingleDirectory( url )
if not res['OK']:
return res
# No fatal error, nested structure
res = res['Value']
if res['OK']:
successful[url] = res['Value']
else: # something went wrong with the query
failed[url] = res['Message']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def __isSingleDirectory( self, path ):
"""Check if the given path exists and it is a file
:param self: self reference
:param path: single path on storage (pfn : root://...)
:returns: A 2 level nested S_ structure :
S_ERROR if there is a fatal error
S_OK (S_OK (boolean)) if it is a directory or not
               S_OK (S_ERROR (errorMsg)) if there was a problem getting the info
We could have called 'not __isSingleFile', but since the API
offers Directory, File and Other, we don't take the risk
"""
return self.__getSingleMetadata( path, 'Directory' )
def getDirectory( self, path, localPath = False ):
"""Get locally a directory from the physical storage together with all its
files and subdirectories.
:param: path: path (or list of path) on storage (pfn : root://...)
:param: localPath: local path where to store what is downloaded
    :return: successful and failed dictionaries. The keys are the paths,
              the values are dictionaries {'Files': number of files downloaded, 'Size': amount of data downloaded}
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "XROOTStorage.getDirectory: Attempting to get local copies of %s directories." % len( urls ) )
failed = {}
successful = {}
for src_dir in urls:
dirName = os.path.basename( src_dir )
if localPath:
dest_dir = "%s/%s" % ( localPath, dirName )
else:
# The other storage objects use os.getcwd(), I think it is a bug
# -> no, self.cwd is remote
dest_dir = "%s/%s" % ( os.getcwd(), dirName )
res = self.__getSingleDirectory( src_dir, dest_dir )
if res['OK']:
if res['Value']['AllGot']:
self.log.debug( "XROOTStorage.getDirectory: Successfully got local copy of %s" % src_dir )
successful[src_dir] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
else:
self.log.error( "XROOTStorage.getDirectory: Failed to get entire directory.", src_dir )
failed[src_dir] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
else:
self.log.error( "XROOTStorage.getDirectory: Completely failed to get local copy of directory.", src_dir )
failed[src_dir] = {'Files':0, 'Size':0}
return S_OK( {'Failed' : failed, 'Successful' : successful } )
def __getSingleDirectory( self, src_dir, dest_dir ):
"""Download a single directory recursively
:param self: self reference
:param src_dir : remote directory to download (root://...)
:param dest_dir: local destination path
:returns: S_ERROR if there is a fatal error
S_OK (statistics dictionary ) if we could download something :
'AllGot': boolean of whether we could download everything
'Files': amount of files received
'Size': amount of data received
"""
self.log.debug( "XROOTStorage.__getSingleDirectory: Attempting to download directory %s at %s" % ( src_dir, dest_dir ) )
filesReceived = 0 # counter for the amount of files received
sizeReceived = 0 # counter for the data size received
# Check the remote directory exists
res = self.__isSingleDirectory( src_dir )
if not res['OK']:
errStr = "XROOTStorage.__getSingleDirectory: Completely failed (fatal error) to find the supplied source directory."
self.log.error( errStr, src_dir )
return S_ERROR( errStr )
# No fatal error, nested return
res = res['Value']
if not res['OK']:
errStr = "XROOTStorage.__getSingleDirectory: Failed to find the supplied source directory."
self.log.error( errStr, src_dir )
return S_ERROR( errStr )
# res['Value'] is True if it is a directory
if not res['Value']:
errStr = "XROOTStorage.__getSingleDirectory: The supplied source is not a directory."
self.log.error( errStr, src_dir )
return S_ERROR( errStr )
# Check the local directory exists and create it if not
if not os.path.exists( dest_dir ):
try:
os.makedirs( dest_dir )
except OSError, error:
errStr = "XROOTStorage.__getSingleDirectory: Exception creation the destination directory %s" % error
self.log.exception( errStr )
return S_ERROR( errStr )
# Get the remote directory contents
res = self.__listSingleDirectory( src_dir )
if not res['OK']:
errStr = "XROOTStorage.__getSingleDirectory: Failed to list the source directory."
self.log.error( errStr, src_dir )
return S_ERROR( errStr )
sFilesDict = res['Value']['Files']
subDirsDict = res['Value']['SubDirs']
# First get all the files in the directory
receivedAllFiles = True
self.log.debug( "XROOTStorage.__getSingleDirectory: Trying to first download the %s files." % len( sFilesDict ) )
for sFile in sFilesDict:
      # Returns S_OK(file size) if it worked
res = self.__getSingleFile( sFile, "/".join( [ dest_dir, os.path.basename( sFile ) ] ) )
if res['OK']:
filesReceived += 1
sizeReceived += res['Value']
else:
receivedAllFiles = False
# Then recursively get the sub directories
receivedAllDirs = True
self.log.debug( "XROOTStorage.__getSingleDirectory: Trying to recursively download the %s folder." % len( subDirsDict ) )
for subDir in subDirsDict:
subDirName = os.path.basename( subDir )
localPath = '%s/%s' % ( dest_dir, subDirName )
res = self.__getSingleDirectory( subDir, localPath )
if not res['OK']:
receivedAllDirs = False
if res['OK']:
if not res['Value']['AllGot']:
receivedAllDirs = False
filesReceived += res['Value']['Files']
sizeReceived += res['Value']['Size']
# Check whether all the operations were successful
if receivedAllDirs and receivedAllFiles:
allGot = True
else:
allGot = False
resDict = {'AllGot':allGot, 'Files':filesReceived, 'Size':sizeReceived}
return S_OK( resDict )
def putDirectory( self, path ):
""" puts a or several local directory to the physical storage together with all its files and subdirectories
:param self: self reference
    :param str path: dictionary {pfn (root://...) : local dir}
    :return: successful and failed dictionaries. The keys are the paths,
        the values are dictionaries {'Files': number of files uploaded, 'Size': amount of data uploaded}
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
self.log.debug( "XROOTStorage.putDirectory: Attemping to put %s directories to remote storage." % len( urls ) )
for destDir, sourceDir in urls.items():
res = self.__putSingleDirectory( sourceDir, destDir )
if res['OK']:
if res['Value']['AllPut']:
self.log.debug( "XROOTStorage.putDirectory: Successfully put directory to remote storage: %s" % destDir )
successful[destDir] = { 'Files' : res['Value']['Files'], 'Size' : res['Value']['Size']}
else:
self.log.error( "XROOTStorage.putDirectory: Failed to put entire directory to remote storage.", destDir )
failed[destDir] = { 'Files' : res['Value']['Files'], 'Size' : res['Value']['Size']}
else:
self.log.error( "XROOTStorage.putDirectory: Completely failed to put directory to remote storage.", destDir )
failed[destDir] = { "Files" : 0, "Size" : 0 }
return S_OK( { "Failed" : failed, "Successful" : successful } )
def __putSingleDirectory( self, src_directory, dest_directory ):
""" puts one local directory to the physical storage together with all its files and subdirectories
:param self: self reference
:param src_directory : the local directory to copy
:param dest_directory: pfn (root://...) where to copy
:returns: S_ERROR if there is a fatal error
S_OK (statistics dictionary ) if we could upload something :
'AllPut': boolean of whether we could upload everything
'Files': amount of files uploaded
'Size': amount of data uploaded
"""
self.log.debug( "XROOTStorage.__putSingleDirectory: trying to upload %s to %s" % ( src_directory, dest_directory ) )
filesPut = 0
sizePut = 0
# Check the local directory exists
if not os.path.isdir( src_directory ):
errStr = "XROOTStorage.__putSingleDirectory: The supplied source directory does not exist or is not a directory."
self.log.error( errStr, src_directory )
return S_ERROR( errStr )
# Get the local directory contents
contents = os.listdir( src_directory )
allSuccessful = True
directoryFiles = {}
for fileName in contents:
self.log.debug( "FILENAME %s" % fileName )
localPath = '%s/%s' % ( src_directory, fileName )
remotePath = '%s/%s' % ( dest_directory, fileName )
if not os.path.isdir( localPath ):
directoryFiles[remotePath] = localPath
else:
res = self.__putSingleDirectory( localPath, remotePath )
if not res['OK']:
errStr = "XROOTStorage.__putSingleDirectory: Failed to put directory to storage."
self.log.error( errStr, res['Message'] )
else:
if not res['Value']['AllPut']:
allSuccessful = False
filesPut += res['Value']['Files']
sizePut += res['Value']['Size']
if directoryFiles:
res = self.putFile( directoryFiles )
if not res['OK']:
self.log.error( "XROOTStorage.__putSingleDirectory: Failed to put files to storage.", res['Message'] )
allSuccessful = False
else:
for fileSize in res['Value']['Successful'].itervalues():
filesPut += 1
sizePut += fileSize
if res['Value']['Failed']:
allSuccessful = False
return S_OK( { 'AllPut' : allSuccessful, 'Files' : filesPut, 'Size' : sizePut } )
def createDirectory( self, path ):
""" Make a/several new directory on the physical storage
This method creates all the intermediate directory
:param self: self reference
:param str path: path (or list of path) on storage (pfn : root://...)
:returns Successful dict {path : True}
Failed dict {path : error message }
"""
urls = checkArgumentFormat( path )
if not urls['OK']:
return urls
urls = urls['Value']
successful = {}
failed = {}
self.log.debug( "XROOTStorage.createDirectory: Attempting to create %s directories." % len( urls ) )
for url in urls:
res = self.__createSingleDirectory( url )
if res['OK']:
self.log.debug( "XROOTStorage.createDirectory: Successfully created directory on storage: %s" % url )
successful[url] = True
else:
self.log.error( "XROOTStorage.createDirectory: Failed to create directory on storage.",
"%s: %s" % ( url, res['Message'] ) )
failed[url] = res['Message']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __createSingleDirectory( self, path ):
""" Make a new directory on the physical storage
        This method creates all the intermediate directories
:param self: self reference
:param str path: single path on storage (pfn : root://...)
:returns S_OK() if all went well
S_ERROR(errMsg) in case of any problem
"""
self.log.debug( "XROOTStorage.__createSingleDirectory: Attempting to create directory %s." % path )
res = pfnparse( path )
if not res['OK']:
return res
pfnDict = res['Value']
xFilePath = '/'.join( [pfnDict['Path'], pfnDict['FileName']] )
status = self.xrootClient.mkdir( xFilePath, MkDirFlags.MAKEPATH )
if status.ok:
return S_OK()
else:
if status.fatal:
errMsg = "XROOTStorage.__createSingleDir : Completely failed to create directory"
else:
errMsg = "XROOTStorage.__createSingleDir : failed to create directory"
self.log.error( errMsg, status.message )
return S_ERROR( errMsg )
def removeDirectory( self, path, recursive = False ):
"""Remove a directory on the physical storage together with all its files and
subdirectories.
:param path : single or list of path (root://..)
:param recursive : if True, we recursively delete the subdir
    :return: successful and failed dictionaries. The keys are the paths,
        the values are dictionaries {'Files': number of files deleted, 'Size': amount of data deleted}
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "XROOTStorage.removeDirectory: Attempting to remove %s directories." % len( urls ) )
successful = {}
failed = {}
for url in urls:
res = self.__removeSingleDirectory( url, recursive )
if res['OK']:
if res['Value']['AllRemoved']:
self.log.debug( "XROOTStorage.removeDirectory: Successfully removed %s" % url )
successful[url] = {'FilesRemoved':res['Value']['FilesRemoved'], 'SizeRemoved':res['Value']['SizeRemoved']}
else:
self.log.error( "XROOTStorage.removeDirectory: Failed to remove entire directory.", path )
failed[url] = {'FilesRemoved':res['Value']['FilesRemoved'], 'SizeRemoved':res['Value']['SizeRemoved']}
else:
self.log.error( "XROOTStorage.removeDirectory: Completely failed to remove directory.", url )
failed[url] = {'FilesRemoved':0, 'SizeRemoved':0}
return S_OK( {'Failed' : failed, 'Successful' : successful } )
def __removeSingleDirectory( self, path, recursive = False ):
"""Remove a directory on the physical storage together with all its files and
subdirectories.
:param path: pfn (root://...) of a directory to remove
:param recursive : if True, we recursively delete the subdir
:returns: S_ERROR if there is a fatal error
S_OK (statistics dictionary ) if we could upload something :
'AllRemoved': boolean of whether we could delete everything
'FilesRemoved': amount of files deleted
'SizeRemoved': amount of data deleted
"""
filesRemoved = 0
sizeRemoved = 0
# Check the remote directory exists
res = self.__isSingleDirectory( path )
if not res['OK']:
errStr = "XROOTStorage.__removeSingleDirectory: Completely failed (fatal error) to find the directory."
self.log.error( errStr, path )
return S_ERROR( errStr )
# No fatal error, nested return
res = res['Value']
if not res['OK']:
errStr = "XROOTStorage.__removeSingleDirectory: Failed to find the directory."
self.log.error( errStr, path )
return S_ERROR( errStr )
# res['Value'] is True if it is a directory
if not res['Value']:
errStr = "XROOTStorage.__removeSingleDirectory: The supplied path is not a directory."
self.log.error( errStr, path )
return S_ERROR( errStr )
# Get the remote directory contents
res = self.__listSingleDirectory( path )
if not res['OK']:
errStr = "XROOTStorage.__removeSingleDirectory: Failed to list the directory."
self.log.error( errStr, path )
return S_ERROR( errStr )
sFilesDict = res['Value']['Files']
subDirsDict = res['Value']['SubDirs']
removedAllFiles = True
removedAllDirs = True
allRemoved = True
# if recursive, we call ourselves on all the subdirs
if recursive:
# Recursively remove the sub directories
self.log.debug( "XROOTStorage.__removeSingleDirectory: Trying to recursively remove %s folder." % len( subDirsDict ) )
for subDir in subDirsDict:
subDirName = os.path.basename( subDir )
localPath = '%s/%s' % ( path, subDirName )
res = self.__removeSingleDirectory( localPath, recursive ) # recursive should be true..
if not res['OK']:
removedAllDirs = False
if res['OK']:
if not res['Value']['AllRemoved']:
removedAllDirs = False
filesRemoved += res['Value']['FilesRemoved']
sizeRemoved += res['Value']['SizeRemoved']
# Remove all the files in the directory
self.log.debug( "XROOTStorage.__removeSingleDirectory: Trying to remove %s files." % len( sFilesDict ) )
for sFile in sFilesDict:
      # __removeSingleFile returns a nested S_OK(S_OK(True)) if it worked
res = self.__removeSingleFile( sFile )
if not res['OK']:
return res
# Nothing fatal, nested structure
res = res['Value']
if res['OK']:
filesRemoved += 1
sizeRemoved += sFilesDict[sFile]['Size']
else:
removedAllFiles = False
# Check whether all the operations were successful
if removedAllDirs and removedAllFiles:
allRemoved = True
else:
allRemoved = False
# Now I try to remove the directory itself
# We do it only if :
# - we go recursive, and everything was deleted
# - we don't go recursive, but we deleted all files and there are no subfolders
if ( recursive and allRemoved ) or ( not recursive and removedAllFiles and ( len( subDirsDict ) == 0 ) ):
res = pfnparse( path )
if not res['OK']:
return res
pfnDict = res['Value']
xFilePath = '/'.join( [pfnDict['Path'], pfnDict['FileName']] )
status = self.xrootClient.rmdir( xFilePath )
# For some reason, the rmdir method returns a tuple (status,None)
status = status[0]
if not status.ok:
if status.errno == 3011:
errStr = "XROOTStorage.__removeSingleDirectory: File does not exist"
self.log.debug( errStr )
else:
errStr = "XROOTStorage.__removeSingleDirectory: Error in querying: %s" % status.message
self.log.debug( errStr )
allRemoved = False
resDict = {'AllRemoved': allRemoved, 'FilesRemoved': filesRemoved, 'SizeRemoved': sizeRemoved}
return S_OK( resDict )
def listDirectory( self, path ):
""" List the supplied path
CAUTION : It is not recursive!
:param path : single or list of path (root://..)
    :return: successful and failed dictionaries. The keys are the paths,
            the values are dictionaries with keys 'SubDirs' and 'Files'. Each is a dictionary with
            path as key and metadata as value (for Files only; SubDirs just has True as value)
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "XROOTStorage.listDirectory: Attempting to list %s directories." % len( urls ) )
res = self.isDirectory( urls )
if not res['OK']:
return res
successful = {}
failed = res['Value']['Failed']
directories = []
for url, isDirectory in res['Value']['Successful'].items():
if isDirectory:
directories.append( url )
else:
errStr = "XROOTStorage.listDirectory: path is not a directory."
gLogger.error( errStr, url )
failed[url] = errStr
for directory in directories:
res = self.__listSingleDirectory( directory )
if not res['OK']:
failed[directory] = res['Message']
continue
successful[directory] = res['Value']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def __listSingleDirectory( self, path ):
"""List the content of a single directory, NOT RECURSIVE
:param self: self reference
:param path: single path on storage (pfn : root://...)
:returns: A 2 level nested S_ structure :
S_ERROR if there is an error (fatal or not)
              S_OK (dictionary) The dictionary has 2 keys : SubDirs and Files
              The value of Files is a dictionary with the file name as key and its metadata as value
              The value of SubDirs is a dictionary with the directory name as key and True as value
"""
res = pfnparse( path )
if not res['OK']:
return res
self.log.debug( "XROOTStorage.__listSingleDirectory: Attempting to list directory %s." % path )
pfnDict = res['Value']
xFilePath = '/'.join( [pfnDict['Path'], pfnDict['FileName']] )
status, listing = self.xrootClient.dirlist( xFilePath, DirListFlags.STAT )
if not status.ok:
errorMsg = "XROOTStorage.__listSingleDirectory : could not list the directory content"
self.log.error( errorMsg, status.message )
return S_ERROR ( errorMsg )
files = {}
subDirs = {}
for entry in listing:
fullPath = "root://%s%s%s" % ( self.host, xFilePath, entry.name )
metadataDict = self.__parseStatInfoFromApiOutput( entry.statinfo )
if metadataDict['Directory']:
subDirs[fullPath] = True
continue
elif metadataDict['File']:
files[fullPath] = metadataDict
else: # This "other", whatever that is
self.log.debug( "XROOTStorage.__listSingleDirectory : found an item which is not a file nor a directory", fullPath )
return S_OK( {'SubDirs' : subDirs, 'Files' : files } )
def __getSingleDirectoryMetadata( self, path ):
""" Fetch the metadata associated to the directory
:param self: self reference
:param path: path (only 1) on storage (pfn : root://...)
:returns: A 2 level nested S_ structure :
S_ERROR if there is a fatal error
S_OK (S_OK (MetadataDict)) if we could get the metadata
S_OK (S_ERROR (errorMsg)) if there was a problem getting the metadata or if it is not a directory
"""
self.log.debug( "XROOTStorage.__getSingleDirectoryMetadata: Fetching metadata of directory %s." % path )
res = self.__getSingleMetadata( path )
if not res['OK']:
return res
# No fatal error, nested structure
res = res['Value']
if not res['OK']:
return S_OK( res )
metadataDic = res['Value']
    # If it is not a directory
if not metadataDic['Directory']:
errStr = "XROOTStorage.__getSingleDirectoryMetadata: Supplied path is not a directory."
self.log.error( errStr, path )
return S_OK( S_ERROR( errStr ) )
return S_OK( S_OK( metadataDic ) )
def getDirectoryMetadata( self, path ):
""" Get metadata associated to the directory(ies)
:param self: self reference
:param path: path (or list of path) on storage (pfn : root://...)
:returns Successful dict {path : metadata}
Failed dict {path : error message }
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "XROOTStorage.getDirectoryMetadata: Attempting to fetch metadata of %s directories." % len( urls ) )
failed = {}
successful = {}
for url in urls:
res = self.__getSingleDirectoryMetadata( url )
if not res['OK']:
errStr = "XROOTStorage.getDirectoryMetadata: Completely failed to get path metadata."
gLogger.error( errStr, res['Message'] )
return S_ERROR( errStr )
# There were no fatal errors, so now we see if there were any other errors
res = res['Value']
if not res['OK']:
failed[url] = res['Message']
else:
successful[url] = res['Value']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __getSingleDirectorySize( self, path ):
""" Get the size of the directory on the storage
CAUTION : the size is not recursive, and does not go into subfolders
:param self: self reference
:param path: path (single) on storage (pfn : root://...)
:return: S_ERROR in case of problem
S_OK (Dictionary) Files : amount of files in the directory
Size : summed up size of files
subDirs : amount of sub directories
"""
self.log.debug( "XROOTStorage.__getSingleDirectorySize: Attempting to get the size of directory %s" % path )
res = self.__listSingleDirectory( path )
if not res['OK']:
return res
directorySize = 0
directoryFiles = 0
    # itervalues returns an iterator over the values of the dictionary
for fileDict in res['Value']['Files'].itervalues():
directorySize += fileDict['Size']
directoryFiles += 1
self.log.debug( "XROOTStorage.__getSingleDirectorySize: Successfully obtained size of %s." % path )
subDirectories = len( res['Value']['SubDirs'] )
return S_OK( { 'Files' : directoryFiles, 'Size' : directorySize, 'SubDirs' : subDirectories } )
def getDirectorySize( self, path ):
""" Get the size of the directory on the storage
CAUTION : the size is not recursive, and does not go into subfolders
:param self: self reference
:param path: path (or list of path) on storage (pfn : root://...)
    :returns: successful and failed dictionaries, both indexed by the path
              In the failed dictionary, the value is the error message
              In the successful dictionary the values are dictionaries : Files : number of files in the directory
Size : summed up size of files
subDirs : amount of sub directories
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "XROOTStorage.getDirectorySize: Attempting to get size of %s directories." % len( urls ) )
failed = {}
successful = {}
for url in urls:
res = self.__getSingleDirectorySize( url )
if not res['OK']:
failed[url] = res['Message']
else:
successful[url] = res['Value']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
#############################################################
#
# These are the methods for manipulating the client
#
##################################################################
#
# ALL INHERITED FROM StorageBase.py
#
##################################################################
# def isOK( self ):
# return self.isok
#
# def changeDirectory( self, newdir ):
# """ Change the current directory
# """
# self.cwd = newdir
# return S_OK()
#
# def getCurrentDirectory( self ):
# """ Get the current directory
# """
# return S_OK( self.cwd )
#
# def getName( self ):
# """ The name with which the storage was instantiated
# """
# return S_OK( self.name )
#
# def setParameters( self, parameters ):
# """ Set extra storage parameters, non-mandatory method
# """
# return S_OK()
##################################################################
def getParameters( self ):
""" This gets all the storage specific parameters pass when instantiating the storage
:returns Dictionary with keys : StorageName, ProtocolName, Protocol, Host, Path, Port, SpaceToken, WSUrl
"""
parameterDict = {}
parameterDict['StorageName'] = self.name
parameterDict['ProtocolName'] = self.protocolName
parameterDict['Protocol'] = self.protocol
parameterDict['Host'] = self.host
parameterDict['Path'] = self.rootdir # I don't know why it's called rootdir in the baseclass and Path here...
parameterDict['Port'] = self.port
parameterDict['SpaceToken'] = self.spaceToken
parameterDict['WSUrl'] = self.wspath
return S_OK( parameterDict )
def getProtocolPfn( self, pfnDict, withPort ):
""" Get the PFN for the protocol with or without the port
:param self:
:param pfnDict: dictionary where the keys/values are the different part of the surl
:param bool withPort: include port information
:returns S_OK(pfn)
"""
pfnDict['Protocol'] = self.protocol
pfnDict['Host'] = self.host
if not pfnDict['Path'].startswith( self.rootdir ):
pfnDict['Path'] = os.path.join( self.rootdir, pfnDict['Path'].rstrip( '/' ) )
# These lines should be checked
if withPort:
pfnDict['Port'] = self.port
# pfnDict['WSUrl'] = self.wspath
###################
# pfnunparse does not take into account the double // so I have to trick it
# The problem is that I cannot take into account the port, which is always empty (it seems..)
return S_OK( 'root://%s%s/%s' % ( self.host, pfnDict['Path'], pfnDict['FileName'] ) )
def getCurrentURL( self, fileName ):
""" Create the full URL for the storage using the configuration, self.cwd and the fileName
:param fileName : name of the file for which we want the URL
:returns full URL
"""
if fileName:
if fileName[0] == '/':
fileName = fileName.lstrip( '/' )
try:
fullUrl = "%s://%s/%s/%s" % ( self.protocol, self.host, self.cwd, fileName )
fullUrl = fullUrl.rstrip( "/" )
return S_OK( fullUrl )
except TypeError, error:
return S_ERROR( "Failed to create URL %s" % error )
def getPFNBase( self, withPort = False ):
""" This will get the pfn base. This is then appended with the LFN in DIRAC convention.
:param self: self reference
:param bool withPort: flag to include port
:returns PFN
"""
return S_OK( { True : 'root://%s:%s/%s' % ( self.host, self.port, self.rootdir ),
False : 'root://%s/%s' % ( self.host, self.rootdir ) }[withPort] )
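# Editor-added illustration of the URL conventions above (host, port and rootdir
# are hypothetical, not taken from any real configuration): with host
# "xrootd.example.org", port "1094" and rootdir "/data/vo", getPFNBase returns
#   withPort=True  -> 'root://xrootd.example.org:1094//data/vo'
#   withPort=False -> 'root://xrootd.example.org//data/vo'
# while getCurrentURL( "file.txt" ) builds protocol://host/cwd/file.txt in the
# same spirit, stripping any leading '/' from the file name first.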
|
sposs/DIRAC
|
Resources/Storage/XROOTStorage.py
|
Python
|
gpl-3.0
| 60,297
|
[
"DIRAC"
] |
a5d03f08f544e5dda23c9334429854231f2b9abb0f80b9e1b854f59cfbc2a964
|
import sys
import numpy as np
import datetime
from collections import defaultdict
import os
# from sklearn.metrics import confusion_matrix
import glob
import keras
from Bio import pairwise2
import _pickle as cPickle
import copy
from ..features.helpers import scale_clean, scale_clean_two
from .helper import lrd
import keras.backend as K
def print_stats(o):
stats = defaultdict(int)
for x in o:
stats[x] += 1
print(stats)
def flatten2(x):
return x.reshape((x.shape[0] * x.shape[1], -1))
def find_closest(start, Index, factor=3.5):
# Return the first element != N, which corresponds to the index of seqs
start_index = min(int(start / factor), len(Index) - 1)
# print(start,start_index,Index[start_index])
if Index[start_index] >= start:
while start_index >= 0 and Index[start_index] >= start:
start_index -= 1
return max(0, start_index)
if Index[start_index] < start:
while start_index <= len(Index) - 1 and Index[start_index] < start:
start_index += 1
if start_index <= len(Index) - 1 and start_index > 0:
if abs(Index[start_index] - start) > abs(Index[start_index - 1] - start):
start_index -= 1
# print(start_index,Index[start_index])
# print(start_index,min(start_index,len(Index)-1),Index[min(start_index,len(Index)-1)])
return min(start_index, len(Index) - 1)
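# Editor-added worked example (values chosen for illustration only): if Index
# holds the positions of the non-"N" characters, e.g. Index = [0, 4, 7, 12],
# then find_closest(8, Index) starts at int(8 / 3.5) = 2 (Index[2] == 7),
# walks right because 7 < 8, reaches Index[3] == 12, and steps back since
# |7 - 8| < |12 - 8|, returning 2.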
def get_segment(alignment, start_index_on_seqs, end_index_on_seqs):
s1, s2 = alignment
count = 0
# print(s1,s2)
startf = False
end = None
# found_end =
for N, (c1, c2) in enumerate(zip(s1, s2)):
# print(count)
if count == start_index_on_seqs and not startf:
start = 0 + N
startf = True
if count == end_index_on_seqs + 1:
end = 0 + N
break
if c2 != "-":
count += 1
# print(start,end)
if not startf:
return "", "", "", 0
return s1[start:end].replace("-", ""), s1[start:end], s2[start:end], 1
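# Editor-added worked example: the two indices refer to positions in the
# ungapped read (s2 with "-" removed). For the hypothetical alignment
#   s1 = "AC-GT"   (reference row)
#   s2 = "ACTG-"   (read row)
# get_segment((s1, s2), 1, 2) selects read positions 1..2 ("CT") and returns
# ("C", "C-", "CT", 1): ungapped reference piece, gapped reference piece,
# gapped read piece, success flag.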
def realignment():
ntwk.save_weights(os.path.join(
args.root, 'tmp.h5'))
predictor.load_weights(os.path.join(
args.root, 'tmp.h5'))
# predictor.load_weights("data/training/my_model_weights-1990.h5")
print("Realign")
New_seq = []
change = 0
old_length = 0
new_length = 0
total_length = 0
current_length = 0
switch = 0
for s in range(len(data_x)):
new_seq = np.argmax(predictor.predict(np.array([data_x[s]]))[0], axis=-1)
# print(args.Nbases)
if args.Nbases == 8:
alph = "ACGTBLEIN" # use T to Align
if args.Nbases == 5:
alph = "ACGTBN" # use T to Align
if args.Nbases == 4:
alph = "ACGTN"
New_seq.append("".join(list(map(lambda x: alph[x], new_seq))))
nc = {}
for l in ["B", "L", "E", "I", "T"]:
nc[l] = New_seq[-1].count(l)
for l in ["B", "L", "E", "I"]:
New_seq[-1] = New_seq[-1].replace(l, "T")
# Here maybe realign with bwa
# for s in range(len(data_x)):
type_sub = "T"
subts = False
ref = "" + refs[s]
for l in ["B", "L", "E", "I"]:
if l in refs[s]:
type_sub = l
subts = True
break
if subts:
ref = ref.replace(type_sub, "T")
re_align = True
if re_align:
old_align = data_alignment[s]
# new_align = pairwise2.align.globalxx(ref, New_seq[s].replace("N", ""))[0][:2]
new_align = pairwise2.align.globalxx(ref, New_seq[s].replace("N", ""))
if len(new_align) == 0 or len(new_align[0]) < 2:
new_length += len(old_align[0])
print()
continue
new_align = new_align[0][:2]
print("Old", len(old_align[0]), "New", len(new_align[0]), subts, len(
ref), (len(ref) - len(New_seq[s].replace("N", ""))) / len(ref), nc[type_sub] / (nc["T"] + 1))
old_length += len(old_align[0])
total_length += len(ref)
current_length += len(New_seq[s].replace("N", ""))
if len(new_align[0]) < len(old_align[0]) and (len(ref) - len(New_seq[s].replace("N", ""))) / len(ref) < 0.05:
print("Keep!")
change += 1
data_alignment[s] = new_align
data_index[s] = np.arange(len(New_seq[s]))[
np.array([ss for ss in New_seq[s]]) != "N"]
new_length += len(new_align[0])
else:
new_length += len(old_align[0])
print()
if subts and nc[type_sub] / (nc["T"] + nc[type_sub]) < 0.2:
if args.force_clean and type_sub != "B":
continue
refs[s] = refs[s].replace(type_sub, "T")
switch += 1
print("Swich")
print("Change", change, len(data_x))
with open(os.path.join(
args.root, "Allignements-bis-%i" % epoch), "wb") as f:
cPickle.dump([data_x, data_index,
data_alignment, refs, names], f)
with open(log_total_length, "a") as f:
f.writelines("%i,%i,%i,%i,%i,%i,%i\n" %
(epoch, old_length, new_length, total_length, current_length, change, switch))
# print "out", np.min(out_gc), np.median(out_gc), np.max(out_gc), len(out_gc)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--Nbases', type=int, choices=[4, 5, 8], default=4)
parser.add_argument('--root', type=str, default="data/training/")
parser.add_argument('--test', dest='test', action='store_true')
parser.add_argument('--size', type=int, default=20)
parser.add_argument('directories', type=str, nargs='*')
parser.add_argument('--from-pre-trained', dest='from_pre_trained', action='store_true')
parser.add_argument('--pre-trained-weight', dest='pre_trained_weight', type=str, default=None)
parser.add_argument('--pre-trained-dir-list', dest='pre_trained_dir_list', type=str)
parser.add_argument('--deltaseq', dest='deltaseq', type=int, default=10)
parser.add_argument('--forcelength', dest='forcelength', type=float, default=0.5)
parser.add_argument('--oversampleb', dest='oversampleb', type=int, default=3)
parser.add_argument('--ref-from-file', dest="ref_from_file", type=bool, default=False)
parser.add_argument('--select-agree', dest="select_agree", action="store_true")
parser.add_argument('--max-file', dest="max_file", type=int, default=None)
parser.add_argument('--ctc', dest='ctc', action="store_true")
parser.add_argument('--convert-to-t', dest='convert_to_t', type=float, default=None)
parser.add_argument('--n-input', dest="n_input", type=int, default=1)
parser.add_argument('--n-output', dest="n_output", type=int, default=1)
parser.add_argument('--n-output-network', dest="n_output_network", type=int, default=1)
parser.add_argument('--f-size', nargs='+', dest="f_size", type=int, default=None)
parser.add_argument('--skip-new', dest="skip_new", action="store_true")
parser.add_argument('--force-clean', dest="force_clean", action="store_true")
parser.add_argument('--filter', nargs='+', dest="filter", type=str, default=[])
parser.add_argument('--ctc-length', dest="ctc_length", type=int, default=20)
parser.add_argument('--normalize-window-length', dest="nwl", action="store_true")
parser.add_argument('--attention', dest="attention", action="store_true")
parser.add_argument('--residual', dest="res", action="store_true")
parser.add_argument('--all-file', dest="allignment_file", default="Allignements-bis")
parser.add_argument('--fraction', dest="fraction", type=float, default=None)
parser.add_argument('--fractions', nargs='+', dest="fractions", type=float, default=[])
parser.add_argument('--include-short', dest="include_short", action="store_true")
parser.add_argument('--old', dest="old", action="store_true")
parser.add_argument('--clean', dest="clean", action="store_true")
args = parser.parse_args()
if args.allignment_file == "Allignements-bis":
allignment_file = os.path.join(args.root, "Allignements-bis")
else:
allignment_file = args.allignment_file
print(args.filter)
data_x = []
data_original = []
data_y = []
data_y2 = []
data_index = []
data_alignment = []
refs = []
names = []
convert = []
log_total_length = os.path.join(args.root, "total_length.log")
if keras.backend.backend() != 'tensorflow':
print("Must use tensorflow to train")
exit()
mapping = {"A": 0, "C": 1, "G": 2, "T": 3, "B": 4, "L": 5, "E": 6, "I": 7, "N": 8} # Modif
n_classes = len(mapping.keys())
n_output_network = args.n_output_network
n_output = args.n_output
n_input = args.n_input
subseq_size = args.ctc_length
from .model import build_models
ctc_length = subseq_size
input_length = None
if n_output_network == 2:
input_length = subseq_size
ctc_length = 2 * subseq_size
n_feat = 4
if args.clean:
n_feat = 3
if args.Nbases == 8:
old_predictor, old_ntwk = build_models(
args.size, nbase=1, ctc_length=ctc_length, input_length=input_length, n_output=n_output_network, n_feat=n_feat)
os.makedirs(args.root, exist_ok=True)
end = None
if args.test:
end = 80
if not args.from_pre_trained:
list_files = []
for folder in args.directories:
fiches = glob.glob(folder + "/*")
fiches.sort()
list_files += fiches[:args.max_file]
list_files.sort()
for fn in list_files[:end]:
f = open(fn)
ref = f.readline()
ref = ref.replace("\n", "")
if len(ref) > 30000:
print("out", len(ref))
continue
X = []
Y = []
seq = []
for l in f:
its = l.strip().split()
X.append(list(map(float, its[:-1])))
if n_output == 2:
Y.append(mapping[its[-1][0]])
Y.append(mapping[its[-1][1]])
else:
Y.append(mapping[its[-1][1]])
if n_input == 2:
X.append(list(map(float, its[:-1])))
seq.append(its[-1])
if len(X) < subseq_size:
print("out (too small (to include must set a smaller subseq_size))", fn)
continue
refs.append(ref.strip())
names.append(fn)
data_x.append(np.array(X, dtype=np.float32))
data_y.append(np.array(Y, dtype=np.int32))
if args.convert_to_t:
p = np.sum(data_y[-1] == 5) / len(Y)
if p > args.convert_to_t:
print(np.sum(data_y[-1] == mapping["B"]))
data_y[-1][data_y[-1] == mapping["B"]] = mapping["T"]
print(np.sum(data_y[-1] == mapping["B"]))
print("Converted")
print(fn, np.sum(data_y[-1] == 5) / len(Y))
# print(data_y2[-1][:20])
# print(data_y[-1][:20])
if args.ctc:
on_ref = False
if on_ref:
seq = "".join(seq)
# print(seq)
seq = seq[1::2]
# print(seq)
data_index.append(np.arange(len(seq))[np.array([s for s in seq]) != "N"])
seqs = seq.replace("N", "")
alignments = pairwise2.align.globalxx(ref, seqs)
data_alignment.append(alignments[0][:2])
# print(len(seqs), len(ref))
print(len(alignments[0][0]), len(ref), len(seqs), alignments[0][2:])
else:
seq = "".join(seq)
if n_output == 1:
seq = seq[1::2]
# print(seq)
# print(seq)
data_index.append(np.arange(len(seq))[np.array([s for s in seq]) != "N"])
seqs = seq.replace("N", "")
data_alignment.append([seqs, seqs])
if not args.ctc:
with open(os.path.join(args.root, "Allignements-bis"), "wb") as f:
cPickle.dump([data_x, data_y, data_y2, refs, names], f)
else:
with open(os.path.join(args.root, "Allignements-bis"), "wb") as f:
cPickle.dump([data_x, data_index, data_alignment, refs, names], f)
else:
predictor, ntwk = build_models(args.size, nbase=args.Nbases - 4,
ctc_length=ctc_length,
input_length=input_length, n_output=n_output_network,
res=args.res, attention=args.attention, n_feat=n_feat)
ntwk.load_weights(args.pre_trained_weight)
predictor.load_weights(args.pre_trained_weight)
from ..features.extract_events import extract_events, scale
import h5py
import subprocess
from ..features.bwa_tools import get_seq
end = None
if args.test:
end = 10
with open(args.pre_trained_dir_list, "r") as f:
idirect = 0
for iline, line in enumerate(f.readlines()):
print(line)
if not args.ref_from_file:
if len(line.split()) not in [2, 3]:
print("Skipping ", line)
continue
if len(line.split()) == 2:
direct, type_sub = line.split()
else:
direct, type_sub, ref_file = line.split()
else:
if len(line.split()) != 3:
print("Skipping ", line)
continue
direct, type_sub, ref_file = line.split()
idirect += 1
sub = None
type_sub = type_sub.strip()
if type_sub != "T":
sub = type_sub
if sub not in mapping:
raise "Invalid substitution"
all_files = glob.glob(direct + "/*")
for ifilename, filename in enumerate(all_files):
print(filename)
if args.max_file is not None and ifilename > args.max_file:
continue
if args.fraction is not None and ifilename / len(all_files) > args.fraction:
break
if args.fractions is not None and len(args.fractions) == 2:
tmp_frac = ifilename / len(all_files)
if not(tmp_frac > args.fractions[0] and tmp_frac < args.fractions[1]):
continue
try:
h5 = h5py.File(filename, "r")
except OSError:
print("Invalid file")
if args.f_size is not None:
events = extract_events(h5, "rf", window_size=args.f_size[
iline], old=args.old)
else:
events = extract_events(h5, "r9.5")
if events is None:
print("No events in file %s" % filename)
h5.close()
continue
if not args.include_short and len(events) < 300:
print("Read %s too short, not basecalling" % filename)
h5.close()
continue
# print(len(events))
if args.test and len(events) > 2500:
print("Skip test")
continue
if args.test and len(data_x) > (iline + 1) * 10:
break
events = events[1:-1]
if len(events) > 40000:
events = events[:40000]
mean = events["mean"]
std = events["stdv"]
length = events["length"]
Original = np.array(
np.vstack([mean, mean * mean, std, length]).T, dtype=np.float32)
if not args.clean:
x = scale(Original)
else:
x = scale_clean_two(Original)
o1 = predictor.predict(np.array(x)[np.newaxis, ::, ::])
# print("New", o1[0].shape)
# print("Old", o1[0].shape)
o1 = o1[0]
om = np.argmax(o1, axis=-1)
conv = False
percent = None
if sub is not None:
oml = om.tolist()
percent = oml.count(
mapping[sub]) / (oml.count(mapping["T"]) +
oml.count(mapping["B"]) +
oml.count(mapping["I"]) +
oml.count(mapping["E"]) +
oml.count(mapping["I"]) + 0.05)
if args.force_clean and percent < 0.1:
conv = True
alph = "ACGTN"
if args.Nbases in [5, 8]:
alph = "ACGTTN"
if args.Nbases == 8:
alph = "ACGTTTTTN"
seq = "".join(map(lambda x: alph[x], om))
seqs = seq.replace("N", "")
print(seqs)
# write fasta
with open(args.root + "/tmp.fasta", "w") as output_file:
output_file.writelines(">%s_template_deepnano\n" % filename)
output_file.writelines(seqs + "\n")
# execute bwa
if not args.ref_from_file or args.select_agree:
ref = "data/external/ref/S288C_reference_sequence_R64-2-1_20150113.fa"
exex = "bwa mem -x ont2d %s %s/tmp.fasta > %s/tmp.sam" % (
ref, args.root, args.root)
subprocess.call(exex, shell=True)
# read from bwa
ref, succes, X1, P1 = get_seq(
args.root + "/tmp.sam", ref="data/external/ref/S288C_reference_sequence_R64-2-1_20150113.fa", ret_pos=True)
if not succes:
continue
if args.ref_from_file or args.select_agree:
k = filename.split("/")[-1]
read, ch = k.split("_")[9], k.split("_")[11]
succes = False
Ref = []
with open(ref_file, "r") as f:
for line in f.readlines():
sp = line.split()
if len(sp) > 1 and sp[0].startswith("@ch"):
kp = sp[0].split("/")[-1]
chp = kp.split("_")[0][3:]
readp = kp.split("_")[1][4:]
if read == readp and ch == chp:
print(k, kp)
if sp[2] == '*' or "chr" not in sp[2]:
continue
X2 = int(sp[2][3:])
P2 = int(sp[3])
ref = sp[9]
Ref.append(["" + ref, X2, P2])
succes = True
# break
if succes:
if not args.select_agree:
ref = list(sorted(Ref, key=lambda x: len(x[0])))[-1][0]
print([len(iRef[0]) for iRef in Ref])
print(len(ref), len(seqs))
else:
found = False
for seq2, X2, P2 in Ref:
if X1 == X2 and abs(P1 - P2) < 5000:
found = True
print("Agreee")
if not found:
continue
else:
continue
if abs(len(ref) - len(seqs)) > 1000:
succes = False
if not succes:
continue
if args.test:
print(len(data_x), "LEN")
if len(ref) > 2000 or len(seqs) > 2000:
continue
if len(data_x) > 20 * idirect:
break
# if len(ref) > 30000:
# print("out", len(ref))
# continue
bio = True
if not succes:
continue
if bio:
delta = np.abs(len(ref) - len(seq.replace("N", ""))) / len(ref)
if delta > 0.15:
print("Delta too large", delta)
continue
alignments = pairwise2.align.globalxx(
ref, seqs, one_alignment_only=True)
# print("la", len(alignments), len(alignments[0]))
if len(alignments) > 0 and len(alignments[0]) >= 2:
names.append(filename)
data_original.append(Original)
data_x.append(x)
data_index.append(np.arange(len(seq))[
np.array([s for s in seq]) != "N"])
data_alignment.append(alignments[0][:2])
if sub is not None and not conv:
ref = ref.replace("T", sub)
convert.append([conv, sub, percent, delta])
# print(ref)
refs.append(ref)
# print(len(seqs), len(ref))
print(len(alignments[0][0]), len(ref), len(seqs), alignments[0][2:])
else:
start, end, seq_all, ref_all, success = get_al(seq, ref)
if not success:
continue
nb = 0
for istart, iseq in enumerate(seq):
if iseq != "N":
nb += 1
if nb == start:
break
if end is None:
end = 0
end = -end
for iend, iseq in enumerate(seq[::-1]):
if iseq != "N":
nb += 1
if nb == end:
break
data_x.append(x[istart:iend])
data_index.append(np.arange(len(seq[istart:iend]))[
np.array([s for s in seq[istart:iend]]) != "N"])
data_alignment.append([ref_all, seq_all])
if sub is not None:
ref_all = ref_all.replace("T", sub)
# print(ref)
refs.append(ref_all)
with open(os.path.join(args.root, "Allignements-bis"), "wb") as f:
cPickle.dump([data_original, convert, data_x,
data_index, data_alignment, refs, names], f)
#
# x_clean = scale_clean_two(
# np.array(np.vstack([mean, mean * mean, std, length]).T, dtype=np.float32))
#
# ntwk.load_weights("./my_model_weights.h5")
|
jeammimi/deepnano5bases
|
src/models/generate_training_data.py
|
Python
|
mit
| 24,803
|
[
"BWA"
] |
2b656d9031275a2301af1824fb4eb31d7af008bdf9917228ae741f3df58a5d70
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Optimize QED with different max number of steps per episodes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import os
from absl import app
from absl import flags
from rdkit import Chem
from rdkit.Chem import QED
from tensorflow.compat.v1 import gfile
from mol_dqn.chemgraph.mcts import deep_q_networks
from mol_dqn.chemgraph.mcts import molecules as molecules_mdp
from mol_dqn.chemgraph.mcts import run_dqn
from mol_dqn.chemgraph.tensorflow import core
flags.DEFINE_float('gamma', 0.999, 'discount')
flags.DEFINE_integer('max_steps_per_episode', 20, 'max_steps')
FLAGS = flags.FLAGS
class Molecule(molecules_mdp.Molecule):
"""QED reward Molecule."""
def _reward(self):
molecule = Chem.MolFromSmiles(self._state)
if molecule is None:
return 0.0
try:
qed = QED.qed(molecule)
except ValueError:
qed = 0
return qed * FLAGS.gamma**(self.max_steps - self._counter)
def main(argv):
del argv
if FLAGS.hparams is not None:
with gfile.Open(FLAGS.hparams, 'r') as f:
hparams = deep_q_networks.get_hparams(**json.load(f))
else:
hparams = deep_q_networks.get_hparams()
hparams.override_from_dict({
'max_steps_per_episode': FLAGS.max_steps_per_episode
})
environment = Molecule(
atom_types=set(hparams.atom_types),
init_mol=FLAGS.start_molecule,
allow_removal=hparams.allow_removal,
allow_no_modification=hparams.allow_no_modification,
max_steps=hparams.max_steps_per_episode)
dqn = deep_q_networks.DeepQNetwork(
input_shape=(hparams.batch_size, hparams.fingerprint_length),
q_fn=functools.partial(
deep_q_networks.multi_layer_model, hparams=hparams),
optimizer=hparams.optimizer,
grad_clipping=hparams.grad_clipping,
num_bootstrap_heads=hparams.num_bootstrap_heads,
gamma=hparams.gamma,
epsilon=1.0)
run_dqn.run_training(
hparams=hparams,
environment=environment,
dqn=dqn,
)
core.write_hparams(hparams, os.path.join(FLAGS.model_dir, 'config.json'))
if __name__ == '__main__':
app.run(main)
|
google-research/google-research
|
mol_dqn/experimental/optimize_qed_max_steps.py
|
Python
|
apache-2.0
| 2,813
|
[
"RDKit"
] |
7b5658d48776d79226462b4a00c2b06792e65ae2e9e2f8151085da1d6c5a5a78
|
import os
import pickle
import random
import re
import sys
import time
import warnings
import webbrowser
import twitter # pip install python-twitter
import gender_guesser.detector as gender # pip install gender-guesser
from requests_oauthlib import OAuth1Session
from unidecode import unidecode # pip install unidecode
if os.path.exists("detector.pickle"):
detector = pickle.load(open("detector.pickle", "rb"))
else:
detector = gender.Detector(case_sensitive=False)
with open("detector.pickle", "wb+") as f:
pickle.dump(detector, f)
def split(s):
try:
return s.split()[0]
except IndexError:
return s
def rm_punctuation(s, _pat=re.compile(r"\W+")):
return _pat.sub(" ", s)
def make_pronoun_patterns():
for p, g in [
("non binary", "nonbinary"),
("non-binary", "nonbinary"),
("nonbinary", "nonbinary"),
("enby", "nonbinary"),
("nb", "nonbinary"),
("genderqueer", "nonbinary"),
("man", "male"),
("male", "male"),
("boy", "male"),
("guy", "male"),
("woman", "female"),
("womanist", "female"),
("female", "female"),
("girl", "female"),
("gal", "female"),
("latina", "female"),
("latino", "male"),
("dad", "male"),
("mum", "female"),
("mom", "female"),
("father", "male"),
("grandfather", "male"),
("mother", "female"),
("grandmother", "female"),
("they", "nonbinary"),
("xe", "nonbinary"),
("xi", "nonbinary"),
("xir", "nonbinary"),
("ze", "nonbinary"),
("zie", "nonbinary"),
("zir", "nonbinary"),
("hir", "nonbinary"),
("she", "female"),
("hers", "female"),
("her", "female"),
("he", "male"),
("his", "male"),
("him", "male"),
]:
for text in (
r"\b" + p + r"\b",
r"\b" + p + r"/",
r"\b" + p + r" /",
r"pronoun\.is/" + p,
):
yield re.compile(text), g
_PRONOUN_PATTERNS = list(make_pronoun_patterns())
class Cache(object):
def __init__(self):
self._users = {}
self._hits = self._misses = 0
@property
def hit_percentage(self):
return (100 * self._hits) / (self._hits + self._misses)
def UsersLookup(self, user_ids):
rv = [self._users[uid] for uid in user_ids if uid in self._users]
self._hits += len(rv)
self._misses += len(user_ids) - len(rv)
return rv
def UncachedUsers(self, user_ids):
return list(set(user_ids) - set(self._users))
def AddUsers(self, profiles):
for p in profiles:
self._users[p.id] = p
def declared_gender(description):
dl = description.lower()
if "pronoun.is" in dl and "pronoun.is/she" not in dl and "pronoun.is/he" not in dl:
return "nonbinary"
guesses = set()
for p, g in _PRONOUN_PATTERNS:
if p.search(dl):
guesses.add(g)
if len(guesses) > 1:
return "andy" # Several guesses: don't know.
if len(guesses) == 1:
return next(iter(guesses))
return "andy" # Zero or several guesses: don't know.
def analyze_user(user, verbose=False):
"""Get (gender, declared) tuple.
gender is "male", "female", "nonbinary", or "andy" meaning unknown.
declared is True or False.
"""
with warnings.catch_warnings():
# Suppress unidecode warning "Surrogate character will be ignored".
warnings.filterwarnings("ignore")
g = declared_gender(user.description)
if g != "andy":
return g, True
# We haven't found a preferred pronoun.
for name, country in [
(split(user.name), "usa"),
(user.name, "usa"),
(split(unidecode(user.name)), "usa"),
(unidecode(user.name), "usa"),
(split(user.name), None),
(user.name, None),
(unidecode(user.name), None),
(split(unidecode(user.name)), None),
]:
g = detector.get_gender(name, country)
if g != "andy":
# Not androgynous.
break
g = detector.get_gender(rm_punctuation(name), country)
if g != "andy":
# Not androgynous.
break
if verbose:
print(
"{:20s}\t{:40s}\t{:s}".format(
user.screen_name.encode("utf-8"), user.name.encode("utf-8"), g
)
)
if g.startswith("mostly_"):
g = g.split("mostly_")[1]
return g, False
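# Editor-added sketch of analyze_user in isolation. FakeUser is a stand-in for
# the twitter.User model; only the attributes read above are needed.
#
# >>> from collections import namedtuple
# >>> FakeUser = namedtuple("FakeUser", "name screen_name description")
# >>> analyze_user(FakeUser("Maria", "maria", "she/her"))
# ('female', True)
# >>> analyze_user(FakeUser("Maria", "maria", ""))  # falls back to the name detector
# ('female', False)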
def div(num, denom):
if denom:
return num / float(denom)
return 0
class Stat(object):
def __init__(self):
self.n = 0
self.n_declared = 0
class Analysis(object):
def __init__(self, ids_sampled, ids_fetched):
self.nonbinary = Stat()
self.male = Stat()
self.female = Stat()
self.andy = Stat()
self.ids_sampled = ids_sampled
self.ids_fetched = ids_fetched
def update(self, gender, declared):
# Elide gender-unknown and androgynous names.
attr = getattr(self, "andy" if gender == "unknown" else gender)
attr.n += 1
if declared:
attr.n_declared += 1
def guessed(self, gender=None):
if gender:
attr = getattr(self, gender)
return attr.n - attr.n_declared
return self.guessed("nonbinary") + self.guessed("male") + self.guessed("female")
def declared(self, gender=None):
if gender:
attr = getattr(self, gender)
return attr.n_declared
return self.nonbinary.n_declared + self.male.n_declared + self.female.n_declared
def pct(self, gender):
attr = getattr(self, gender)
return div(100 * attr.n, self.nonbinary.n + self.male.n + self.female.n)
def dry_run_analysis():
friends = Analysis(250, 400)
friends.nonbinary.n = 10
friends.nonbinary.n_declared = 10
friends.male.n = 200
friends.male.n_declared = 20
friends.female.n = 40
friends.female.n_declared = 5
friends.andy.n = 250
followers = Analysis(250, 400)
followers.nonbinary.n = 10
followers.nonbinary.n_declared = 10
followers.male.n = 200
followers.male.n_declared = 20
followers.female.n = 40
followers.female.n_declared = 5
followers.andy.n = 250
timeline = Analysis(250, 400)
timeline.nonbinary.n = 10
timeline.nonbinary.n_declared = 10
timeline.male.n = 200
timeline.male.n_declared = 20
timeline.female.n = 40
timeline.female.n_declared = 5
timeline.andy.n = 250
return friends, followers, timeline
def analyze_users(users, ids_fetched=None):
an = Analysis(ids_sampled=len(users), ids_fetched=ids_fetched)
for user in users:
g, declared = analyze_user(user)
an.update(g, declared)
return an
def batch(it, size):
for i in range(0, len(it), size):
yield it[i : i + size]
def get_twitter_api(consumer_key, consumer_secret, oauth_token, oauth_token_secret):
return twitter.Api(
consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_token_key=oauth_token,
access_token_secret=oauth_token_secret,
sleep_on_rate_limit=True,
)
# 5000 ids per call.
MAX_GET_FRIEND_IDS_CALLS = 10
MAX_GET_FOLLOWER_IDS_CALLS = 10
# 100 users per call.
MAX_USERS_LOOKUP_CALLS = 30
def get_friends_lists(
user_id, consumer_key, consumer_secret, oauth_token, oauth_token_secret
):
api = get_twitter_api(
consumer_key, consumer_secret, oauth_token, oauth_token_secret
)
# Only store what we need, avoid oversized session cookie.
def process_lists():
for l in reversed(api.GetLists()):
as_dict = l.AsDict()
yield {"id": as_dict.get("id"), "name": as_dict.get("name")}
return list(process_lists())
def analyze_self(user_id, api):
users = api.UsersLookup(screen_name=[user_id])
return analyze_user(users[0])
def fetch_users(user_ids, api, cache):
users = []
users.extend(cache.UsersLookup(user_ids))
for ids in batch(cache.UncachedUsers(user_ids), 100):
results = api.UsersLookup(ids)
cache.AddUsers(results)
users.extend(results)
return users
def analyze_friends(user_id, list_id, api, cache):
nxt = -1
friend_ids = []
for _ in range(MAX_GET_FRIEND_IDS_CALLS):
if list_id is not None:
nxt, prev, data = api.GetListMembersPaged(list_id=list_id, cursor=nxt)
friend_ids.extend([fr.id for fr in data])
else:
nxt, prev, data = api.GetFriendIDsPaged(screen_name=user_id, cursor=nxt)
friend_ids.extend(data)
if nxt == 0 or nxt == prev:
break
# We can fetch users' details 100 at a time.
if len(friend_ids) > 100 * MAX_USERS_LOOKUP_CALLS:
friend_id_sample = random.sample(friend_ids, 100 * MAX_USERS_LOOKUP_CALLS)
else:
friend_id_sample = friend_ids
users = fetch_users(friend_id_sample, api, cache)
return analyze_users(users, ids_fetched=len(friend_ids))
def analyze_followers(user_id, api, cache):
nxt = -1
follower_ids = []
for _ in range(MAX_GET_FOLLOWER_IDS_CALLS):
nxt, prev, data = api.GetFollowerIDsPaged(screen_name=user_id, cursor=nxt)
follower_ids.extend(data)
if nxt == 0 or nxt == prev:
break
# We can fetch users' details 100 at a time.
if len(follower_ids) > 100 * MAX_USERS_LOOKUP_CALLS:
follower_id_sample = random.sample(follower_ids, 100 * MAX_USERS_LOOKUP_CALLS)
else:
follower_id_sample = follower_ids
users = fetch_users(follower_id_sample, api, cache)
return analyze_users(users, ids_fetched=len(follower_ids))
def analyze_timeline(user_id, list_id, api, cache):
# Timeline-functions are limited to 200 statuses
if list_id is not None:
statuses = api.GetListTimeline(list_id=list_id, count=200)
else:
statuses = api.GetHomeTimeline(count=200)
timeline_ids = []
for s in statuses:
# Skip the current user's own tweets.
if s.user.screen_name != user_id:
timeline_ids.append(s.user.id)
# Reduce to unique list of ids
timeline_ids = list(set(timeline_ids))
users = fetch_users(timeline_ids, api, cache)
return analyze_users(users, ids_fetched=len(timeline_ids))
def analyze_my_timeline(user_id, api, cache):
# Timeline-functions are limited to 200 statuses
statuses = api.GetUserTimeline(
screen_name=user_id,
count=200,
include_rts=True,
trim_user=False,
exclude_replies=False,
)
max_id = 0
# Max 2000 tweets.
for i in range(1, 10):
if max_id == statuses[-1].id - 1:
# Already fetched all tweets in timeline.
break
max_id = statuses[-1].id - 1
statuses = statuses + api.GetUserTimeline(
screen_name=user_id,
count=200,
max_id=max_id,
include_rts=True,
trim_user=False,
exclude_replies=False,
)
retweet_ids = []
reply_ids = []
quotes_ids = []
timeline_ids = []
for s in statuses:
if s.retweeted_status is not None:
retweet_ids.append(s.retweeted_status.user.id)
elif s.in_reply_to_status_id is not None:
for i in s.user_mentions:
reply_ids.append(i.id)
elif s.quoted_status is not None:
quotes_ids.append(s.quoted_status.user.id)
elif len(s.user_mentions) > 0:
for i in s.user_mentions:
timeline_ids.append(i.id)
outdict = {
"retweets": retweet_ids,
"replies": reply_ids,
"quotes": quotes_ids,
"mentions": timeline_ids,
}
newdict = {}
for ids in outdict.keys():
users = fetch_users(outdict.get(ids), api, cache)
newdict[ids] = analyze_users(users, ids_fetched=len(outdict.get(ids)))
return newdict
# From https://github.com/bear/python-twitter/blob/master/get_access_token.py
def get_access_token(consumer_key, consumer_secret):
REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token"
ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token"
AUTHORIZATION_URL = "https://api.twitter.com/oauth/authorize"
oauth_client = OAuth1Session(
consumer_key, client_secret=consumer_secret, callback_uri="oob"
)
print("\nRequesting temp token from Twitter...\n")
try:
resp = oauth_client.fetch_request_token(REQUEST_TOKEN_URL)
except ValueError as e:
raise ValueError(
"Invalid response from Twitter requesting temp token: {0}".format(e)
)
url = oauth_client.authorization_url(AUTHORIZATION_URL)
print(
"I will try to start a browser to visit the following Twitter page "
"if a browser will not start, copy the URL to your browser "
"and retrieve the pincode to be used "
"in the next step to obtaining an Authentication Token: \n"
"\n\t{0}".format(url)
)
webbrowser.open(url)
pincode = input("\nEnter your pincode? ")
print("\nGenerating and signing request for an access token...\n")
oauth_client = OAuth1Session(
consumer_key,
client_secret=consumer_secret,
resource_owner_key=resp.get("oauth_token"),
resource_owner_secret=resp.get("oauth_token_secret"),
verifier=pincode,
)
try:
resp = oauth_client.fetch_access_token(ACCESS_TOKEN_URL)
except ValueError as e:
msg = ("Invalid response from Twitter requesting " "temp token: {0}").format(e)
raise ValueError(msg)
#
# print('''Your tokens/keys are as follows:
# consumer_key = {ck}
# consumer_secret = {cs}
# access_token_key = {atk}
# access_token_secret = {ats}'''.format(
# ck=consumer_key,
# cs=consumer_secret,
# atk=resp.get('oauth_token'),
# ats=resp.get('oauth_token_secret')))
return resp.get("oauth_token"), resp.get("oauth_token_secret")
if __name__ == "__main__":
import argparse
p = argparse.ArgumentParser(
description="Estimate gender distribution of "
"Twitter friends, followers and"
"your timeline"
)
p.add_argument("user_id", nargs=1)
p.add_argument(
"--self", help="perform gender analysis on user_id itself", action="store_true"
)
p.add_argument("--dry-run", help="fake results", action="store_true")
args = p.parse_args()
[user_id] = args.user_id
consumer_key = os.environ.get("CONSUMER_KEY") or input("Enter your consumer key: ")
consumer_secret = os.environ.get("CONSUMER_SECRET") or input(
"Enter your consumer secret: "
)
if args.dry_run:
tok, tok_secret = None, None
else:
tok, tok_secret = get_access_token(consumer_key, consumer_secret)
if args.self:
if args.dry_run:
g, declared = "male", True
else:
api = get_twitter_api(consumer_key, consumer_secret, tok, tok_secret)
g, declared = analyze_self(user_id, api)
print("{} ({})".format(g, "declared pronoun" if declared else "guess"))
sys.exit()
print(
"{:>25s}\t{:>10s}\t{:>10s}\t{:>10s}\t{:>10s}".format(
"", "NONBINARY", "MEN", "WOMEN", "UNKNOWN"
)
)
start = time.time()
cache = Cache()
if args.dry_run:
friends, followers, timeline = dry_run_analysis()
else:
api = get_twitter_api(consumer_key, consumer_secret, tok, tok_secret)
friends = analyze_friends(user_id, None, api, cache)
followers = analyze_followers(user_id, api, cache)
timeline = analyze_timeline(user_id, None, api, cache)
mytimeline = analyze_my_timeline(user_id, api, cache)
retweets = mytimeline.get("retweets")
replies = mytimeline.get("replies")
quotes = mytimeline.get("quotes")
mentions = mytimeline.get("mentions")
duration = time.time() - start
for user_type, an in [
("friends", friends),
("followers", followers),
("timeline", timeline),
("retweets", retweets),
("replies", replies),
("quotes", quotes),
("mentions", mentions),
]:
nb, men, women, andy = an.nonbinary.n, an.male.n, an.female.n, an.andy.n
print(
"{:>25s}\t{:>10.2f}%\t{:10.2f}%\t{:10.2f}%".format(
user_type, an.pct("nonbinary"), an.pct("male"), an.pct("female")
)
)
print(
"{:>25s}\t{:>10d} \t{:10d} \t{:10d} \t{:10d}".format(
"Guessed from name:",
an.guessed("nonbinary"),
an.guessed("male"),
an.guessed("female"),
an.andy.n,
)
)
print(
"{:>25s}\t{:>10d} \t{:10d} \t{:10d}".format(
"Declared pronouns:",
an.declared("nonbinary"),
an.declared("male"),
an.declared("female"),
)
)
print("")
print(
"Analysis took {:.2f} seconds, cache hit ratio {}%".format(
duration, cache.hit_percentage
)
)
|
ajdavis/twitter-gender-distribution
|
analyze.py
|
Python
|
apache-2.0
| 17,566
|
[
"VisIt"
] |
5319ba6b5cdd1702054cd89295803cc252d7ae00f91cc217c81a74f8b31cf655
|
from functools import partial
from typing import Callable, List, Union
import tensorflow as tf
from tensorflow import Tensor
from tensorflow_probability.python.distributions import (NOT_REPARAMETERIZED,
Distribution)
from odin.backend.interpolation import linear
from odin.backend.types_helpers import Coefficient
from odin.bay.random_variable import RVconf
from odin.bay.vi.autoencoder.beta_vae import BetaVAE
from odin.bay.vi.losses import maximum_mean_discrepancy
from odin.utils import as_tuple
# ===========================================================================
# Helpers
# ===========================================================================
def _clip_binary(x, eps=1e-7):
# this is an ad-hoc value; 1e-8 was also tested but returns NaN for RelaxedSigmoid
# all the time
return tf.clip_by_value(x, eps, 1. - eps)
# ===========================================================================
# InfoVAE
# ===========================================================================
class InfoVAE(BetaVAE):
""" For MNIST, the authors used scaling coefficient `lambda=1000`,
and information preference `alpha=0`.
Increase `np` (number of prior samples) in `divergence_kw` to reduce the
variance of MMD estimation.
Parameters
----------
alpha : float
Equal to `1 - beta`. Higher value of alpha places lower weight
on the KL-divergence
lamda : float
This is the value of lambda in the paper.
Higher value of lambda place more weight on the Info-divergence
(i.e. MMD)
divergence : a Callable.
Divergences families, for now only support 'mmd'
i.e. maximum-mean discrepancy.
References
----------
Zhao, S., Song, J., Ermon, S., et al. "infoVAE: Balancing Learning and
Inference in Variational Autoencoders".
Shengjia Zhao. "A Tutorial on Information Maximizing Variational
Autoencoders (infoVAE)".
https://ermongroup.github.io/blog/a-tutorial-on-mmd-variational-autoencoders
"""
def __init__(
self,
alpha: float = 0.0,
lamda: float = 100.0,
divergence: Callable[[Distribution, Distribution],
Tensor] = partial(maximum_mean_discrepancy,
kernel='gaussian',
q_sample_shape=None,
p_sample_shape=100),
name='InfoVAE',
**kwargs,
):
kwargs.pop('beta')
super().__init__(beta=1 - alpha, name=name, **kwargs)
self.lamda = tf.convert_to_tensor(lamda, dtype=self.dtype, name='lambda')
# select right divergence
assert callable(divergence), \
f"divergence must be callable, but given: {type(divergence)}"
self.divergence = divergence
@property
def alpha(self):
return 1 - self.beta
@alpha.setter
def alpha(self, alpha):
self.beta = 1 - alpha
def elbo_components(self, inputs, training=None, mask=None, **kwargs):
llk, kl = super().elbo_components(inputs, mask=mask, training=training)
px_z, qz_x = self.last_outputs
# repeat for each latent
for layer, qz in zip(as_tuple(self.latents), as_tuple(qz_x)):
# div(qZ||pZ)
info_div = self.divergence(qz, qz.KL_divergence.prior)
kl[f'div_{layer.name}'] = (self.lamda - self.beta) * info_div
return llk, kl
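# Editor-added note on the resulting objective (restating the code above;
# assuming BetaVAE weights its KL term by beta, as the class name suggests):
# with alpha = 0 (so beta = 1) and lamda = 100, every latent layer contributes
#   beta * KL(q(z|x) || p(z))            from BetaVAE.elbo_components
#   (lamda - beta) * MMD(q(z), p(z))     added here as kl['div_<layer>']
# i.e. a 99x-weighted MMD penalty on top of the usual KL term.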
# ===========================================================================
# Mutual Information VAE
# ===========================================================================
class MIVAE(BetaVAE):
""" Mutual-information VAE
The algorithm of MI-VAE is as following:
```
1. Compute q(z,c|x) and the KL-Divergence from the prior p(z).
2. Generate a sample (z, c) from the approximate posterior q.
3. Compute the conditional p(x|z) and incur the reconstruction loss.
---
4. Resample (z_prime, c_prime) ~ p(c,z) from the prior.
5. Recompute the conditional p(x|z_prime, c_prime) and generate a sample x_prime.
6. Recompute the approximate posterior q(c|x_prime)
7. Incur the loss for the MI lower bound q(c|x_prime).log_prob(c_prime).
```
Parameters
----------
minimize_kl_codes : a Boolean (default: True).
If False, only maximize the mutual information of the factors code
`q(c|X)` and the input `p(X|z, c)`; this is the original configuration
in the paper.
If True, encourage the mutual code to be factorized as well by minimizing
the KL divergence to the multivariate diagonal Gaussian prior.
References
----------
Ducau, F.N., Trénous, S. "Mutual Information in Variational Autoencoders".
(2017) https://github.com/fducau/infoVAE.
Chen, X., Chen, X., Duan, Y., et al. (2016) "InfoGAN: Interpretable
Representation Learning by Information Maximizing Generative
Adversarial Nets". URL : http://arxiv.org/ abs/1606.03657.
Ducau, F.N. Code: https://github.com/fducau/infoVAE
"""
def __init__(
self,
mi_coef: Coefficient = 0.2,
latents: RVconf = RVconf(32, 'mvndiag', projection=True, name='latents'),
mutual_codes: RVconf = RVconf(10,
'mvndiag',
projection=True,
name='codes'),
steps_without_mi: int = 100,
beta: Coefficient = linear(vmin=1e-6, vmax=1., steps=2000),
beta_codes: Coefficient = 0.,
name: str = 'MutualInfoVAE',
**kwargs,
):
super().__init__(beta=beta, latents=latents, name=name, **kwargs)
self.is_binary_code = mutual_codes.is_binary
if isinstance(mutual_codes, RVconf):
mutual_codes = mutual_codes.create_posterior()
self.mutual_codes = mutual_codes
self._mi_coef = mi_coef
self._beta_codes = beta_codes
self.steps_without_mi = int(steps_without_mi)
@classmethod
def is_hierarchical(self) -> bool:
return True
@property
def beta_codes(self) -> tf.Tensor:
if callable(self._beta_codes):
return self._beta_codes(self.step)
return tf.constant(self._beta_codes, dtype=self.dtype)
@property
def mi_coef(self) -> tf.Tensor:
if callable(self._mi_coef):
return self._mi_coef(self.step)
return tf.constant(self._mi_coef, dtype=self.dtype)
def sample_prior(self,
sample_shape: Union[int, List[int]] = (),
seed: int = 1) -> Tensor:
r""" Sampling from prior distribution """
z1 = super().sample_prior(sample_shape=sample_shape, seed=seed)
z2 = self.mutual_codes.prior.sample(sample_shape, seed=seed)
return (z1, z2)
def encode(self, inputs, **kwargs):
h_e = self.encoder(inputs, **kwargs)
# create the latents distribution
qz_x = self.latents(h_e, **kwargs)
qc_x = self.mutual_codes(h_e, **kwargs)
# need to keep the keras mask
mask = kwargs.get('mask', None)
qz_x._keras_mask = mask
qc_x._keras_mask = mask
return (qz_x, qc_x)
def decode(self, latents, **kwargs):
latents = tf.concat(latents, axis=-1)
return super().decode(latents, **kwargs)
def elbo_components(self, inputs, training=None, mask=None):
# NOTE: the original implementation does not take KL(qC_X||pC),
# only maximize the mutual information of q(c|X)
llk, kl = super().elbo_components(inputs, mask=mask, training=training)
px_z, (qz_x, qc_x) = self.last_outputs
## factorizing the mutual codes if required
kl_c = qc_x.KL_divergence(free_bits=self.free_bits)
kl[f'kl_{self.mutual_codes.name}'] = tf.cond(
self.beta_codes > 1e-8, # for numerical stability
true_fn=lambda: self.beta_codes * kl_c,
false_fn=lambda: tf.stop_gradient(kl_c),
)
## This approach is not working!
# z_prime = tf.stop_gradient(tf.convert_to_tensor(qz_x))
# batch_shape = z_prime.shape[:-1]
# c_prime = qc_x.KL_divergence.prior.sample(batch_shape)
## sampling for maximizing I(X;Z)
batch_shape = px_z.batch_shape
z_prime = qz_x.KL_divergence.prior.sample(batch_shape)
c_prime = qc_x.KL_divergence.prior.sample(batch_shape)
## clip to prevent underflow for relaxed-bernoulli
if self.is_binary_code:
c_prime = _clip_binary(c_prime)
## decoding
px = self.decode([z_prime, c_prime], training=training)
if px.reparameterization_type == NOT_REPARAMETERIZED:
x = px.mean()
else:
x = tf.convert_to_tensor(px)
qz_xprime, qc_xprime = self.encode(x, training=training)
## mutual information (we want to maximize this, hence, add it to the llk)
llk['mi_codes'] = self.mi_coef * tf.cond(
self.step > self.steps_without_mi,
true_fn=lambda: qc_xprime.log_prob(c_prime),
false_fn=lambda: 0.)
## this value is just for monitoring
mi_z = tf.stop_gradient(tf.reduce_mean(qz_xprime.log_prob(z_prime)))
llk['mi_latents'] = tf.cond(
tf.logical_or(tf.math.is_nan(mi_z), tf.math.is_inf(mi_z)),
true_fn=lambda: 0.,
false_fn=lambda: mi_z,
)
return llk, kl
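# Editor-added summary of the mutual-information pass above (no new behaviour,
# just the data flow restated):
#   (z', c') ~ p(z) x p(c)                       prior samples, one per batch element
#   x'       <- decode([z', c'])                 sample (or mean) of p(x | z', c')
#   q(z|x'), q(c|x') <- encode(x')               re-encode the generated sample
#   llk['mi_codes'] = mi_coef * log q(c'|x')     active only after steps_without_mi steps
# so the gradient encourages the code posterior to recover the codes that generated x'.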
# class InfoNCEVAE(betaVAE):
# r""" Mutual information bound based on Noise-Contrastive Estimation
# Reference:
# Tschannen, M., Djolonga, J., Rubenstein, P.K., Gelly, S., Lucic, M., 2019.
# "On Mutual Information Maximization for Representation Learning".
# arXiv:1907.13625 [cs, stat].
# https://github.com/google-research/google-research/tree/master/mutual_information_representation_learning
# """
# class IFVAE(betaVAE):
# r""" Adversarial information factorized VAE
# Reference:
# Creswell, A., Mohamied, Y., Sengupta, B., Bharath, A.A., 2018.
# "Adversarial Information Factorization". arXiv:1711.05175 [cs].
# """
# class InfoMaxVAE(betaVAE):
# r"""
# Reference:
# Rezaabad, A.L., Vishwanath, S., 2020. "Learning Representations by
# Maximizing Mutual Information in Variational Autoencoders".
# arXiv:1912.13361 [cs, stat].
# Hjelm, R.D., Fedorov, A., et al. 2019. "Learning Deep Representations by
# Mutual Information Estimation and Maximization". ICLR'19.
# """
|
imito/odin
|
odin/bay/vi/autoencoder/info_vae.py
|
Python
|
mit
| 10,029
|
[
"Gaussian"
] |
1905ff5cdcdbf15c75a54cc15b330f1fa33a6f9bcaf5109fdbe419b8d0ff194e
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
sample, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@SAMPLES_DIR@/lj_liquid_distribution.py")
@skipIfMissingFeatures
class Sample(ut.TestCase):
system = sample.system
if __name__ == "__main__":
ut.main()
|
KaiSzuttor/espresso
|
testsuite/scripts/samples/test_lj_liquid_distribution.py
|
Python
|
gpl-3.0
| 995
|
[
"ESPResSo"
] |
68c1431c6f1bbd65dc61cee1283e059dbcc55d14db653bc07915371f701781fa
|
from pynet.datasets.mnist import Mnist, Mnist_Blocks
import pynet.layer as layer
from pynet.model import *
import pynet.datasets.spec as spec
import pynet.datasets.mnist as mnist
import pynet.datasets.i2r as i2r
import pynet.datasets.recsys as recsys
import pynet.datasets.preprocessor as preproc
import pynet.learning_method as learning_methods
from pynet.learning_rule import LearningRule
from pynet.log import Log
from pynet.cost import Cost
import cPickle
import os
import pynet.datasets.dataset_noise as data_noise
import pynet.layer_noise as layer_noises
import theano
from theano.sandbox.cuda.var import CudaNdarraySharedVariable
floatX = theano.config.floatX
class Model(object):
def __init__(self, state):
self.state = state
def build_log(self, save_to_database=None, id=None):
log = Log(experiment_name = id is not None and '%s_%s'%(self.state.log.experiment_name,id) \
or self.state.log.experiment_name,
description = self.state.log.description,
save_outputs = self.state.log.save_outputs,
save_learning_rule = self.state.log.save_learning_rule,
save_model = self.state.log.save_model,
save_epoch_error = self.state.log.save_epoch_error,
save_to_database = save_to_database)
return log
def build_noise(self, noise):
noise_obj = None if noise.type is None else \
getattr(layer_noises, noise.type)()
if noise.type in ['BlackOut', 'MaskOut', 'BatchOut']:
noise_obj.ratio = noise.ratio
elif noise.type == 'Gaussian':
noise_obj.std = noise.std
noise_obj.mean = noise.mean
return noise_obj
def build_layer(self, layer_name):
output_noise = self.build_noise(layer_name.layer_noise)
output = getattr(layer, layer_name.type)(dim=layer_name.dim,
name=layer_name.name,
dropout_below=layer_name.dropout_below,
noise=output_noise)
return output
def build_learning_method(self):
if self.state.learning_method.type == 'SGD':
learn_method = getattr(learning_methods,
self.state.learning_method.type)(
learning_rate = self.state.learning_method.learning_rate,
momentum = self.state.learning_method.momentum)
elif self.state.learning_method.type == 'AdaGrad':
learn_method = getattr(learning_methods,
self.state.learning_method.type)(
learning_rate = self.state.learning_method.learning_rate,
momentum = self.state.learning_method.momentum)
elif self.state.learning_method.type == 'AdaDelta':
learn_method = getattr(learning_methods,
self.state.learning_method.type)(
rho = self.state.learning_method.rho,
eps = self.state.learning_method.eps)
else:
raise TypeError("not SGD, AdaGrad or AdaDelta")
return learn_method
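# Editor-added sketch of the config fields this method reads (names come from
# the branches above; the values are hypothetical):
# state.learning_method.type = 'AdaDelta'
# state.learning_method.rho = 0.95
# state.learning_method.eps = 1e-6
# or, for 'SGD' / 'AdaGrad':
# state.learning_method.type = 'SGD'
# state.learning_method.learning_rate = 0.01
# state.learning_method.momentum = 0.9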
def build_learning_rule(self):
learning_rule = LearningRule(max_col_norm = self.state.learning_rule.max_col_norm,
L1_lambda = self.state.learning_rule.L1_lambda,
L2_lambda = self.state.learning_rule.L2_lambda,
training_cost = Cost(type = self.state.learning_rule.cost),
stopping_criteria = {'max_epoch' : self.state.learning_rule.stopping_criteria.max_epoch,
'epoch_look_back' : self.state.learning_rule.stopping_criteria.epoch_look_back,
'cost' : Cost(type=self.state.learning_rule.stopping_criteria.cost),
'percent_decrease' : self.state.learning_rule.stopping_criteria.percent_decrease})
return learning_rule
def build_database(self, dataset, learning_rule, learning_method, model):
save_to_database = {'name' : self.state.log.save_to_database_name,
'records' : {'Dataset' : dataset.__class__.__name__,
'max_col_norm' : learning_rule.max_col_norm,
'Weight_Init_Seed' : model.rand_seed,
'Dropout_Below' : str([layer.dropout_below for layer in model.layers]),
'Learning_Method' : learning_method.__class__.__name__,
'Batch_Size' : dataset.batch_size,
'Dataset_Noise' : dataset.noise.__class__.__name__,
# 'Dataset_Dir' : dataset.data_dir,
'Feature_Size' : dataset.feature_size(),
'nblocks' : dataset.nblocks(),
'Layer_Types' : str([layer.__class__.__name__ for layer in model.layers]),
'Layer_Dim' : str([layer.dim for layer in model.layers]),
'Preprocessor' : dataset.preprocessor.__class__.__name__,
'Training_Cost' : learning_rule.cost.type,
'Stopping_Cost' : learning_rule.stopping_criteria['cost'].type}
}
if learning_method.__class__.__name__ == "SGD":
save_to_database["records"]["Learning_rate"] = learning_method.learning_rate
save_to_database["records"]["Momentum"] = learning_method.momentum
elif learning_method.__class__.__name__ == "AdaGrad":
save_to_database["records"]["Learning_rate"] = learning_method.learning_rate
save_to_database["records"]["Momentum"] = learning_method.momentum
elif learning_method.__class__.__name__ == "AdaDelta":
save_to_database["records"]["rho"] = float(learning_method.rho.get_value())
save_to_database["records"]["eps"] = float(learning_method.eps.get_value())
else:
raise TypeError("not SGD, AdaGrad or AdaDelta")
layer_noise = []
layer_noise_params = []
for layer in model.layers:
layer_noise.append(layer.noise.__class__.__name__)
if layer.noise.__class__.__name__ in ['BlackOut', 'MaskOut', 'BatchOut']:
layer_noise_params.append(layer.noise.ratio)
elif layer.noise.__class__.__name__ == 'Gaussian':
layer_noise_params.append((layer.noise.mean, layer.noise.std))
else:
layer_noise_params.append(None)
save_to_database["records"]["Layer_Noise"] = str(layer_noise)
save_to_database["records"]["Layer_Noise_Params"] = str(layer_noise_params)
save_to_database["records"]["Preprocessor_Params"] = ""
if dataset.preprocessor.__class__.__name__ == 'Scale':
save_to_database["records"]["Preprocessor_Params"] = str({'buffer': dataset.preprocessor.buffer,
'max': dataset.preprocessor.max,
'min': dataset.preprocessor.min,
'scale_range': dataset.preprocessor.scale_range})
return save_to_database
class NeuralNet(Model):
def build_dataset(self):
dataset = None
preprocessor = None if self.state.dataset.preprocessor.type is None else \
getattr(preproc, self.state.dataset.preprocessor.type)()
if self.state.dataset.preprocessor.type == 'Scale':
preprocessor.max = self.state.dataset.preprocessor.global_max
preprocessor.min = self.state.dataset.preprocessor.global_min
preprocessor.buffer = self.state.dataset.preprocessor.buffer
preprocessor.scale_range = self.state.dataset.preprocessor.scale_range
if self.state.dataset.type == 'Mnist':
dataset = Mnist(train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
preprocessor = preprocessor,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
if self.state.dataset.type[:11] == 'TransFactor':
dataset = getattr(tf, self.state.dataset.type)(
# feature_size = self.state.dataset.feature_size,
# target_size = self.state.dataset.target_size,
train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
preprocessor = preprocessor,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
elif self.state.dataset.type[:12] == 'Mnist_Blocks':
dataset = getattr(mnist, self.state.dataset.type)(
feature_size = self.state.dataset.feature_size,
target_size = self.state.dataset.feature_size,
train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
preprocessor = preprocessor,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
elif self.state.dataset.type[:20] == 'I2R_Posterior_Blocks':
dataset = getattr(i2r, self.state.dataset.type)(
feature_size = self.state.dataset.feature_size,
target_size = self.state.dataset.feature_size,
train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
one_hot = self.state.dataset.one_hot,
num_blocks = self.state.dataset.num_blocks,
preprocessor = preprocessor,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
elif self.state.dataset.type[:13] == 'I2R_Posterior':
dataset = getattr(i2r, self.state.dataset.type)(
train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
preprocessor = preprocessor,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
elif self.state.dataset.type[:6] == 'RecSys':
dataset = getattr(recsys, self.state.dataset.type)(
train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
preprocessor = preprocessor,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
return dataset
class AE(Model):
def build_dataset(self):
dataset = None
preprocessor = None if self.state.dataset.preprocessor.type is None else \
getattr(preproc, self.state.dataset.preprocessor.type)()
noise = None if self.state.dataset.dataset_noise.type is None else \
getattr(data_noise, self.state.dataset.dataset_noise.type)()
if self.state.dataset.dataset_noise.type == 'Gaussian':
noise.std = self.state.dataset.dataset_noise.std
if self.state.dataset.preprocessor.type == 'Scale':
preprocessor.max = self.state.dataset.preprocessor.global_max
preprocessor.min = self.state.dataset.preprocessor.global_min
preprocessor.buffer = self.state.dataset.preprocessor.buffer
preprocessor.scale_range = self.state.dataset.preprocessor.scale_range
if self.state.dataset.type == 'Mnist':
dataset = Mnist(train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
preprocessor = preprocessor,
noise = noise,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
train = dataset.get_train()
dataset.set_train(train.y, train.y)
valid = dataset.get_valid()
dataset.set_valid(valid.y, valid.y)
test = dataset.get_test()
dataset.set_test(test.y, test.y)
elif self.state.dataset.type[:12] == 'Mnist_Blocks':
dataset = getattr(mnist, self.state.dataset.type)(
feature_size = self.state.dataset.feature_size,
target_size = self.state.dataset.feature_size,
train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
preprocessor = preprocessor,
noise = noise,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
elif self.state.dataset.type[:4] == 'P276':
dataset = getattr(spec, self.state.dataset.type)(
train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
preprocessor = preprocessor,
noise = noise,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
train = dataset.get_train()
dataset.set_train(train.X, train.X)
valid = dataset.get_valid()
dataset.set_valid(valid.X, valid.X)
test = dataset.get_test()
dataset.set_test(test.X, test.X)
elif self.state.dataset.type[:5] == 'Laura':
dataset = getattr(spec, self.state.dataset.type)(
feature_size = self.state.dataset.feature_size,
target_size = self.state.dataset.feature_size,
train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
num_blocks = self.state.dataset.num_blocks,
preprocessor = preprocessor,
noise = noise,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
elif self.state.dataset.type[:18] == 'TransFactor_Blocks':
dataset = getattr(tf, self.state.dataset.type)(
feature_size = self.state.dataset.feature_size,
target_size = self.state.dataset.feature_size,
one_hot = self.state.dataset.one_hot,
num_blocks = self.state.dataset.num_blocks,
train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
preprocessor = preprocessor,
noise = noise,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
elif self.state.dataset.type[:11] == 'TransFactor':
dataset = getattr(tf, self.state.dataset.type)(
# feature_size = self.state.dataset.feature_size,
# target_size = self.state.dataset.feature_size,
train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
preprocessor = preprocessor,
noise = noise,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
train = dataset.get_train()
dataset.set_train(train.X, train.X)
valid = dataset.get_valid()
dataset.set_valid(valid.X, valid.X)
test = dataset.get_test()
dataset.set_test(test.X, test.X)
elif self.state.dataset.type[:13] == 'I2R_Posterior':
dataset = getattr(i2r, self.state.dataset.type)(
train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
preprocessor = preprocessor,
noise = noise,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
return dataset
def build_one_hid_model(self, input_dim):
model = AutoEncoder(input_dim=input_dim, rand_seed=self.state.model.rand_seed)
h1_noise = self.build_noise(self.state.hidden1.layer_noise)
hidden1 = getattr(layer, self.state.hidden1.type)(dim=self.state.hidden1.dim,
name=self.state.hidden1.name,
dropout_below=self.state.hidden1.dropout_below,
noise=h1_noise)
model.add_encode_layer(hidden1)
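# Note: the decode layer below mirrors the encoder with tied (transposed) weights,
# W = hidden1.W.T, so encoder and decoder share one weight matrix (a common
# autoencoder design choice in this model builder).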
h1_mirror = getattr(layer, self.state.h1_mirror.type)(dim=input_dim,
name=self.state.h1_mirror.name,
W=hidden1.W.T,
dropout_below=self.state.h1_mirror.dropout_below)
model.add_decode_layer(h1_mirror)
return model
def build_two_hid_model(self, input_dim):
model = AutoEncoder(input_dim=input_dim, rand_seed=self.state.model.rand_seed)
h1_noise = self.build_noise(self.state.hidden1.layer_noise)
hidden1 = getattr(layer, self.state.hidden1.type)(dim=self.state.hidden1.dim,
name=self.state.hidden1.name,
dropout_below=self.state.hidden1.dropout_below,
noise=h1_noise)
model.add_encode_layer(hidden1)
h2_noise = self.build_noise(self.state.hidden2.layer_noise)
hidden2 = getattr(layer, self.state.hidden2.type)(dim=self.state.hidden2.dim,
name=self.state.hidden2.name,
dropout_below=self.state.hidden2.dropout_below,
noise=h2_noise)
model.add_encode_layer(hidden2)
hidden2_mirror = getattr(layer, self.state.h2_mirror.type)(dim=self.state.hidden1.dim,
name=self.state.h2_mirror.name,
dropout_below=self.state.h2_mirror.dropout_below,
W = hidden2.W.T)
model.add_decode_layer(hidden2_mirror)
hidden1_mirror = getattr(layer, self.state.h1_mirror.type)(dim=input_dim,
name=self.state.h1_mirror.name,
dropout_below=self.state.h1_mirror.dropout_below,
W = hidden1.W.T)
model.add_decode_layer(hidden1_mirror)
return model
def build_three_hid_model(self, input_dim):
model = AutoEncoder(input_dim=input_dim, rand_seed=self.state.model.rand_seed)
hidden1 = self.build_layer(self.state.hidden1)
hidden2 = self.build_layer(self.state.hidden2)
hidden3 = self.build_layer(self.state.hidden3)
model.add_encode_layer(hidden1)
model.add_encode_layer(hidden2)
model.add_encode_layer(hidden3)
h3_mirror = getattr(layer, self.state.h3_mirror.type)(dim=hidden2.dim,
name=self.state.h3_mirror.name,
dropout_below=self.state.h3_mirror.dropout_below,
W = hidden3.W.T)
h2_mirror = getattr(layer, self.state.h2_mirror.type)(dim=hidden1.dim,
name=self.state.h2_mirror.name,
dropout_below=self.state.h2_mirror.dropout_below,
W = hidden2.W.T)
h1_mirror = getattr(layer, self.state.h1_mirror.type)(dim=input_dim,
name=self.state.h1_mirror.name,
dropout_below=self.state.h1_mirror.dropout_below,
W = hidden1.W.T)
model.add_decode_layer(h3_mirror)
model.add_decode_layer(h2_mirror)
model.add_decode_layer(h1_mirror)
return model
|
hycis/Pynet
|
hps/models/model.py
|
Python
|
apache-2.0
| 23,549
|
[
"Gaussian"
] |
706a8a350d694048dacc4f93088baef9c03d78fbd5bcc2a7fd64d09d646a0c52
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2011 Carlos Abalde <carlos.abalde@gmail.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os.path
import string
import urllib
import duplicity.backend
from duplicity.errors import BackendException
class GDocsBackend(duplicity.backend.Backend):
"""Connect to remote store using Google Google Documents List API"""
ROOT_FOLDER_ID = 'folder%3Aroot'
BACKUP_DOCUMENT_TYPE = 'application/binary'
def __init__(self, parsed_url):
duplicity.backend.Backend.__init__(self, parsed_url)
# Import Google Data APIs libraries.
try:
global atom
global gdata
import atom.data
import gdata.client
import gdata.docs.client
import gdata.docs.data
except ImportError as e:
raise BackendException("""\
Google Docs backend requires Google Data APIs Python Client Library (see http://code.google.com/p/gdata-python-client/).
Exception: %s""" % str(e))
# Setup client instance.
self.client = gdata.docs.client.DocsClient(source='duplicity $version')
self.client.ssl = True
self.client.http_client.debug = False
self._authorize(parsed_url.username + '@' + parsed_url.hostname, self.get_password())
# Fetch destination folder entry (and create hierarchy if required).
folder_names = string.split(parsed_url.path[1:], '/')
parent_folder = None
parent_folder_id = GDocsBackend.ROOT_FOLDER_ID
for folder_name in folder_names:
entries = self._fetch_entries(parent_folder_id, 'folder', folder_name)
if entries is not None:
if len(entries) == 1:
parent_folder = entries[0]
elif len(entries) == 0:
folder = gdata.docs.data.Resource(type='folder', title=folder_name)
parent_folder = self.client.create_resource(folder, collection=parent_folder)
else:
parent_folder = None
if parent_folder:
parent_folder_id = parent_folder.resource_id.text
else:
raise BackendException("Error while creating destination folder '%s'." % folder_name)
else:
raise BackendException("Error while fetching destination folder '%s'." % folder_name)
self.folder = parent_folder
def _put(self, source_path, remote_filename):
self._delete(remote_filename)
# Set up the uploader instance. Note that resumable uploads are required in order to
# enable uploads for all file types.
# (see http://googleappsdeveloper.blogspot.com/2011/05/upload-all-file-types-to-any-google.html)
file = source_path.open()
uploader = gdata.client.ResumableUploader(
self.client, file,
GDocsBackend.BACKUP_DOCUMENT_TYPE,
os.path.getsize(file.name),
chunk_size=gdata.client.ResumableUploader.DEFAULT_CHUNK_SIZE,
desired_class=gdata.docs.data.Resource)
if uploader:
# Chunked upload.
entry = gdata.docs.data.Resource(title=atom.data.Title(text=remote_filename))
uri = self.folder.get_resumable_create_media_link().href + '?convert=false'
entry = uploader.UploadFile(uri, entry=entry)
if not entry:
raise BackendException("Failed to upload file '%s' to remote folder '%s'"
% (source_path.get_filename(), self.folder.title.text))
else:
raise BackendException("Failed to initialize upload of file '%s' to remote folder '%s'"
% (source_path.get_filename(), self.folder.title.text))
assert not file.close()
def _get(self, remote_filename, local_path):
entries = self._fetch_entries(self.folder.resource_id.text,
GDocsBackend.BACKUP_DOCUMENT_TYPE,
remote_filename)
if len(entries) == 1:
entry = entries[0]
self.client.DownloadResource(entry, local_path.name)
else:
raise BackendException("Failed to find file '%s' in remote folder '%s'"
% (remote_filename, self.folder.title.text))
def _list(self):
entries = self._fetch_entries(self.folder.resource_id.text,
GDocsBackend.BACKUP_DOCUMENT_TYPE)
return [entry.title.text for entry in entries]
def _delete(self, filename):
entries = self._fetch_entries(self.folder.resource_id.text,
GDocsBackend.BACKUP_DOCUMENT_TYPE,
filename)
for entry in entries:
self.client.delete(entry.get_edit_link().href + '?delete=true', force=True)
def _authorize(self, email, password, captcha_token=None, captcha_response=None):
try:
self.client.client_login(email,
password,
source='duplicity $version',
service='writely',
captcha_token=captcha_token,
captcha_response=captcha_response)
except gdata.client.CaptchaChallenge as challenge:
print('A captcha challenge is required. Please visit ' + challenge.captcha_url)
answer = None
while not answer:
answer = raw_input('Answer to the challenge? ')
self._authorize(email, password, challenge.captcha_token, answer)
except gdata.client.BadAuthentication:
raise BackendException(
'Invalid user credentials given. Be aware that accounts '
'that use 2-step verification require creating an application specific '
'access code for using this Duplicity backend. Follow the instructions at '
'http://www.google.com/support/accounts/bin/static.py?page=guide.cs&guide=1056283&topic=1056286 '
'and create your application-specific password to run duplicity backups.')
def _fetch_entries(self, folder_id, type, title=None):
# Build URI.
uri = '/feeds/default/private/full/%s/contents' % folder_id
if type == 'folder':
uri += '/-/folder?showfolders=true'
elif type == GDocsBackend.BACKUP_DOCUMENT_TYPE:
uri += '?showfolders=false'
else:
uri += '?showfolders=true'
if title:
uri += '&title=' + urllib.quote(title) + '&title-exact=true'
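# For illustration (hypothetical folder/title values): folder_id='folder%3Aroot',
# type='folder', title='backups' would yield
# '/feeds/default/private/full/folder%3Aroot/contents/-/folder?showfolders=true&title=backups&title-exact=true'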
# Fetch entries.
entries = self.client.get_all_resources(uri=uri)
# When filtering by entry title, API is returning (don't know why) documents in other
# folders (apart from folder_id) matching the title, so some extra filtering is required.
if title:
result = []
for entry in entries:
resource_type = entry.get_resource_type()
if (not type) \
or (type == 'folder' and resource_type == 'folder') \
or (type == GDocsBackend.BACKUP_DOCUMENT_TYPE and resource_type != 'folder'):
if folder_id != GDocsBackend.ROOT_FOLDER_ID:
for link in entry.in_collections():
folder_entry = self.client.get_entry(link.href, None, None,
desired_class=gdata.docs.data.Resource)
if folder_entry and (folder_entry.resource_id.text == folder_id):
result.append(entry)
elif len(entry.in_collections()) == 0:
result.append(entry)
else:
result = entries
# Done!
return result
""" gdata is an alternate way to access gdocs, currently 05/2015 lacking OAuth support """
duplicity.backend.register_backend('gdata+gdocs', GDocsBackend)
duplicity.backend.uses_netloc.extend(['gdata+gdocs'])
|
nils-tekampe/duplicity
|
duplicity/backends/gdocsbackend.py
|
Python
|
gpl-2.0
| 8,939
|
[
"VisIt"
] |
98bb027746d4acffffcfe2df9fa0f5fa7f644c7cd66350d37876f05665fe7ec5
|
"""
.. _sfm-track:
==================================================
Tracking with the Sparse Fascicle Model
==================================================
Tracking requires a per-voxel model. Here, the model is the Sparse Fascicle
Model, described in [Rokem2015]_. This model reconstructs the diffusion signal
as a combination of the signals from different fascicles (see also
:ref:`sfm-reconst`).
To begin, we read the Stanford HARDI data-set into memory:
"""
from dipy.data import read_stanford_labels
hardi_img, gtab, labels_img = read_stanford_labels()
data = hardi_img.get_data()
labels = labels_img.get_data()
affine = hardi_img.affine
"""
This dataset provides a label map (generated using Freesurfer), in which the
white matter voxels are labeled as either 1 or 2:
"""
white_matter = (labels == 1) | (labels == 2)
"""
The first step in tracking is generating a model from which tracking directions
can be extracted in every voxel.
For the SFM, this requires first that we define a canonical response function
that will be used to deconvolve the signal in every voxel:
"""
from dipy.reconst.csdeconv import auto_response
response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
"""
We initialize an SFM model object, using this response function and using the
default sphere (362 vertices, symmetrically distributed on the surface of the
sphere):
"""
from dipy.data import get_sphere
sphere = get_sphere()
from dipy.reconst import sfm
sf_model = sfm.SparseFascicleModel(gtab, sphere=sphere,
l1_ratio=0.5, alpha=0.001,
response=response[0])
"""
We fit this model to the data in each voxel in the white-matter mask, so that
we can use these directions in tracking:
"""
from dipy.direction.peaks import peaks_from_model
pnm = peaks_from_model(sf_model, data, sphere,
relative_peak_threshold=.5,
min_separation_angle=25,
mask=white_matter,
parallel=True
)
"""
A ThresholdTissueClassifier object is used to segment the data to track only
through areas in which the Generalized Fractional Anisotropy (GFA) is
sufficiently high.
"""
from dipy.tracking.local import ThresholdTissueClassifier
classifier = ThresholdTissueClassifier(pnm.gfa, .25)
"""
Tracking will be started from a set of seeds evenly distributed in the white
matter:
"""
from dipy.tracking import utils
seeds = utils.seeds_from_mask(white_matter, density=[2, 2, 2], affine=affine)
"""
For the sake of brevity, we will take only the first 1000 seeds, generating
only 1000 streamlines. Remove this line to track from many more points in all of
the white matter.
"""
seeds = seeds[:1000]
"""
We now have the necessary components to construct a tracking pipeline and
execute the tracking:
"""
from dipy.tracking.local import LocalTracking
streamlines = LocalTracking(pnm, classifier, seeds, affine, step_size=.5)
streamlines = list(streamlines)
"""
Next, we will create a visualization of these streamlines, relative to this
subject's T1-weighted anatomy:
"""
from dipy.viz import fvtk
from dipy.viz.colormap import line_colors
from dipy.data import read_stanford_t1
from dipy.tracking.utils import move_streamlines
from numpy.linalg import inv
t1 = read_stanford_t1()
t1_data = t1.get_data()
t1_aff = t1.affine
color = line_colors(streamlines)
"""
To speed up visualization, we will select a random sub-set of streamlines to
display. This is particularly important if you track from seeds throughout the
entire white matter, generating many streamlines. In this case, for
demonstration purposes, we subselect 900 streamlines.
"""
from dipy.tracking.streamline import select_random_set_of_streamlines
plot_streamlines = select_random_set_of_streamlines(streamlines, 900)
streamlines_actor = fvtk.streamtube(
list(move_streamlines(plot_streamlines, inv(t1_aff))),
line_colors(streamlines), linewidth=0.1)
vol_actor = fvtk.slicer(t1_data)
vol_actor.display(40, None, None)
vol_actor2 = vol_actor.copy()
vol_actor2.display(None, None, 35)
ren = fvtk.ren()
fvtk.add(ren, streamlines_actor)
fvtk.add(ren, vol_actor)
fvtk.add(ren, vol_actor2)
fvtk.record(ren, n_frames=1, out_path='sfm_streamlines.png',
size=(800, 800))
"""
.. figure:: sfm_streamlines.png
:align: center
**Sparse Fascicle Model tracks**
Finally, we can save these streamlines to a 'trk' file, for use in other
software, or for further analysis.
"""
from dipy.io.trackvis import save_trk
save_trk("sfm_detr.trk", streamlines, affine, labels.shape)
"""
References
----------
.. [Rokem2015] Ariel Rokem, Jason D. Yeatman, Franco Pestilli, Kendrick
N. Kay, Aviv Mezer, Stefan van der Walt, Brian A. Wandell
(2015). Evaluating the accuracy of diffusion MRI models in white
matter. PLoS ONE 10(4): e0123272. doi:10.1371/journal.pone.0123272
"""
|
villalonreina/dipy
|
doc/examples/sfm_tracking.py
|
Python
|
bsd-3-clause
| 4,942
|
[
"Brian"
] |
383984a1991fd4ef53b513251e3dcfc9ce468c235d16d38bc7625557415b55d2
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2022, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Overlap computation
######################
Utilities for computing the overlap between gaussian type functions.
"""
import numpy as np
from numba import jit, prange
from .numerical import fac, fac2, dfac21, sdist, choose
from .car2sph import car2sph_scaled
from exatomic.base import nbche
#################################
# Primitive cartesian integrals #
#################################
@jit(nopython=True, nogil=True, cache=nbche)
def _fj(j, l, m, a, b):
"""From Handbook of Computational Quantum Chemistry by David B. Cook
in chapter 7.7.1 -- Essentially a FOILing of the pre-exponential
cartesian power dependence in one dimension."""
tot, i, f = 0., max(0, j - m), min(j, l) + 1
for k in prange(i, f):
tot += (choose(l, k) *
choose(m, int(j - k)) *
a ** (l - k) *
b ** (m + k - j))
return tot
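# For reference, the loop above accumulates the coefficient of x**j in the
# binomial expansion of (x + a)**l * (x + b)**m:
#     f_j(l, m, a, b) = sum_k C(l, k) * C(m, j - k) * a**(l - k) * b**(m + k - j),
# with a = PA_x and b = PB_x when called from _nin.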
@jit(nopython=True,nogil=True, cache=nbche)
def _nin(l, m, pa, pb, p, N):
"""From Handbook of Computational Quantum Chemistry by David B. Cook
in chapter 7.7.1 -- Sums the result of _fj over the total angular momentum
in one dimension."""
ltot = l + m
if not ltot: return N
tot = 0.
for j in prange(int(ltot // 2 + 1)):
tot += (_fj(2 * j, l, m, pa, pb) *
dfac21(j) / (2 * p) ** j)
return tot * N
@jit(nopython=True, nogil=True, cache=nbche)
def _gaussian_product(a, b, ax, ay, az, bx, by, bz):
"""
From Molecular Electronic-Structure Theory by Trygve Helgaker et al.
Computes a product gaussian following section 9.2.3; see equations
9.2.10 through 9.2.15.
"""
p = a + b
mu = a * b / p
px = (a * ax + b * bx) / p
py = (a * ay + b * by) / p
pz = (a * az + b * bz) / p
ab2 = sdist(ax, ay, az, bx, by, bz)
return (np.sqrt(np.pi / p), p, mu, ab2,
px - ax, py - ay, pz - az,
px - bx, py - by, pz - bz)
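# Sketch of the identities being used (standard Gaussian product theorem):
#     p = a + b,    mu = a*b/p,    P = (a*A + b*B)/p,
#     exp(-a*|r-A|**2) * exp(-b*|r-B|**2) = exp(-mu*|A-B|**2) * exp(-p*|r-P|**2),
# so the tuple returned above carries sqrt(pi/p), p, mu, |A-B|**2 and the
# per-component differences P-A and P-B consumed by _nin.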
@jit(nopython=True, nogil=True, cache=nbche)
def _primitive_overlap_product(l1, m1, n1, l2, m2, n2,
N, p, mu, ab2, pax, pay, paz, pbx, pby, pbz):
"""Compute primitive cartesian overlap integral in terms of a gaussian product."""
return (np.exp(-mu * ab2) * _nin(l1, l2, pax, pbx, p, N)
* _nin(m1, m2, pay, pby, p, N)
* _nin(n1, n2, paz, pbz, p, N))
@jit(nopython=True, nogil=True, cache=nbche)
def _primitive_overlap(a1, a2, ax, ay, az, bx, by, bz, l1, m1, n1, l2, m2, n2):
"""Compute a primitive cartesian overlap integral."""
#N, p, mu, ab2, pax, pay, paz, pbx, pby, pbz = \
p = _gaussian_product(a1, a2, ax, ay, az, bx, by, bz)
return _primitive_overlap_product(l1, m1, n1, l2, m2, n2, *p)
@jit(nopython=True, nogil=True, cache=nbche)
def _primitive_kinetic(a1, a2, ax, ay, az, bx, by, bz, l1, m1, n1, l2, m2, n2):
"""Compute the kinetic energy as a linear combination of overlap terms."""
#N, p, mu, ab2, pax, pay, paz, pbx, pby, pbz = \
p = _gaussian_product(a1, a2, ax, ay, az, bx, by, bz)
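# Assumed decomposition (standard integration-by-parts form, per Cartesian direction):
#     2*T = sum_{x,y,z} [ 4*a1*a2*S(+1,+1) + l1*l2*S(-1,-1)
#                         - 2*a2*l1*S(-1,+1) - 2*a1*l2*S(+1,-1) ],
# where S(+1,-1) denotes the overlap with that direction's angular momentum
# raised on the bra and lowered on the ket (and analogously for m and n).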
t = 4 * a1 * a2 * _primitive_overlap_product(l1 + 1, m1, n1, l2 + 1, m2, n2, *p)
t += 4 * a1 * a2 * _primitive_overlap_product(l1, m1 + 1, n1, l2, m2 + 1, n2, *p)
t += 4 * a1 * a2 * _primitive_overlap_product(l1, m1, n1 + 1, l2, m2, n2 + 1, *p)
if l1 and l2:
t += l1 * l2 * _primitive_overlap_product(l1 - 1, m1, n1, l2 - 1, m2, n2, *p)
if m1 and m2:
t += m1 * m2 * _primitive_overlap_product(l1, m1 - 1, n1, l2, m2 - 1, n2, *p)
if n1 and n2:
t += n1 * n2 * _primitive_overlap_product(l1, m1, n1 - 1, l2, m2, n2 - 1, *p)
if l1: t -= 2 * a2 * l1 * _primitive_overlap_product(l1 - 1, m1, n1, l2 + 1, m2, n2, *p)
if l2: t -= 2 * a1 * l2 * _primitive_overlap_product(l1 + 1, m1, n1, l2 - 1, m2, n2, *p)
if m1: t -= 2 * a2 * m1 * _primitive_overlap_product(l1, m1 - 1, n1, l2, m2 + 1, n2, *p)
if m2: t -= 2 * a1 * m2 * _primitive_overlap_product(l1, m1 + 1, n1, l2, m2 - 1, n2, *p)
if n1: t -= 2 * a2 * n1 * _primitive_overlap_product(l1, m1, n1 - 1, l2, m2, n2 + 1, *p)
if n2: t -= 2 * a1 * n2 * _primitive_overlap_product(l1, m1, n1 + 1, l2, m2, n2 - 1, *p)
return t / 2
######################################
# Generators over shells/shell-pairs #
######################################
@jit(nopython=True, nogil=True, cache=False)
def _iter_atom_shells(ptrs, xyzs, *shls):
"""Generator yielding indices, atomic coordinates and basis set shells."""
nshl = len(ptrs)
for i in range(nshl):
pa, pi = ptrs[i]
yield (i, xyzs[pa][0], xyzs[pa][1], xyzs[pa][2], shls[pi])
@jit(nopython=True, nogil=True, cache=False)
def _iter_atom_shell_pairs(ptrs, xyzs, *shls):
"""Generator yielding indices, atomic coordinates and basis set
shells in block-pair order."""
nshl = len(ptrs)
for i in range(nshl):
for j in range(i + 1):
pa, pi = ptrs[i]
pb, pj = ptrs[j]
yield (i, j, xyzs[pa][0], xyzs[pa][1], xyzs[pa][2],
xyzs[pb][0], xyzs[pb][1], xyzs[pb][2],
shls[pi], shls[pj])
############################################
# Integral processing for Shell objects    #
############################################
@jit(nopython=True, nogil=True, cache=nbche)
def _cartesian_overlap_shell(xa, ya, za, xb, yb, zb,
li, mi, ni, lj, mj, nj,
ialpha, jalpha):
"""Compute pairwise cartesian integrals exponents in a block-pair."""
pints = np.empty((len(ialpha), len(jalpha)))
for i, ia in enumerate(ialpha):
for j, ja in enumerate(jalpha):
pints[i, j] = _primitive_overlap(ia, ja,
xa, ya, za, xb, yb, zb,
li, mi, ni, lj, mj, nj)
return pints
@jit(nopython=True, nogil=True, cache=nbche)
def _cartesian_shell_pair(ax, ay, az, bx, by, bz, ishl, jshl):
"""Compute fully contracted block-pair integrals including
expansion of angular momentum dependence."""
inrm = ishl.norm_contract()
jnrm = jshl.norm_contract()
ideg = (ishl.L + 1) * (ishl.L + 2) // 2
jdeg = (jshl.L + 1) * (jshl.L + 2) // 2
pint = np.empty((ideg * ishl.nprim, jdeg * jshl.nprim))
for magi, (li, mi, ni) in enumerate(ishl.enum_cartesian()):
for magj, (lj, mj, nj) in enumerate(jshl.enum_cartesian()):
ianc = magi * ishl.nprim
janc = magj * jshl.nprim
pint[ianc : ianc + ishl.nprim,
janc : janc + jshl.nprim] = \
_cartesian_overlap_shell(ax, ay, az, bx, by, bz,
li, mi, ni, lj, mj, nj,
ishl.alphas, jshl.alphas)
if ishl.L:
inrm = np.kron(np.eye(ideg), inrm)
if ishl.spherical:
inrm = np.dot(inrm, np.kron(car2sph_scaled(ishl.L),
np.eye(ishl.ncont)))
if jshl.L:
jnrm = np.kron(np.eye(jdeg), jnrm)
if jshl.spherical:
jnrm = np.dot(jnrm, np.kron(car2sph_scaled(jshl.L),
np.eye(jshl.ncont)))
return np.dot(inrm.T, np.dot(pint, jnrm))
@jit(nopython=True, nogil=True, cache=False)
def _cartesian_shell_pairs(ndim, ptrs, xyzs, *shls):
"""Construct a full square (overlap) integral matrix."""
cart = np.zeros((ndim, ndim))
ii = 0
for i, j, ax, ay, az, bx, by, bz, ishl, jshl \
in _iter_atom_shell_pairs(ptrs, xyzs, *shls):
if not j: jj = 0
cint = _cartesian_shell_pair(ax, ay, az, bx, by, bz, ishl, jshl)
iblk, jblk = cint.shape
cart[ii : ii + iblk, jj : jj + jblk] = cint
if i != j: cart[jj : jj + jblk, ii : ii + iblk] = cint.T
else: ii += iblk
jj += jblk
return cart
##################################
# Obara-Saika recursion relation #
##################################
@jit(nopython=True, nogil=True, cache=nbche)
def _obara_s_recurr(p, l, m, pa, pb, s):
"""There is a bug in this function. Do not use."""
if not l + m: return s
p2 = 1 / (2 * p)
s0 = np.zeros((l + 1, m + 1))
s0[0, 0] = s
if l: s0[1, 0] = pa * s
if m: s0[0, 1] = pb * s
if l and m: s0[1, 1] = pb * s0[1, 0] + p2 * s
for i in range(1, l):
for j in range(1, m):
mul = p2 * (i * s0[i - 1, j] + j * s0[i, j - 1])
s0[i + 1, j] = pa * s0[i, j] + mul
s0[i, j + 1] = pb * s0[i, j] + mul
s0[i + 1, j + 1] = pa * s0[i, j + 1] + p2 * ((i + 1) * s0[i, j] + j * s0[i + 1, j])
return s0[l, m]
@jit(nopython=True, nogil=True, cache=nbche)
def _nin(o1, o2, po1, po2, gamma, pg12):
"""Helper function for gaussian overlap between 2 centers."""
otot = o1 + o2
if not otot: return pg12
if otot % 2: otot -= 1
oio = 0.
for i in range(otot // 2 + 1):
k = 2 * i
prod = pg12 * fac2(k - 1) / ((2 * gamma) ** i)
qlo = max(-k, (k - 2 * o2))
qhi = min( k, (2 * o1 - k)) + 1
fk = 0.
for q in range(qlo, qhi, 2):
xx = (k + q) // 2
zz = (k - q) // 2
newt1 = fac(o1) / fac(xx) / fac(o1 - xx)
newt2 = fac(o2) / fac(zz) / fac(o2 - zz)
fk += newt1 * newt2 * (po1 ** (o1 - xx)) * (po2 ** (o2 - zz))
oio += prod * fk
return oio
|
exa-analytics/exatomic
|
exatomic/algorithms/overlap.py
|
Python
|
apache-2.0
| 9,560
|
[
"Gaussian"
] |
20833bdcb10e82740b3e42e705ede911185d6c528db752a683f129b2a15084ce
|
# coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import range
import os
import re
import sys
import glob
import shutil
import argparse
import itertools
import numpy as np
import mdtraj as md
from AdaptivePELE.atomset import atomset
from AdaptivePELE.freeEnergies import utils
from AdaptivePELE.utilities import utilities
PARALELLIZATION = True
try:
import multiprocessing as mp
except ImportError:
PARALELLIZATION = False
PRODY = True
try:
import prody as pd
except ImportError:
PRODY = False
MDTRAJ_FORMATS = set(['.xtc', '.dcd', '.dtr', '.trr', 'mdcrd', 'nc'])
VALID_CM_MODES = ["p-lig", "p-p"]
# consider most extreme atoms
EXTRA_ATOMS = {u"ALA": u"empty", u"VAL": u"CG1", u"LEU": u"CD1", u"ILE": u"CD1",
u"MET": u"CE", u"PRO": u"empty", u"PHE": u"CE1", u"TYR": u"CE1",
u"TRP": u"CZ3", u"SER": u"OG", u"THR": u"OG1", u"CYS": u"SG",
u"ASN": u"OD1", u"GLN": u"OE1", u"LYS": u"NZ", u"HIS": u"CE1",
u"HIE": u"CE1", u"HID": u"CE1", u"HIP": u"CE1", u"ARG": u"NH1",
u"ASP": u"OD1", u"GLU": u"OE1", u"GLY": u"empty"}
# consider more central atoms in the side-chain
EXTRA_ATOMS_CENTRAL = {u"ALA": u"empty", u"VAL": u"empty", u"LEU": u"CG", u"ILE": u"CG2",
u"MET": u"CG", u"PRO": u"empty", u"PHE": u"CZ", u"TYR": u"CZ",
u"TRP": u"CE2", u"SER": u"OG", u"THR": u"OG1", u"CYS": u"SG",
u"ASN": u"CG", u"GLN": u"CG", u"LYS": u"CG", u"HIS": u"CG",
u"HIE": u"CG", u"HID": u"CG", u"HIP": u"CG", u"ARG": u"CD",
u"ASP": u"CG", u"GLU": u"CG", u"GLY": u"empty"}
class Constants(object):
def __init__(self):
self.extractedTrajectoryFolder = "%s/extractedCoordinates"
self.baseExtractedTrajectoryName = "coord_"
self.reportName = '*report_'
self.baseGatheredFilename = "traj_*.dat"
self.outputTrajectoryFolder = "%s/repeatedExtractedCoordinates"
self.ligandTrajectoryFolder = "ligand_trajs"
self.ligandTrajectoryBasename = os.path.join(self.ligandTrajectoryFolder, "traj_ligand_%s.pdb")
self.gatherTrajsFolder = "allTrajs"
self.gatherTrajsFilename = os.path.join(self.gatherTrajsFolder, "traj_%s_%s.dat")
self.gatherNonRepeatedFolder = os.path.join(self.gatherTrajsFolder, "extractedCoordinates")
self.gatherNonRepeatedTrajsFilename = os.path.join(self.gatherNonRepeatedFolder, "traj_%s_%s.dat")
class ParamsHandler(object):
def __init__(self, folderWithTrajs, atom_id, lig_name, total_steps, sequential, writeLigandTrajectory, set_number, protein_CA, noRepeat, numProcessors, parallelize, topol, sidechains, sidechains_folder, CM, use_extra_atoms, CM_mode, dihedrals, dihedrals_projection):
self.folder_name = folderWithTrajs
self.atomIds = atom_id
self.lig_resname = lig_name
self.numtotalSteps = total_steps
self.enforceSequential_run = sequential
self.writeLigandTrajectory = writeLigandTrajectory
self.setNumber = set_number
self.protein_CA = protein_CA
self.non_Repeat = noRepeat
self.nProcessors = numProcessors
self.parallelize = parallelize
self.topology = topol
self.sidechains = sidechains
self.sidechain_folder = sidechains_folder
self.contact_map = CM
self.extra_atoms = use_extra_atoms
self.cm_mode = CM_mode
self.dihedrals = dihedrals
self.dihedrals_projection = dihedrals_projection
if self.contact_map and self.cm_mode == "p-lig" and self.lig_resname == "":
raise ValueError("Ligand resname needed for protein-ligand contact map")
if self.contact_map and self.cm_mode not in VALID_CM_MODES:
raise ValueError("Unrecognized type of contact map, valids are: %s" " ".join(VALID_CM_MODES))
self.com = not self.protein_CA and (self.atomIds is None or len(self.atomIds) == 0) and not self.sidechains and not self.contact_map and not self.dihedrals
def parseArguments():
desc = "Program that extracts residue coordinates for a posterior MSM analysis.\
It either extracts the resname COM coordinates or those of an atomId, depending on the input.\
It then fills the rejected steps, which is not done by PELE.\
Finally, trajectories are gathered together in the same allTrajs folder.\
It automatically detects whether it is an adaptive or a sequential PELE run by looking for folders\
with numeric names."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("-f", "--folderWithTrajs", default=".",
help="Folder with trajectories (or epochs)")
# parser.add_argument("-atomId", action=AtomIdAction, help="serial:atomName:resname, e.g. 2048:C1:AIN")
parser.add_argument("-atomIds", nargs='*', help="serial:atomName:resname, e.g. 2048:C1:AIN. May contain more than one atomId")
parser.add_argument("-resname", default="", help="Ligand resname")
parser.add_argument("-CA", "--proteinCA", action="store_true", help="Extract protein alpha carbons coordinates")
parser.add_argument("-s", "--enforceSequential", action="store_true", help="Force the consideration as sequential run (non-adaptive)")
parser.add_argument("--setNum", type=int, default=0, help="Sets the number to appear in gathered trajectory in order to avoid clashes between different sequential runs. Ignored in adaptive runs.")
parser.add_argument("-w", "--writeLigandTrajectory", action="store_true", help="It writes a traj_ligand_XXX.pdb file with the ligand coordinates. The user must delete the original trajectory (if wanted)")
parser.add_argument("-t", "--totalSteps", type=int, default=0, help="Total number of steps in traj. Equivalent to epoch length in adaptive runs")
parser.add_argument("-nR", "--noRepeat", action="store_true", help="Flag to avoid repeating the rejected steps")
parser.add_argument("-n", "--numProcessors", type=int, default=None, help="Number of cpus to use")
parser.add_argument("--top", type=str, default=None, help="Topology file for non-pdb trajectories or path to Adaptive topology object")
parser.add_argument("--sidechains", action="store_true", help="Flag to extract sidechain coordinates")
parser.add_argument("-sf", "--sidechains_folder", default=".", type=str, help="Folder with the structures to obtain the sidechains to extract")
parser.add_argument("--serial", action="store_true", help="Flag to deactivate parallelization")
parser.add_argument("--contact_map", action="store_true", help="Flag to activate contact map creation")
parser.add_argument("--extra_atoms", action="store_true", help="Flag to use extra atoms in contact map creation (in addition to alpha carbons)")
parser.add_argument("--dihedrals", action="store_true", help="Flag to activate dihedral angles calculations")
parser.add_argument("--dihedrals_projection", action="store_true", help="Flag to project dihedral angles calculations into their cos and sin")
parser.add_argument("--cm_mode", default="p-lig", help="Type of contact map to create (p-lig for protein-ligand or p-p protein-protein)")
args = parser.parse_args()
return args.folderWithTrajs, args.atomIds, args.resname, args.proteinCA, args.enforceSequential, args.writeLigandTrajectory, args.totalSteps, args.setNum, args.noRepeat, args.numProcessors, args.top, args.sidechains, args.sidechains_folder, args.serial, args.contact_map, args.extra_atoms, args.cm_mode, args.dihedrals, args.dihedrals_projection
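# Example invocation (illustrative paths and values only):
#     python extractCoords.py -f ./adaptive_run -resname LIG -t 8 -n 4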
def loadAllResnameAtomsInPdb(filename, params):
prunedFileContent = []
sidechains_bool = bool(params.sidechains)
with open(filename) as f:
prunedSnapshot = []
for line in f:
if utils.is_model(line):
prunedFileContent.append("".join(prunedSnapshot))
prunedSnapshot = []
elif utils.is_end(line) or utils.is_remark(line) or utils.is_cryst(line):
continue
elif line[17:20] == params.lig_resname or utils.isAlphaCarbon(line, params.protein_CA or params.contact_map) or utils.isSidechain(line, sidechains_bool, params.sidechains) or (params.contact_map and params.extra_atoms and utils.extraAtomCheck(line, EXTRA_ATOMS)):
prunedSnapshot.append(line)
if prunedSnapshot:
prunedFileContent.append("".join(prunedSnapshot))
return prunedFileContent
def extractFilenumber(filename):
last = filename.rfind('.')
first = filename.rfind('_')
number = re.sub("[^0-9]", "", filename[first+1:last])
return number
def getOutputFilename(directory, filename, baseOutputFilename):
filenumber = extractFilenumber(filename)
return os.path.join(directory, baseOutputFilename+filenumber+".dat")
def extractContactMapCoordinatesPDB(allCoordinates, params):
trajCoords = []
for coordinates in allCoordinates:
if params.cm_mode == "p-lig":
PDB = atomset.PDB()
PDB.initialise(coordinates, resname=params.lig_resname)
snapshotCoords = [coord for at in PDB.atomList for coord in PDB.atoms[at].getAtomCoords()]
else:
snapshotCoords = []
PDBCA = atomset.PDB()
if params.extra_atoms:
PDBCA.initialise(coordinates, type=u"PROTEIN", extra_atoms=EXTRA_ATOMS)
else:
PDBCA.initialise(coordinates, type=u"PROTEIN")
snapshotCoords.extend([coord for at in PDBCA.atomList for coord in PDBCA.atoms[at].getAtomCoords()])
trajCoords.append(snapshotCoords)
return trajCoords
def getLigandAlphaCarbonsCoords(allCoordinates, lig_resname, sidechains=False):
trajCoords = []
for coordinates in allCoordinates:
PDB = atomset.PDB()
PDB.initialise(coordinates, resname=lig_resname)
snapshotCoords = [coord for at in PDB.atomList for coord in PDB.atoms[at].getAtomCoords()]
PDBCA = atomset.PDB()
if not sidechains:
PDBCA.initialise(coordinates, type="PROTEIN")
else:
PDBCA.initialise(coordinates, type="PROTEIN", heavyAtoms=True)
snapshotCoords.extend([coord for at in PDBCA.atomList for coord in PDBCA.atoms[at].getAtomCoords()])
trajCoords.append(snapshotCoords)
return trajCoords
def getPDBCOM(allCoordinates, lig_resname):
COMs = []
for coordinates in allCoordinates:
pdb = atomset.PDB()
pdb.initialise(coordinates, resname=lig_resname, heavyAtoms=True)
COMs.append(pdb.extractCOM())
return COMs
def getAtomCoord(allCoordinates, lig_resname, atom_Ids):
coords = []
# If ever need to speed this up, build a Trajectory class that inherits from PDB
# and loads the atom according to the position in the snapshot, rather than looking
# for the atom
for coordinates in allCoordinates:
pdb = atomset.PDB()
pdb.initialise(coordinates, resname=lig_resname, heavyAtoms=True)
snapshotcoords = []
for atomId in atom_Ids:
snapshotcoords.extend(pdb.getAtom(atomId).getAtomCoords())
coords.append(snapshotcoords)
return coords
def writeToFile(COMs, outputFilename):
with open(outputFilename, 'w') as f:
for i, line in enumerate(COMs):
f.write(str(i) + ' ')
for i in range(len(line) - 1):
f.write(str(line[i]) + ' ')
f.write(str(line[-1]) + '\n')
def extractIndexesTopology_CM(topology, lig_resname, CM_mode, use_extra_atoms):
selection = []
iline = 0
if CM_mode == "p-p":
check_ligand = False
else:
check_ligand = True
with open(topology) as f:
for line in f:
if not (line.startswith("ATOM") or line.startswith("HETATM")):
continue
if line[76:80].strip().upper() != "H" and (check_ligand and line[17:20] == lig_resname or utils.isAlphaCarbon(line, True) or use_extra_atoms and utils.extraAtomCheck(line, EXTRA_ATOMS)):
selection.append(iline)
iline += 1
return selection
def extractIndexesTopology(topology, lig_resname, atoms, writeCA, sidechains):
selection = []
if atoms:
atoms_set = set(atoms)
template = "%s:%s:%s"
iline = 0
bool_sidechains = bool(sidechains)
with open(topology) as f:
for line in f:
if not (line.startswith("ATOM") or line.startswith("HETATM")):
continue
if atoms:
serial_num = line[6:11].strip()
atom_name = line[12:16].strip()
residue_name = line[17:20].strip()
if template % (serial_num, atom_name, residue_name) in atoms_set:
selection.append(iline)
elif (line[17:20] == lig_resname or utils.isAlphaCarbon(line, writeCA) or utils.isSidechain(line, bool_sidechains, sidechains)) and line[76:80].strip().upper() != "H":
selection.append(iline)
iline += 1
return selection
def contactMapNonPDB(file_name, params, topology, selected_indices):
trajectory = md.load(file_name, top=topology)
atom_pairs = list(itertools.combinations(selected_indices, 2))
return 10*md.compute_distances(trajectory, atom_pairs, periodic=True)
def calculateDihedrals(file_name, params, topology):
trajectory = md.load(file_name, top=topology)
_, psi_angles = md.compute_psi(trajectory, periodic=True)
_, phi_angles = md.compute_phi(trajectory, periodic=True)
return np.hstack((psi_angles, phi_angles))
def projectDihedrals(dihedrals):
cos_proj = np.cos(dihedrals)
sin_proj = np.sin(dihedrals)
return np.hstack((cos_proj, sin_proj))
def extractCoordinatesXTCFile(file_name, params, topology, selected_indices):
trajectory = md.load(file_name, top=topology)
if params.com:
# getCOM case
# convert nm to A
coordinates = 10*md.compute_center_of_mass(trajectory.atom_slice(selected_indices))
else:
coordinates = 10*trajectory.xyz[:, selected_indices, :].reshape((trajectory.n_frames, -1))
return coordinates
def writeFilenameExtractedCoordinates(filename, params, pathFolder, constants, topology, indexes=None):
"""
Process the coordinates of a trajectory
"""
if params.dihedrals:
coords = calculateDihedrals(filename, params, topology)
if params.dihedrals_projection:
coords = projectDihedrals(coords)
outputFilename = getOutputFilename(constants.extractedTrajectoryFolder, filename,
constants.baseExtractedTrajectoryName)
writeToFile(coords, outputFilename % pathFolder)
return
ext = utilities.getFileExtension(filename)
if ext == ".pdb":
allCoordinates = loadAllResnameAtomsInPdb(filename, params)
if params.writeLigandTrajectory:
outputFilename = os.path.join(pathFolder, constants.ligandTrajectoryBasename % extractFilenumber(filename))
with open(outputFilename, 'w') as f:
f.write("\nENDMDL\n".join(allCoordinates))
if params.protein_CA:
coords = getLigandAlphaCarbonsCoords(allCoordinates, params.lig_resname)
elif params.sidechains:
coords = getLigandAlphaCarbonsCoords(allCoordinates, params.lig_resname, sidechains=params.sidechains)
else:
if params.com:
coords = getPDBCOM(allCoordinates, params.lig_resname)
elif params.contact_map:
coords = np.array(extractContactMapCoordinatesPDB(allCoordinates, params))
coords = utils.contactMap(coords, int(coords.shape[1]/3))
else:
coords = getAtomCoord(allCoordinates, params.lig_resname, params.atomIds)
elif ext in MDTRAJ_FORMATS:
if not indexes:
raise ValueError("Empty selection!!!")
if params.contact_map:
coords = contactMapNonPDB(filename, params, topology, indexes)
else:
coords = extractCoordinatesXTCFile(filename, params, topology, indexes)
else:
raise ValueError("Unrecongnized file extension for %s" % filename)
outputFilename = getOutputFilename(constants.extractedTrajectoryFolder, filename,
constants.baseExtractedTrajectoryName)
writeToFile(coords, outputFilename % pathFolder)
def writeFilenamesExtractedCoordinates(pathFolder, params, constants, pool=None):
if not os.path.exists(constants.extractedTrajectoryFolder % pathFolder):
os.makedirs(constants.extractedTrajectoryFolder % pathFolder)
originalPDBfiles = glob.glob(os.path.join(pathFolder, '*traj*.*'))
ext = os.path.splitext(originalPDBfiles[0])[1]
if ext in MDTRAJ_FORMATS:
if params.topology is None:
raise ValueError("Necessary topology not provided!")
# get topology for the first trajectory
top_file = params.topology.getTopologyFile(0, 1)
if params.contact_map:
indexes = extractIndexesTopology_CM(top_file, params.lig_resname, params.cm_mode, params.extra_atoms)
else:
indexes = extractIndexesTopology(top_file, params.lig_resname, params.atomIds, params.protein_CA, params.sidechains)
else:
indexes = None
workers = []
for filename in originalPDBfiles:
if params.topology is not None:
epoch, traj_num = get_epoch_traj_num(filename)
topology_file = params.topology.getTopologyFile(epoch, traj_num)
else:
topology_file = None
if pool is None:
# serial version
writeFilenameExtractedCoordinates(filename, params, pathFolder, constants, topology_file, indexes=indexes)
else:
# multiprocessor version
workers.append(pool.apply_async(writeFilenameExtractedCoordinates, args=(filename, params, pathFolder, constants, topology_file, indexes)))
for w in workers:
w.get()
def parseResname(atom_Ids, lig_resname, CM, CM_mode, dihedrals):
if dihedrals:
return ""
if atom_Ids is not None and len(atom_Ids) > 0:
differentResnames = {atomId.split(":")[-1] for atomId in atom_Ids}
if len(differentResnames) > 1:
sys.exit("Error! Different resnames provided in atomIds!")
elif len(differentResnames) == 1:
extractedResname = differentResnames.pop()
if CM:
if CM_mode == "p-lig":
if lig_resname == "":
sys.exit("Ligand resname should be provided for the protein-ligand contact map")
else:
return lig_resname
else:
return ""
if (atom_Ids is None or len(atom_Ids) == 0) and lig_resname == "":
sys.exit("Either resname or atomId should be provided")
elif lig_resname == "":
lig_resname = extractedResname # the atom Id last element is the resname
elif atom_Ids is not None and len(atom_Ids) > 0:
if extractedResname != lig_resname:
sys.exit("Residue name in resname and atomId do not match!")
return lig_resname
def buildFullTrajectory(steps, trajectory, numtotalSteps, inputTrajectory):
completeTrajectory = []
counter = 0
if len(trajectory) > 0:
sthWrongInTraj = False
for i in range(len(steps) - 1):
repeated = steps[i+1, 0] - steps[i, 0]
for _ in range(repeated):
try:
snapshot = trajectory[steps[i, 1]].split()
except IndexError:
print("sth wrong in trajectory %s. This is likely to disagreement between report and trajectory files. Please, fix it manually" % inputTrajectory)
sthWrongInTraj = True
break
snapshot[0] = str(counter)
snapshot = ' '.join(snapshot)
completeTrajectory.append(snapshot)
counter += 1
if sthWrongInTraj:
return completeTrajectory
if numtotalSteps == 0:
iterations = range(1)
else:
# PELE writes the initial structure as step 0, so an extra step is
# always needed
iterations = range(numtotalSteps + 1 - counter)
snapshot = trajectory[-1].split()
for i in iterations:
snapshot[0] = str(counter)
completeTrajectory.append(' '.join(snapshot))
counter += 1
return completeTrajectory
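# Illustrative trace (hypothetical report values): with steps = [[0, 0], [2, 1], [3, 2]]
# (accepted-step counter, snapshot index) and numtotalSteps = 5, trajectory[0] is written
# for steps 0-1, trajectory[1] for step 2, and the last snapshot (trajectory[-1]) fills
# steps 3-5, giving a complete 6-entry trajectory (the initial structure counts as step 0).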
def repeatExtractedSnapshotsInTrajectory(inputTrajectory, constants, numtotalSteps):
extractedTrajFolder, trajFilename = os.path.split(inputTrajectory)
trajectoryNumber = re.sub(r'\.dat$', '', trajFilename)
trajectoryNumber = re.sub(constants.baseExtractedTrajectoryName, '', trajectoryNumber)
origDataFolder = re.sub(constants.extractedTrajectoryFolder % "", "", extractedTrajFolder)
try:
reportFile = glob.glob(os.path.join(origDataFolder, constants.reportName + trajectoryNumber))[0]
except IndexError:
sys.exit("Couldn't find file that matches: %s" % os.path.join(origDataFolder, constants.reportName + trajectoryNumber))
with open(inputTrajectory) as f:
trajectory = f.read().splitlines()
acceptedSteps = np.loadtxt(reportFile, dtype='int', comments='#', usecols=(1, 2))
if len(acceptedSteps.shape) < 2:
acceptedSteps = acceptedSteps[np.newaxis, :]
fullTrajectory = buildFullTrajectory(acceptedSteps, trajectory, numtotalSteps, inputTrajectory)
if len(fullTrajectory) > 0:
outputFilename = os.path.join(constants.outputTrajectoryFolder % origDataFolder, constants.baseExtractedTrajectoryName + trajectoryNumber + '.dat')
with open(outputFilename, "w") as outputFile:
for snapshot in fullTrajectory:
outputFile.write("%s\n" % snapshot)
def repeatExtractedSnapshotsInFolder(folder_name, constants, numtotalSteps, pool=None):
inputTrajectoryFolder = constants.extractedTrajectoryFolder % folder_name
outputTrajectoryFolder = constants.outputTrajectoryFolder % folder_name
if not os.path.exists(outputTrajectoryFolder):
os.makedirs(outputTrajectoryFolder)
inputTrajectories = glob.glob(os.path.join(inputTrajectoryFolder, constants.baseExtractedTrajectoryName + '*'))
workers = []
for inputTrajectory in inputTrajectories:
if pool is None:
# serial version
repeatExtractedSnapshotsInTrajectory(inputTrajectory, constants, numtotalSteps)
else:
# multiprocessor version
workers.append(pool.apply_async(repeatExtractedSnapshotsInTrajectory, args=(inputTrajectory, constants, numtotalSteps)))
for w in workers:
w.get()
def makeGatheredTrajsFolder(constants):
if not os.path.exists(constants.gatherTrajsFolder):
os.makedirs(constants.gatherTrajsFolder)
if not os.path.exists(constants.gatherNonRepeatedFolder):
os.makedirs(constants.gatherNonRepeatedFolder)
def copyTrajectories(traj_names, destFolderTempletized, folderName, setNumber=0, epochNum=None):
for inputTrajectory in traj_names:
trajectoryNumber = extractFilenumber(os.path.split(inputTrajectory)[1])
if folderName != ".": # if not sequential
setNumber = folderName
if epochNum is not None and epochNum != ".":
setNumber = epochNum
shutil.copyfile(inputTrajectory, destFolderTempletized % (setNumber, trajectoryNumber))
def gatherTrajs(constants, folder_name, setNumber, non_Repeat, epochNum=None):
nonRepeatedTrajs = glob.glob(os.path.join(constants.extractedTrajectoryFolder % folder_name, constants.baseExtractedTrajectoryName + "*"))
copyTrajectories(nonRepeatedTrajs, constants.gatherNonRepeatedTrajsFilename, folder_name, setNumber, epochNum=epochNum)
if not non_Repeat:
# copy the repeated coordinates to the allTrajs folder
trajectoriesFilenames = os.path.join(constants.outputTrajectoryFolder % folder_name, constants.baseExtractedTrajectoryName + "*")
trajectories = glob.glob(trajectoriesFilenames)
copyTrajectories(trajectories, constants.gatherTrajsFilename, folder_name, setNumber, epochNum=epochNum)
else:
# if we ask to not repeat trajectories, copy the non-repeated to the
# allTrajs folder
copyTrajectories(nonRepeatedTrajs, constants.gatherTrajsFilename, folder_name, setNumber, epochNum=epochNum)
def extractSidechainIndexes_prody(traj, ligand_resname, topology=None):
if not PRODY:
raise utilities.UnsatisfiedDependencyException("Prody module not found, will not be able to extract sidechain coordinates")
atoms = pd.parsePDB(traj)
sidechains = atoms.select("protein within 5 of resname {}".format(ligand_resname))
return [atom.getIndex() for atom in sidechains]
def extractSidechainIndexes_mdtraj(traj, lig_resname, topology=None):
atoms = md.load(traj, top=topology)
ligand_indices = atoms.top.select("resname '{lig}'".format(lig=lig_resname))
water_indices = set(atoms.top.select("not protein and not resname '{lig}'".format(lig=lig_resname)))
# the distance is specified in nm
sidechains = md.compute_neighbors(atoms, 0.5, ligand_indices)
sidechains_trajs = []
for _, sidechain in enumerate(sidechains):
sidechains_trajs.extend(list(set(sidechain.tolist())-water_indices))
return sidechains_trajs
def extractSidechainIndexes(params, pool=None):
trajs = glob.glob(params.sidechain_folder)
sidechains_trajs = []
workers = []
for traj in trajs:
ext = utilities.getFileExtension(traj)
if ext == ".pdb":
if PRODY:
if pool is None:
sidechains_trajs.extend(extractSidechainIndexes_prody(traj, params.lig_resname))
else:
workers.append(pool.apply_async(extractSidechainIndexes_prody, args=(traj, params.lig_resname)))
else:
if pool is None:
sidechains_trajs.extend(extractSidechainIndexes_mdtraj(traj, params.lig_resname))
else:
workers.append(pool.apply_async(extractSidechainIndexes_mdtraj, args=(traj, params.lig_resname)))
elif ext in MDTRAJ_FORMATS:
epoch, traj_num = get_epoch_traj_num(traj)
if pool is None:
sidechains_trajs.extend(extractSidechainIndexes_mdtraj(traj, params.lig_resname, topology=params.topology.getTopologyFile(epoch, traj_num)))
else:
workers.append(pool.apply_async(extractSidechainIndexes_mdtraj(traj, params.lig_resname, params.topology)))
else:
raise ValueError("Unrecongnized file extension for %s" % traj)
for w in workers:
sidechains_trajs.extend(w.get())
return list(set(sidechains_trajs))
def get_epoch_traj_num(filename):
# assumes trajectories come from an Adaptive simulation
path, traj_name = os.path.split(filename)
try:
epoch = int(os.path.split(path)[-1])
except ValueError:
# if for some reason epoch number can't be inferred, assume first
# epoch
epoch = 0
try:
traj_num = utilities.getTrajNum(traj_name)
except ValueError:
# if for some reason trajectory number can't be inferred, assume
# first trajectory
traj_num = 1
return epoch, traj_num
def main(folder_name=".", atom_Ids="", lig_resname="", numtotalSteps=0, enforceSequential_run=0, writeLigandTrajectory=True, setNumber=0, protein_CA=0, non_Repeat=False, nProcessors=None, parallelize=True, topology=None, sidechains=False, sidechain_folder=".", cm=False, use_extra_atoms=False, CM_mode="p-lig", calc_dihedrals=False, dihedrals_projection=False):
params = ParamsHandler(folder_name, atom_Ids, lig_resname, numtotalSteps, enforceSequential_run, writeLigandTrajectory, setNumber, protein_CA, non_Repeat, nProcessors, parallelize, topology, sidechains, sidechain_folder, cm, use_extra_atoms, CM_mode, calc_dihedrals, dihedrals_projection)
constants = Constants()
if params.topology is not None:
params.topology = utilities.getTopologyObject(params.topology)
params.lig_resname = parseResname(params.atomIds, params.lig_resname, params.contact_map, params.cm_mode, params.dihedrals)
folderWithTrajs = params.folder_name
makeGatheredTrajsFolder(constants)
if params.enforceSequential_run:
folders = ["."]
else:
folders = utilities.get_epoch_folders(folderWithTrajs)
if len(folders) == 0:
folders = ["."]
# if multiprocess is not available, turn off parallelization
params.parallelize &= PARALELLIZATION
if params.parallelize:
if params.nProcessors is None:
params.nProcessors = utilities.getCpuCount()
params.nProcessors = max(1, params.nProcessors)
print("Running extractCoords with %d cores" % (params.nProcessors))
pool = mp.Pool(params.nProcessors)
else:
pool = None
params.sidechains = extractSidechainIndexes(params, pool=pool) if params.sidechains else []
for folder_it in folders:
pathFolder = os.path.join(folderWithTrajs, folder_it)
print("Extracting coords from folder %s" % folder_it)
ligand_trajs_folder = os.path.join(pathFolder, constants.ligandTrajectoryFolder)
if params.writeLigandTrajectory and not os.path.exists(ligand_trajs_folder):
os.makedirs(ligand_trajs_folder)
writeFilenamesExtractedCoordinates(pathFolder, params, constants, pool=pool)
if not params.non_Repeat:
print("Repeating snapshots from folder %s" % folder_it)
repeatExtractedSnapshotsInFolder(pathFolder, constants, params.numtotalSteps, pool=None)
print("Gathering trajs in %s" % constants.gatherTrajsFolder)
gatherTrajs(constants, pathFolder, params.setNumber, params.non_Repeat, epochNum=folder_it)
if __name__ == "__main__":
folder, atomIds, resname, proteinCA, enforceSequential, writeLigandTraj, totalSteps, setNum, nonRepeat, n_processors, top, side_chains, sideChain_folder, serial, contact_map, extra_atoms, cm_mode, dihedral_angles, dihedrals_proj = parseArguments()
main(folder, atomIds, resname, totalSteps, enforceSequential, writeLigandTraj, setNum, proteinCA, nonRepeat, n_processors, topology=top, sidechains=side_chains, sidechain_folder=sideChain_folder, parallelize=(not serial), cm=contact_map, use_extra_atoms=extra_atoms, CM_mode=cm_mode, calc_dihedrals=dihedral_angles, dihedrals_projection=dihedrals_proj)
|
AdaptivePELE/AdaptivePELE
|
AdaptivePELE/freeEnergies/extractCoords.py
|
Python
|
mit
| 30,818
|
[
"MDTraj"
] |
d759a09aad8618fa0c9f450af8b1fa3a56071d3ceb29d1d84df2bcb3bd437f3f
|
import scipy as sp
import scipy.linalg as LA
import scipy.spatial.distance as spdist
import warnings
def v2min_image_v(dr, cell, pbc=None, shifts_out=False):
"""
------
Authors: matscipy authors - https://github.com/libAtoms/matscipy
------
Apply minimum image convention to an array of distance vectors.
Parameters
----------
dr : array_like
Array of distance vectors.
cell : array_like, shape (n_dim,)
Cell extent in each direction.
pbc : array_like, optional, type bool
Periodic boundary conditions directions. Default is to
assume periodic boundaries in all directions.
Returns
-------
dr : array
Array of distance vectors, wrapped according to the minimum image
convention.
"""
# Check where distance larger than 1/2 cell. Particles have crossed
# periodic boundaries then and need to be unwrapped.
n_dim = len(cell)
rec = sp.diag(1. / sp.asarray(cell))
cell = sp.diag(sp.asarray(cell))
if pbc is not None:
rec *= sp.array(pbc, dtype=int).reshape(n_dim, 1)
dri = sp.round_(sp.dot(dr, rec))
shifts = sp.dot(dri, cell)
# Unwrap
if shifts_out:
return dr - shifts, shifts
else:
return dr - shifts
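# Minimal usage sketch (hypothetical values): in a cubic box of side 10 with full
# periodicity, a raw displacement of 9 along x wraps to -1:
#     >>> v2min_image_v(sp.array([[9.0, 0.0, 0.0]]), [10.0, 10.0, 10.0])
#     array([[-1.,  0.,  0.]])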
def round_vector(vec, precision = 0.05):
"""
Rounds an array with the required precision.
"""
return ((vec + 0.5 * precision) / precision).astype('int') * precision
# def unique_vectors(v):
# """
# Unique vectors of a list of vectors.
# """
# vstr = [str(x) for x in v]
# unique_vstr, unique_idx = sp.unique(vstr, return_index = True)
# unique_v = v[unique_idx]
# return unique_v
def where_a_in_b(a, b):
a, b = sp.atleast_2d(a), sp.atleast_2d(b)
indices = spdist.cdist(a, b)
indices = sp.where(indices < 1.0e-8)[1] # get indices of array b
return indices
class IgnoranceField:
def __init__(self, X_grid, y_threshold=1.0e-1, **kwargs):
"""
Parameters:
----------
X_grid: array_like, shape (n_samples, n_features)
Array of points that tessellate a patch of space
y_threshold: real
Value above which a "cost wall" is detected
kwargs:
----------
cell : array_like, shape (n_features,)
Cell extent in each direction.
pbc : array_like, type bool
Periodic boundary conditions directions. Default is to
assume periodic boundaries in all directions.
X_grid_spacing: real
Grid spacing between points in X
boundaries: array_like, shape(n_features,)
max - min in each direction of X
cutoff: real, > 0
distance within which points are interacting with current point.
"""
self.X_grid = X_grid # 2D array: [[x0min, x0max],...,[xNmin, xNmax]]
self.y_threshold = y_threshold
self.pbc = kwargs.get('pbc', None)
# spacing between grid points is the minimum nonzero distance between them.
spacing_inferred = sp.sort(spdist.cdist(X_grid, sp.atleast_2d(X_grid[0])))[1].item()
self.X_grid_spacing = kwargs.get('X_grid_spacing', spacing_inferred)
#
boundaries_inferred = sp.array([x.max() - x.min() for x in X_grid.T])
self.boundaries = kwargs.get('boundaries', boundaries_inferred)
self.cutoff = kwargs.get('cutoff', None)
self.y_grid = self.y_threshold * sp.ones(len(X_grid)) - 0.1
self.n_grid = sp.ones(len(X_grid))
self.wall = sp.zeros(len(X_grid), dtype=bool)
def set_cost_grid(self, y_grid, n_grid=None):
"""
On a given grid in the X plane, set the values in y and
the ignorance for each value.
Parameters:
----------
y_grid: array_like, shape (n_samples,)
Array of scalar-valued function on the grid points X_grid that
determines the "cost" of each point. If cost > threshold, the point forms a wall
n_grid: array_like, shape (n_samples,)
Array of scalar-valued function on the grid points X_grid that
sets the ignorance level for each point. Higher values attract more.
Other properties:
----------
wall: array_like, shape (n_samples,), type boolean
Denotes the presence or not of a wall for each point in X_grid.
"""
assert len(self.X_grid) == len(y_grid)
if n_grid is None or y_grid.shape != n_grid.shape:
n_grid = sp.ones(y_grid.shape)
self.y_grid = y_grid.flatten()
self.n_grid = n_grid
self.wall = (y_grid > self.y_threshold)
def update_cost(self, X, y):
"""
For an ignorance function 1/N, where N is number of times
the system passed on a point X, update ignorance and wall.
+1 the n_grid
"""
Xs = sp.atleast_2d(X)
ys = sp.atleast_1d(y)
for X, y in zip(Xs, ys):
X = X.flatten()
index = where_a_in_b(X, self.X_grid).item()
self.y_grid[index] = y
self.wall[index] = (y > self.y_threshold)
self.n_grid[index] = 1. / (self.n_grid[index] + 1)
def distance_vectors_not_walled(self, X0):
"""
Parameters:
----------
X0: array_like, shape (n_features,)
Point with respect to which the ignorance is calculated.
Returns:
----------
vectors_mic: array_like, shape (n_vecs, n_features)
Array of distance vectors connecting point X0 and
points on a grid self.X_grid which do not cross a
region for which y > threshold.
dists: array_like, shape (n_vecs,)
Euclidean length of each vector in vectors_mic.
n_grid: array_like, shape (n_vecs,)
Ignorance value for each vector in vectors_mic.
"""
# distance vectors between current point X0 and all points on a grid,
# calculated in PBC minimum image convention.
vectors_mic, shifts = v2min_image_v(
self.X_grid - X0, self.boundaries, shifts_out=True)
# norms of distance vectors
dists = sp.array([sp.linalg.norm(v) for v in vectors_mic])
if self.cutoff is not None:
mask = dists <= self.cutoff
vectors_mic = vectors_mic[mask]
dists = dists[mask]
wall = self.wall[mask]
n_grid = self.n_grid[mask]
else:
wall = self.wall
n_grid = self.n_grid
# vectors which contain a wall
X_wall = vectors_mic[wall]
# distances between each wall point and X0
X_wall_dists = sp.array([LA.norm(v) for v in X_wall])
# indices of vectors that are in the direction of a wall
cosines = spdist.cdist(vectors_mic, X_wall, metric='cosine')
wall_direction = sp.where(cosines < 1.0e-3) # arbitrary tolerance
# element by element comparison: is point beyond the wall or not?
beyond_walls = (X_wall_dists[wall_direction[1]] <= dists[wall_direction[0]])
# indices of points that are in the direction of a wall, and their distances
# are larger than the distance of the corresponding wall point.
walled_indices = wall_direction[0][sp.where(beyond_walls)[0]]
all_indices = sp.arange(len(vectors_mic))
no_wall_indices = sp.array(list(set(all_indices) - set(walled_indices)))
vectors_mic = vectors_mic[no_wall_indices]
dists = dists[no_wall_indices]
n_grid = n_grid[no_wall_indices]
dists[dists < self.X_grid_spacing] = self.X_grid_spacing
return vectors_mic, dists, n_grid
def get_forces(self, X0, direction_only=True):
"""
Parameters:
----------
X0: array_like, shape (n_features,)
Point with respect to which the ignorance is calculated.
Returns:
----------
field: array_like, shape (n_features,)
Force vector pointing towards the direction of maximum ignorance.
"""
dvecs, dnorms, ignorances = self.distance_vectors_not_walled(X0)
# weights can be substituted with something less naive than
# number of times the simulation crossed a point, i.e. MSE
# Electrostatic-like field: \sum_i q_i / |r_i|^3 * \mathbf{r}_i
field = ((ignorances / dnorms**3)[:,None] * dvecs).sum(axis=0)
if direction_only:
field /= LA.norm(field)
return field
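# Minimal usage sketch (not part of the original module). It assumes only the
# names defined above (sp, IgnoranceField); the grid, threshold and cutoff
# values are arbitrary illustration choices.
def _example_ignorance_field():
    """
    Build a small 10x10 grid on [0, 0.9]^2, mark one grid point as a cost
    wall, and query the ignorance force at a nearby probe point. The returned
    unit vector points roughly away from the walled region.
    """
    side = sp.linspace(0.0, 0.9, 10)
    X_grid = sp.array([[x, y] for x in side for y in side])
    field = IgnoranceField(X_grid, y_threshold=1.0, cutoff=0.45)
    field.update_cost(X_grid[30], 2.0)  # grid point near (0.3, 0.0) becomes a wall
    probe = sp.array([0.05, 0.0])
    return field.get_forces(probe)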
# here follows an earlier algorithm of distance_vectors_not_walled,
# which was way too expensive, but too nice to delete.
#
# # each distance vector is discretised at points that are spaced by the grid spacing
# discrete_ns = (dists / self.X_grid_spacing).astype('int') + 1
# # same mesh size and points as the given X_grid
# discretised_vectors_mic = [round_vector(
# sp.array(
# vec * sp.linspace(0, 1, n)[:,None]
# ), precision = self.X_grid_spacing)
# for vec, n in zip(vectors_mic, discrete_ns)]
# no_wall_present = []
# for dd in discretised_vectors_mic:
# try:
# indices = where_a_in_b(dd, X_grid_mic)
# # is there a wall anywhere?
# wall_present = wall[indices].any()
# no_wall_present.append(not wall_present)
# except Exception as err:
# # It cannot be determined if the segment crosses a wall.
# # In this case, assume there is a wall.
# warnings.warn("%s. \t Missing information for wall check." % err)
# no_wall_present.append(False)
# # no_wall_present becomes the indices array of where there is no wall
# no_wall_present = sp.where(no_wall_present)[0]
# vectors_mic, dists = vectors_mic[no_wall_present], dists[no_wall_present]
# # no_wall_indices = sp.arange(len(self.X_grid))[no_wall_present]
# n_grid = n_grid[no_wall_present]
|
marcocaccin/LearningMetaDynamics
|
ignorance_field.py
|
Python
|
gpl-2.0
| 10,109
|
[
"Matscipy"
] |
865ab099576c3a3681553ac09651baa52c0f6d86d5f88e00e247ce300a76b63b
|
#!/opt/local/bin/python2.5
#=============================================================================================
# Render a replica trajectory in PyMOL
#=============================================================================================
#=============================================================================================
# REQUIREMENTS
#
# This code requires a NetCDF module; netCDF4 is used below, while Scientific.IO.NetCDF and pynetcdf are the historical alternatives:
# http://pypi.python.org/pypi/pynetcdf/
# http://sourceforge.net/project/showfiles.php?group_id=1315&package_id=185504
#=============================================================================================
#=============================================================================================
# TODO
#=============================================================================================
#=============================================================================================
# CHANGELOG
#=============================================================================================
#=============================================================================================
# VERSION CONTROL INFORMATION
# * 2009-08-01 JDC
# Created file.
#=============================================================================================
#=============================================================================================
# IMPORTS
#=============================================================================================
import numpy
from numpy import *
#import Scientific.IO.NetCDF
import netCDF4 as NetCDF
import os
import os.path
from pymol import cmd
from pymol import util
#=============================================================================================
# PARAMETERS
#=============================================================================================
#=============================================================================================
# SUBROUTINES
#=============================================================================================
def readAtomsFromPDB(pdbfilename):
"""Read atom records from the PDB and return them in a list.
present_sequence = getPresentSequence(pdbfilename, chain=' ')
contents of protein.seqfile
REQUIRED ARGUMENTS
pdbfilename - the filename of the PDB file to import from
OPTIONAL ARGUMENTS
chain - the one-character chain ID of the chain to import (default ' ')
RETURN VALUES
atoms - a list of atom{} dictionaries
The ATOM records are read, and the sequence for which there are atomic coordinates is stored.
"""
# Read the PDB file into memory.
pdbfile = open(pdbfilename, 'r')
lines = pdbfile.readlines()
pdbfile.close()
# Read atoms.
atoms = []
for line in lines:
if line[0:5] == "ATOM ":
# Parse line into fields.
atom = { }
atom["serial"] = int(line[5:11])
atom["name"] = line[12:16]
atom["altLoc"] = line[16:17]
atom["resName"] = line[17:21]
atom["chainID"] = line[21:22]
atom["resSeq"] = int(line[22:26])
atom["iCode"] = line[26:27]
atom["x"] = float(line[30:38])
atom["y"] = float(line[38:46])
atom["z"] = float(line[46:54])
atom["occupancy"] = 1.0
if (line[54:60].strip() != ''):
atom["occupancy"] = float(line[54:60])
atom["tempFactor"] = 0.0
if (line[60:66].strip() != ''):
atom["tempFactor"] = float(line[60:66])
atom["segID"] = line[72:76]
atom["element"] = line[76:78]
atom["charge"] = line[78:80]
# Mangle resSeq|iCode:
if atom['iCode'] != ' ':
atom['resSeq'] = str(atom['resSeq']) + atom['iCode']
atom['iCode'] = ' '
atoms.append(atom)
# Return list of atoms.
return atoms
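# Illustrative usage sketch only (the path below is the reference PDB used in
# the MAIN section of this script):
#
#     atoms = readAtomsFromPDB('setup/complex.pdb')
#     print "read %d ATOM records; first atom name: %s" % (len(atoms), atoms[0]['name'])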
def write_netcdf_replica_trajectories(directory, prefix, title, ncfile):
"""Write out replica trajectories in AMBER NetCDF format.
ARGUMENTS
directory (string) - the directory to write files to
prefix (string) - prefix for replica trajectory files
title (string) - the title to give each NetCDF file
ncfile (NetCDF) - NetCDF file object for input file
"""
# Get current dimensions.
niterations = ncfile.variables['positions'].shape[0]
nstates = ncfile.variables['positions'].shape[1]
natoms = ncfile.variables['positions'].shape[2]
# Write out each replica to a separate file.
for replica in range(nstates):
# Create a new replica file.
output_filename = os.path.join(directory, '%s-%03d.nc' % (prefix, replica))
#ncoutfile = NetCDF.NetCDFFile(output_filename, 'w')
ncoutfile = NetCDF.Dataset(output_filename, 'w')
# initialize_netcdf and write_netcdf_frame are assumed to be provided
# elsewhere; they are not defined in this script.
initialize_netcdf(ncoutfile, title + " (replica %d)" % replica, natoms)
for iteration in range(niterations):
coordinates = array(ncfile.variables['positions'][iteration,replica,:,:])
coordinates *= 10.0 # convert nm to angstroms
write_netcdf_frame(ncoutfile, iteration, time = 1.0 * iteration, coordinates = coordinates)
ncoutfile.close()
return
def compute_torsion_trajectories(ncfile, filename):
"""Write out torsion trajectories for Val 111.
ARGUMENTS
ncfile (NetCDF) - NetCDF file object for input file
filename (string) - name of file to be written
"""
atoms = [1735, 1737, 1739, 1741] # N-CA-CB-CG1 of Val 111
# Get current dimensions.
niterations = ncfile.variables['positions'].shape[0]
nstates = ncfile.variables['positions'].shape[1]
natoms = ncfile.variables['positions'].shape[2]
# Compute torsion angle
def compute_torsion(positions, atoms):
# Compute vectors from cross products
vBA = positions[atoms[0],:] - positions[atoms[1],:]
vBC = positions[atoms[2],:] - positions[atoms[1],:]
vCB = positions[atoms[1],:] - positions[atoms[2],:]
vCD = positions[atoms[3],:] - positions[atoms[2],:]
v1 = cross(vBA,vBC)
v2 = cross(vCB,vCD)
cos_theta = dot(v1,v2) / sqrt(dot(v1,v1) * dot(v2,v2))
theta = arccos(cos_theta) * 180.0 / pi  # pi comes from the numpy star import above
return theta
# Compute torsion angles for each replica
contents = ""
for iteration in range(niterations):
for replica in range(nstates):
# Compute torsion
torsion = compute_torsion(array(ncfile.variables['positions'][iteration,replica,:,:]), atoms)
# Write torsion
contents += "%8.1f" % torsion
contents += "\n"
# Write contents (write_file is assumed to be provided elsewhere; it is not
# defined in this script).
write_file(filename, contents)
return
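# The nested compute_torsion helper above returns an unsigned angle in
# [0, 180] degrees. The sketch below (not used by this script) shows how a
# signed dihedral could be obtained with arctan2 instead; it relies only on
# the numpy names imported at the top of this file.
def compute_signed_torsion(positions, atoms):
    """Signed dihedral angle in degrees, in (-180, 180]."""
    b1 = positions[atoms[1], :] - positions[atoms[0], :]
    b2 = positions[atoms[2], :] - positions[atoms[1], :]
    b3 = positions[atoms[3], :] - positions[atoms[2], :]
    n1 = cross(b1, b2)  # normal of the first plane
    n2 = cross(b2, b3)  # normal of the second plane
    m1 = cross(n1, b2 / sqrt(dot(b2, b2)))
    x = dot(n1, n2)
    y = dot(m1, n2)
    return arctan2(y, x) * 180.0 / pi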
#=============================================================================================
# MAIN
#=============================================================================================
import __main__
__main__.pymol_argv = [ 'pymol', '-qc']
import pymol
pymol.finish_launching()
# DEBUG: ANALYSIS PATH IS HARD-CODED FOR NOW
source_directory = 'output'
reference_pdbfile = 'setup/complex.pdb'
phase = 'complex-explicit'
replica = 0 # replica index to render
#replica = 15 # replica index to render
# Load PDB file.
cmd.rewind()
cmd.delete('all')
cmd.reset()
cmd.load(reference_pdbfile, 'complex')
cmd.remove('resn WAT') # remove waters
cmd.select('receptor', '(not resn MOL) and (not resn WAT) and (not hydrogen)')
cmd.select('ligand', 'resn MOL and not hydrogen')
cmd.deselect()
cmd.hide('all')
cmd.show('cartoon', 'receptor')
cmd.show('sticks', 'ligand')
util.cbay('ligand')
cmd.color('green', 'receptor')
# speed up builds
cmd.set('defer_builds_mode', 3)
cmd.set('cache_frames', 0)
model = cmd.get_model('complex')
#for atom in model.atom:
# print "%8d %4s %3s %5d %8.3f %8.3f %8.3f" % (atom.index, atom.name, atom.resn, int(atom.resi), atom.coord[0], atom.coord[1], atom.coord[2])
# Read atoms from PDB
pdbatoms = readAtomsFromPDB(reference_pdbfile)
# Build mappings.
pdb_indices = dict()
for (index, atom) in enumerate(pdbatoms):
if atom['chainID'] == ' ': atom['chainID'] = ''
#if atom['resName'] == 'WAT': continue
key = (atom['chainID'], int(atom['resSeq']), atom['name'].strip())
value = index
pdb_indices[key] = value
print "pdb_indices has %d entries" % len(pdb_indices.keys())
model_indices = dict()
for (index, atom) in enumerate(model.atom):
#if atom.resn == 'WAT': continue
key = (atom.chain, int(atom.resi), atom.name)
value = index
model_indices[key] = value
print "model_indices has %d entries" % len(model_indices.keys())
#model_mapping = list()
#for (pdb_index, atom) in enumerate(pdbatoms):
# #if atom['resName'] == 'WAT': continue
# key = (atom['chainID'], int(atom['resSeq']), atom['name'].strip())
# model_mapping.append(model_indices[key])
# Omit waters.
pdb_mapping = list()
for (index, atom) in enumerate(model.atom):
#if atom.resn == 'WAT': continue
key = (atom.chain, int(atom.resi), atom.name)
pdb_mapping.append(pdb_indices[key])
print pdb_mapping
# Construct full path to NetCDF file.
fullpath = os.path.join(source_directory, phase + '.nc')
# Open NetCDF file for reading.
print "Opening NetCDF trajectory file '%(fullpath)s' for reading..." % vars()
#ncfile = Scientific.IO.NetCDF.NetCDFFile(fullpath, 'r')
ncfile = NetCDF.Dataset(fullpath, 'r')
# DEBUG
print "dimensions:"
print ncfile.dimensions
# Read dimensions.
[niterations,nstates,natoms,ndim] = ncfile.variables['positions'].shape
print "Read %(niterations)d iterations, %(nstates)d states" % vars()
#niterations = 10 # DEBUG
# Load frames
cmd.set('all_states', 0)
print "Loading frames..."
for iteration in range(niterations):
# Set coordinates
print "iteration %8d / %8d" % (iteration, niterations)
positions = (10.0 * ncfile.variables['positions'][iteration, replica, :, :]).squeeze()
positions = positions[pdb_mapping,:]
xyz = positions.tolist()
xyz_iter = iter(xyz)
#cmd.load_model(model, 'complex', state=iteration+1)
#cmd.frame(iteration+1)
#model = cmd.get_model('complex', state=1)
#cmd.load_model(model, 'complex', state=iteration+1)
cmd.create('complex', 'complex', 1, iteration+1)
cmd.alter_state(iteration+1, 'complex', '(x,y,z) = xyz_iter.next()', space=locals())
#for pdb_index in range(natoms):
#if (pdb_index % 100)==0: print pdb_index
#model_index = model_mapping[pdb_index]
#model.atom[model_index].coord = (10 * ncfile.variables['positions'][iteration, replica, pdb_index, :]).squeeze().tolist()
#for k in range(3):
# model.atom[model_index].coord[k] = float(ncfile.variables['positions'][iteration, replica, pdb_index, k]) * 10.0 # convert to angstroms
#cmd.load_model(model, 'complex', state=iteration+1)
#cmd.load_model(model, 'complex')
print "done"
# Align all states
cmd.intra_fit('all')
cmd.hide('all')
#cmd.rewind()
cmd.select('receptor', '(not resn MOL) and (not resn WAT) and (not hydrogen)')
cmd.select('ligand', 'resn MOL and not hydrogen')
cmd.deselect()
cmd.hide('all')
cmd.show('cartoon', 'receptor')
cmd.show('sticks', 'ligand')
util.cbay('ligand')
cmd.color('green', 'receptor')
cmd.show('surface', 'receptor')
cmd.set('transparency', 0.65)
cmd.set('surface_mode', 3)
cmd.set('surface_color', 'white')
# Create one-to-one mapping between states and frames.
cmd.mset("1 -%d" % cmd.count_states())
# Zoom viewport
cmd.zoom('complex')
#cmd.orient('complex')
#cmd.zoom('ligand')
#cmd.orient('ligand')
#cmd.turn('x', -90)
# Render movie
frame_prefix = 'frames/frame'
cmd.set('ray_trace_frames', 1)
cmd.set('ray_trace_frames', 0) # DEBUG
for iteration in range(niterations):
print "rendering frame %04d / %04d" % (iteration+1, niterations)
cmd.frame(iteration+1)
cmd.set('stick_transparency', float(ncfile.variables['states'][iteration, replica]) / float(nstates-1))
cmd.png(frame_prefix + '%04d.png' % (iteration), ray=True)
#cmd.mpng(frame_prefix, iteration+1, iteration+1)
#cmd.load_model(model, 'complex')
cmd.set('ray_trace_frames', 0)
# Close file
ncfile.close()
|
jchodera/yank
|
examples/human-serum-albumin-explicit/render_trajectory.py
|
Python
|
lgpl-3.0
| 12,339
|
[
"Amber",
"NetCDF",
"PyMOL"
] |
3562b84586d4f432aeef43735f87901202d787df85542ca384039fea80658575
|
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid import logger
# noinspection PyUnresolvedReferences
from mantid.simpleapi import mtd, Abins, Scale, CompareWorkspaces, Load, DeleteWorkspace
from AbinsModules import AbinsConstants, AbinsTestHelpers
import numpy as np
def old_modules():
"""" Check if there are proper versions of Python and numpy."""
is_python_old = AbinsTestHelpers.old_python()
if is_python_old:
logger.warning("Skipping AbinsBasicTest because Python is too old.")
is_numpy_old = AbinsTestHelpers.is_numpy_valid(np.__version__)
if is_numpy_old:
logger.warning("Skipping AbinsBasicTest because numpy is too old.")
return is_python_old or is_numpy_old
def skip_if(skipping_criteria):
"""
Skip all tests if the supplied function returns true.
Python unittest.skipIf is not available in 2.6 (RHEL6) so we'll roll our own.
"""
def decorate(cls):
if skipping_criteria():
for attr in cls.__dict__.keys():
if callable(getattr(cls, attr)) and 'test' in attr:
delattr(cls, attr)
return cls
return decorate
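# Illustrative sketch only (kept as a comment so it does not add a class to
# test discovery): applying the decorator with a criterion that returns True
# strips every test method, so the decorated class collects zero tests.
#
#     @skip_if(lambda: True)
#     class AlwaysSkipped(unittest.TestCase):
#         def test_never_runs(self):
#             self.fail("removed by skip_if before collection")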
@skip_if(old_modules)
class AbinsBasicTest(unittest.TestCase):
_si2 = "Si2-sc_Abins"
_squaricn = "squaricn_sum_Abins"
_dft_program = "CASTEP"
_temperature = 10.0 # temperature 10 K
_scale = 1.0
_sample_form = "Powder"
_instrument_name = "TOSCA"
_atoms = "" # if no atoms are specified then all atoms are taken into account
_sum_contributions = True
# this is a string; once it is read it is converted internally to integer
_quantum_order_events_number = str(AbinsConstants.FUNDAMENTALS)
_cross_section_factor = "Incoherent"
_workspace_name = "output_workspace"
_tolerance = 0.0001
def tearDown(self):
AbinsTestHelpers.remove_output_files(list_of_names=["Abins", "explicit", "default", "total",
"squaricn_scale", "benzene_exp", "experimental"])
mtd.clear()
def test_wrong_input(self):
"""Test if the correct behaviour of algorithm in case input is not valid"""
# invalid CASTEP file: the header is missing "Number of branches 6"
self.assertRaises(RuntimeError, Abins, PhononFile="Si2-sc_wrong.phonon", OutputWorkspace=self._workspace_name)
# wrong extension of phonon file in case of CASTEP
self.assertRaises(RuntimeError, Abins, PhononFile="Si2-sc.wrong_phonon", OutputWorkspace=self._workspace_name)
# wrong extension of phonon file in case of CRYSTAL
self.assertRaises(RuntimeError, Abins, DFTprogram="CRYSTAL", PhononFile="MgO.wrong_out",
OutputWorkspace=self._workspace_name)
# in case of molecular calculations AllKpointsGiven cannot be False
self.assertRaises(RuntimeError, Abins, DFTprogram="CRYSTAL", PhononFile="toluene_molecule_BasicAbins.out",
AllKpointsGiven=False, OutputWorkspace=self._workspace_name)
# no name for workspace
self.assertRaises(RuntimeError, Abins, PhononFile=self._si2 + ".phonon", Temperature=self._temperature)
# keyword total in the name of the workspace
self.assertRaises(RuntimeError, Abins, PhononFile=self._si2 + ".phonon", Temperature=self._temperature,
OutputWorkspace=self._workspace_name + "total")
# negative temperature in K
self.assertRaises(RuntimeError, Abins, PhononFile=self._si2 + ".phonon", Temperature=-1.0,
OutputWorkspace=self._workspace_name)
# negative scale
self.assertRaises(RuntimeError, Abins, PhononFile=self._si2 + ".phonon", Scale=-0.2,
OutputWorkspace=self._workspace_name)
# test if intermediate results are consistent
def test_non_unique_atoms(self):
"""Test scenario in which a user specifies non unique atoms (for example in squaricn that would be "C,C,H").
In that case Abins should terminate and print a meaningful message.
"""
self.assertRaises(RuntimeError, Abins, PhononFile=self._squaricn + ".phonon", Atoms="C,C,H",
OutputWorkspace=self._workspace_name)
def test_non_existing_atoms(self):
"""Test scenario in which a user requests to create workspaces for atoms which do not exist in the system.
In that case Abins should terminate and give a user a meaningful message about wrong atoms to analyse.
"""
# There are no N atoms in squaricn
self.assertRaises(RuntimeError, Abins, PhononFile=self._squaricn + ".phonon", Atoms="N",
OutputWorkspace=self._workspace_name)
def test_scale(self):
"""
Test if scaling is correct.
@return:
"""
wrk_ref = Abins(DFTprogram=self._dft_program,
PhononFile=self._squaricn + ".phonon",
Temperature=self._temperature,
SampleForm=self._sample_form,
Instrument=self._instrument_name,
Atoms=self._atoms,
Scale=self._scale,
SumContributions=self._sum_contributions,
QuantumOrderEventsNumber=self._quantum_order_events_number,
ScaleByCrossSection=self._cross_section_factor,
OutputWorkspace=self._squaricn + "_ref")
wrk = Abins(DFTprogram=self._dft_program,
PhononFile=self._squaricn + ".phonon",
Temperature=self._temperature,
SampleForm=self._sample_form,
Instrument=self._instrument_name,
Atoms=self._atoms,
SumContributions=self._sum_contributions,
QuantumOrderEventsNumber=self._quantum_order_events_number,
Scale=10,
ScaleByCrossSection=self._cross_section_factor,
OutputWorkspace="squaricn_scale")
ref = Scale(wrk_ref, Factor=10)
(result, messages) = CompareWorkspaces(wrk, ref, Tolerance=self._tolerance)
self.assertEqual(result, True)
def test_exp(self):
"""
Tests if experimental data is loaded correctly.
@return:
"""
Abins(DFTprogram=self._dft_program,
PhononFile="benzene_Abins.phonon",
ExperimentalFile="benzene_Abins.dat",
Temperature=self._temperature,
SampleForm=self._sample_form,
Instrument=self._instrument_name,
Atoms=self._atoms,
Scale=self._scale,
SumContributions=self._sum_contributions,
QuantumOrderEventsNumber=self._quantum_order_events_number,
ScaleByCrossSection=self._cross_section_factor,
OutputWorkspace="benzene_exp")
# load experimental data
Load(Filename="benzene.dat", OutputWorkspace="benzene_only_exp")
(result, messages) = CompareWorkspaces(Workspace1=mtd["experimental_wrk"],
Workspace2=mtd["benzene_only_exp"],
CheckAxes=False,
Tolerance=self._tolerance)
self.assertEqual(result, True)
def test_partial(self):
# By default workspaces for all atoms should be created. Test this default behaviour.
experimental_file = ""
wrk_ref = Abins(DFTprogram=self._dft_program,
PhononFile=self._squaricn + ".phonon",
ExperimentalFile=experimental_file,
Temperature=self._temperature,
SampleForm=self._sample_form,
Instrument=self._instrument_name,
Atoms=self._atoms,
Scale=self._scale,
SumContributions=self._sum_contributions,
QuantumOrderEventsNumber=self._quantum_order_events_number,
ScaleByCrossSection=self._cross_section_factor,
OutputWorkspace=self._squaricn + "_ref")
wks_all_atoms_explicitly = Abins(PhononFile=self._squaricn + ".phonon",
Atoms="H, C, O",
SumContributions=self._sum_contributions,
QuantumOrderEventsNumber=self._quantum_order_events_number,
OutputWorkspace="explicit")
wks_all_atoms_default = Abins(PhononFile=self._squaricn + ".phonon",
SumContributions=self._sum_contributions,
QuantumOrderEventsNumber=self._quantum_order_events_number,
OutputWorkspace="default")
# Python 3 has no guarantee of dict order so the workspaces in the group may be in
# a different order on Python 3
self.assertEqual(wks_all_atoms_explicitly.size(), wks_all_atoms_default.size())
explicit_names = wks_all_atoms_explicitly.getNames()
for i in range(len(explicit_names)):
explicit_name = explicit_names[i]
default_name = "default" + explicit_name[8:]
(result, messages) = CompareWorkspaces(explicit_name, default_name,
Tolerance=self._tolerance)
self.assertEqual(result, True)
self.assertEqual(wrk_ref.size(), wks_all_atoms_default.size())
ref_names = wrk_ref.getNames()
for i in range(len(ref_names)):
ref_name = ref_names[i]
default_name = "default" + ref_name[len(self._squaricn + "_ref"):]
(result, messages) = CompareWorkspaces(ref_name, default_name,
Tolerance=self._tolerance)
self.assertEqual(result, True)
if __name__ == "__main__":
unittest.main()
|
wdzhou/mantid
|
Framework/PythonInterface/test/python/plugins/algorithms/AbinsBasicTest.py
|
Python
|
gpl-3.0
| 10,223
|
[
"CASTEP",
"CRYSTAL"
] |
83b6f597df0d2d12a893add26f013610691ec949b1b295932c8373f14819ffab
|
# $Id$
#
# Copyright (C) 2009 Greg Landrum
# All Rights Reserved
#
import pickle
from rdkit import DataStructs, Chem
similarityMethods = {
'RDK': DataStructs.ExplicitBitVect,
'AtomPairs': DataStructs.IntSparseIntVect,
'TopologicalTorsions': DataStructs.LongSparseIntVect,
'Pharm2D': DataStructs.SparseBitVect,
'Gobbi2D': DataStructs.SparseBitVect,
'Morgan': DataStructs.UIntSparseIntVect,
'Avalon': DataStructs.ExplicitBitVect,
}
supportedSimilarityMethods = list(iter(similarityMethods))
class LayeredOptions:
loadLayerFlags = 0xFFFFFFFF
searchLayerFlags = 0x7
minPath = 1
maxPath = 6
fpSize = 1024
wordSize = 32
nWords = fpSize // wordSize
@staticmethod
def GetFingerprint(mol, query=True):
if query:
flags = LayeredOptions.searchLayerFlags
else:
flags = LayeredOptions.loadLayerFlags
return Chem.LayeredFingerprint(mol, layerFlags=flags, minPath=LayeredOptions.minPath,
maxPath=LayeredOptions.maxPath, fpSize=LayeredOptions.fpSize)
@staticmethod
def GetWords(mol, query=True):
txt = LayeredOptions.GetFingerprint(mol, query=query).ToBitString()
words = [int(txt[x:x + 32], 2) for x in range(0, len(txt), 32)]
return words
@staticmethod
def GetQueryText(mol, query=True):
words = LayeredOptions.GetWords(mol, query=query)
colqs = []
for idx, word in enumerate(words):
if not word:
continue
idx = idx + 1
colqs.append('%(word)d&Col_%(idx)d=%(word)d' % locals())
return ' and '.join(colqs)
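# For example (illustrative values only): if the 3rd and 7th 32-bit words of
# the layered fingerprint are 123 and 456, GetQueryText returns
#     '123&Col_3=123 and 456&Col_7=456'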
def BuildSigFactory(options=None, fdefFile=None,
bins=[(2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 100)],
skipFeats=('LumpedHydrophobe', 'ZnBinder')):
if options:
fdefFile = options.fdefFile
if not fdefFile:
raise ValueError('bad fdef file')
from rdkit.Chem import ChemicalFeatures
from rdkit.Chem.Pharm2D import SigFactory
featFactory = ChemicalFeatures.BuildFeatureFactory(fdefFile)
sigFactory = SigFactory.SigFactory(featFactory, skipFeats=skipFeats, trianglePruneBins=False)
sigFactory.SetBins(bins)
return sigFactory
def BuildAtomPairFP(mol):
from rdkit.Chem.AtomPairs import Pairs
fp = Pairs.GetAtomPairFingerprintAsIntVect(mol)
fp._sumCache = fp.GetTotalVal()
return fp
def BuildTorsionsFP(mol):
from rdkit.Chem.AtomPairs import Torsions
fp = Torsions.GetTopologicalTorsionFingerprintAsIntVect(mol)
fp._sumCache = fp.GetTotalVal()
return fp
def BuildRDKitFP(mol):
fp = Chem.RDKFingerprint(mol, nBitsPerHash=1)
return fp
def BuildPharm2DFP(mol):
global sigFactory
from rdkit.Chem.Pharm2D import Generate
try:
fp = Generate.Gen2DFingerprint(mol, sigFactory)
except IndexError:
print('FAIL:', Chem.MolToSmiles(mol, True))
raise
return fp
def BuildMorganFP(mol):
from rdkit.Chem import rdMolDescriptors
fp = rdMolDescriptors.GetMorganFingerprint(mol, 2)
fp._sumCache = fp.GetTotalVal()
return fp
def BuildAvalonFP(mol, smiles=None):
from rdkit.Avalon import pyAvalonTools
if smiles is None:
fp = pyAvalonTools.GetAvalonFP(mol)
else:
fp = pyAvalonTools.GetAvalonFP(smiles, True)
return fp
def DepickleFP(pkl, similarityMethod):
if not isinstance(pkl, (bytes, str)):
pkl = str(pkl)
try:
klass = similarityMethods[similarityMethod]
fp = klass(pkl)
except Exception:
import traceback
traceback.print_exc()
fp = pickle.loads(pkl)
return fp
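# Minimal round-trip sketch (not part of the original module): serialise a
# fingerprint with ToBinary() and rebuild it through DepickleFP. Ethanol is
# used purely as a placeholder molecule.
def _example_depickle_roundtrip():
    mol = Chem.MolFromSmiles('CCO')
    fp = BuildRDKitFP(mol)
    fp2 = DepickleFP(fp.ToBinary(), 'RDK')
    # identical bit vectors give a Tanimoto similarity of 1.0
    return DataStructs.TanimotoSimilarity(fp, fp2)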
|
greglandrum/rdkit
|
rdkit/Chem/MolDb/FingerprintUtils.py
|
Python
|
bsd-3-clause
| 3,728
|
[
"RDKit"
] |
95f40d8d6423021a06330feef038fe6baac8fc5f24eabd2e38e58e09088bef2a
|
"""
This module contains all the Data Access Objects for models which are persisted to Elasticsearch
at some point in their lifecycle.
Each DAO is an extension of the octopus ESDAO utility class which provides all of the ES-level heavy lifting,
so these DAOs mostly just provide information on where to persist the data, and some additional storage-layer
query methods as required
"""
from octopus.modules.es import dao
class ContentLogDAO(dao.ESDAO):
__type__ = 'contentlog'
class UnroutedNotificationDAO(dao.ESDAO):
"""
DAO for UnroutedNotifications
"""
__type__ = 'unrouted'
""" The index type to use to store these objects """
@classmethod
def example(cls):
"""
request a document which acts as an example for this type
"""
from service.tests import fixtures
return cls(fixtures.NotificationFactory.unrouted_notification())
class RoutedNotificationDAO(dao.TimeBoxedTypeESDAO):
"""
DAO for RoutedNotification
This is an extension of the TimeBoxedTypeESDAO object, which means that a new type is created every
period (e.g. monthly) for new content. This enables rapid dropping of old index types without affecting
Elasticsearch performance, and works here because RoutedNotifications only persist for a limited time
"""
__type__ = 'routed'
""" The base index type to use to store these objects - this will be appended by the time-boxing features of the DAO with the creation timestamp """
@classmethod
def example(cls):
"""
request a document which acts as an example for this type
"""
from service.tests import fixtures
return cls(fixtures.NotificationFactory.routed_notification())
class FailedNotificationDAO(dao.ESDAO):
"""
DAO for FailedNotifications
"""
__type__ = "failed"
""" The index type to use to store these objects """
class RepositoryConfigDAO(dao.ESDAO):
"""
DAO for RepositoryConfig
"""
__type__ = 'repo_config'
""" The index type to use to store these objects """
class MatchProvenanceDAO(dao.ESDAO):
"""
DAO for MatchProvenance
"""
__type__ = "match_prov"
""" The index type to use to store these objects """
@classmethod
def pull_by_notification(cls, notification_id, size=10):
"""
List all of the match provenance information for the requested notification
:param notification_id: the id of the notification for which to retrieve match provenance
:param size: the maximum number to return (defaults to 10)
"""
q = MatchProvNotificationQuery(notification_id, size=size)
return cls.object_query(q=q.query())
class MatchProvNotificationQuery(object):
"""
Query wrapper which generates an ES query for retrieving match provenance objects
based on the notification to which they are attached
"""
def __init__(self, notification_id, size=10):
"""
Set the parameters of the query
:param notification_id: the id of the notification for which to retrieve match provenance
:param size: the maximum number to return (defaults to 10)
"""
self.notification_id = notification_id
self.size = size
def query(self):
"""
generate the query as a python dictionary object
:return: a python dictionary containing the ES query, ready for JSON serialisation
"""
return {
"query" : {
"term" : {"notification.exact" : self.notification_id}
},
"size" : self.size
}
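# For example (illustrative id only), MatchProvNotificationQuery("1234", size=5).query()
# serialises to:
#
#     {"query": {"term": {"notification.exact": "1234"}}, "size": 5}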
class RetrievalRecordDAO(dao.ESDAO):
"""
DAO for RetrievalRecord
"""
__type__ = "retrieval"
""" The index type to use to store these objects """
class AccountDAO(dao.ESDAO):
"""
DAO for Account
"""
__type__ = "account"
""" The index type to use to store these objects """
|
JiscPER/jper
|
service/dao.py
|
Python
|
apache-2.0
| 3,957
|
[
"Octopus"
] |
01842fdec6354bd617a419f103d4fb33220ba04b78a9a2239b891aefee98b31b
|
from datetime import datetime, timezone
from unittest import TestCase
from opaque_keys.edx.keys import CourseKey
import attr
from ...data import (
CourseOutlineData, CourseSectionData, CourseLearningSequenceData, VisibilityData, CourseVisibility
)
class TestCourseOutlineData(TestCase):
"""
Simple set of tests for data class validations.
"""
@classmethod
def setUpClass(cls):
"""
All our data classes are immutable, so we can set up a baseline course
outline and then make slightly modified versions for each particular
test as needed.
"""
super().setUpClass()
normal_visibility = VisibilityData(
hide_from_toc=False,
visible_to_staff_only=False
)
cls.course_key = CourseKey.from_string("course-v1:OpenEdX+Learning+TestRun")
cls.course_outline = CourseOutlineData(
course_key=cls.course_key,
title="Exciting Test Course!",
published_at=datetime(2020, 5, 19, tzinfo=timezone.utc),
published_version="5ebece4b69dd593d82fe2014",
sections=generate_sections(cls.course_key, [3, 2]),
course_visibility=CourseVisibility.PRIVATE
)
def test_deprecated_course_key(self):
"""Old-Mongo style, "Org/Course/Run" keys are not supported."""
old_course_key = CourseKey.from_string("OpenEdX/TestCourse/TestRun")
with self.assertRaises(ValueError):
attr.evolve(self.course_outline, course_key=old_course_key)
def test_sequence_building(self):
"""Make sure sequences were set correctly from sections data."""
for section in self.course_outline.sections:
for seq in section.sequences:
self.assertEqual(seq, self.course_outline.sequences[seq.usage_key])
self.assertEqual(
sum(len(section.sequences) for section in self.course_outline.sections),
len(self.course_outline.sequences),
)
def test_duplicate_sequence(self):
"""We don't support DAGs. Sequences can only be in one Section."""
# This section has Chapter 2's sequences in it
section_with_dupe_seq = attr.evolve(
self.course_outline.sections[1], title="Chapter 2 dupe",
)
with self.assertRaises(ValueError):
attr.evolve(
self.course_outline,
sections=self.course_outline.sections + [section_with_dupe_seq]
)
def test_size(self):
"""Limit how large a CourseOutline is allowed to be."""
with self.assertRaises(ValueError):
attr.evolve(
self.course_outline,
sections=generate_sections(self.course_key, [1001])
)
def test_remove_sequence(self):
"""Remove a single sequence from the CourseOutlineData (creates a copy)."""
seq_to_remove = self.course_outline.sections[0].sequences[0]
new_outline = self.course_outline.remove({seq_to_remove.usage_key})
assert self.course_outline != new_outline
assert seq_to_remove.usage_key in self.course_outline.sequences
assert seq_to_remove.usage_key not in new_outline.sequences
assert len(new_outline.sections[0].sequences) == len(self.course_outline.sections[0].sequences) - 1
for seq in new_outline.sections[0].sequences:
assert seq != seq_to_remove
def test_remove_section(self):
"""
Remove a whole Section from the CourseOutlineData (creates a copy).
Removing a Section also removes all Sequences in that Section.
"""
section_to_remove = self.course_outline.sections[0]
new_outline = self.course_outline.remove({section_to_remove.usage_key})
assert self.course_outline != new_outline
assert len(new_outline.sections) == len(self.course_outline.sections) - 1
assert section_to_remove != new_outline.sections[0]
for seq in section_to_remove.sequences:
assert seq.usage_key not in new_outline.sequences
def test_remove_nonexistant(self):
"""Removing something that's not already there is a no-op."""
seq_key_to_remove = self.course_key.make_usage_key('sequential', 'not_here')
new_outline = self.course_outline.remove({seq_key_to_remove})
assert new_outline == self.course_outline
def generate_sections(course_key, num_sequences):
"""
Generate a list of CourseSectionData.
`num_sequences` is a list that contains the length of each CourseSectionData
in order. So if you pass in [1, 3, 5], we would pass back a list of three
CourseSectionData, where the first one has 1 CourseLearningSequenceData as
its sequences, the second has 3 sequences, and the third has 5 sequences.
All sections and sequences have normal visibility.
"""
normal_visibility = VisibilityData(
hide_from_toc=False,
visible_to_staff_only=False
)
sections = []
for sec_num, seq_count in enumerate(num_sequences, 1):
sections.append(
CourseSectionData(
usage_key=course_key.make_usage_key('chapter', 'ch_{}'.format(sec_num)),
title="Chapter {}: 🔥".format(sec_num),
visibility=normal_visibility,
sequences=[
CourseLearningSequenceData(
usage_key=course_key.make_usage_key(
'sequential', 'seq_{}_{}'.format(sec_num, seq_num)
),
title="Seq {}.{}: 🔥".format(sec_num, seq_num),
visibility=normal_visibility,
)
for seq_num in range(seq_count)
]
)
)
return sections
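# Illustrative sketch only (mirrors the course key used in setUpClass above):
#
#     course_key = CourseKey.from_string("course-v1:OpenEdX+Learning+TestRun")
#     sections = generate_sections(course_key, [1, 3, 5])
#     assert [len(s.sequences) for s in sections] == [1, 3, 5]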
|
msegado/edx-platform
|
openedx/core/djangoapps/content/learning_sequences/api/tests/test_data.py
|
Python
|
agpl-3.0
| 5,825
|
[
"exciting"
] |
82112223bd5afc8a9d1d109350098a801fc382bd0faecebd26f8c5464d65dc81
|
# -*- test-case-name: twisted.trial.test.test_tests -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Things likely to be used by writers of unit tests.
Maintainer: Jonathan Lange
"""
import doctest, inspect
import os, warnings, sys, tempfile, gc, types
from pprint import pformat
from twisted.internet import defer, utils
from twisted.python import components, failure, log, monkey
from twisted.python.reflect import qual
from twisted.python.compat import set
from twisted.python import deprecate
from twisted.python.deprecate import getDeprecationWarningString
from twisted.trial import itrial, reporter, util
pyunit = __import__('unittest')
from zope.interface import implements
class SkipTest(Exception):
"""
Raise this (with a reason) to skip the current test. You may also set
method.skip to a reason string to skip it, or set class.skip to skip the
entire TestCase.
"""
class FailTest(AssertionError):
"""Raised to indicate the current test has failed to pass."""
class Todo(object):
"""
Internal object used to mark a L{TestCase} as 'todo'. Tests marked 'todo'
are reported differently in Trial L{TestResult}s. If todo'd tests fail,
they do not fail the suite and the errors are reported in a separate
category. If todo'd tests succeed, Trial L{TestResult}s will report an
unexpected success.
"""
def __init__(self, reason, errors=None):
"""
@param reason: A string explaining why the test is marked 'todo'
@param errors: An iterable of exception types that the test is
expected to raise. If one of these errors is raised by the test, it
will be trapped. Raising any other kind of error will fail the test.
If C{None} is passed, then all errors will be trapped.
"""
self.reason = reason
self.errors = errors
def __repr__(self):
return "<Todo reason=%r errors=%r>" % (self.reason, self.errors)
def expected(self, failure):
"""
@param failure: A L{twisted.python.failure.Failure}.
@return: C{True} if C{failure} is expected, C{False} otherwise.
"""
if self.errors is None:
return True
for error in self.errors:
if failure.check(error):
return True
return False
def makeTodo(value):
"""
Return a L{Todo} object built from C{value}.
If C{value} is a string, return a Todo that expects any exception with
C{value} as a reason. If C{value} is a tuple, the second element is used
as the reason and the first element as the expected error(s).
@param value: A string or a tuple of C{(errors, reason)}, where C{errors}
is either a single exception class or an iterable of exception classes.
@return: A L{Todo} object.
"""
if isinstance(value, str):
return Todo(reason=value)
if isinstance(value, tuple):
errors, reason = value
try:
errors = list(errors)
except TypeError:
errors = [errors]
return Todo(reason=reason, errors=errors)
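# Illustrative examples of the accepted values (sketch only):
#
#     makeTodo("reason text")                        # any exception is expected
#     makeTodo((IOError, "reason text"))             # only IOError is expected
#     makeTodo(((IOError, OSError), "reason text"))  # IOError or OSError expected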
class _Warning(object):
"""
A L{_Warning} instance represents one warning emitted through the Python
warning system (L{warnings}). This is used to insulate callers of
L{_collectWarnings} from changes to the Python warnings system which might
otherwise require changes to the warning objects that function passes to
the observer object it accepts.
@ivar message: The string which was passed as the message parameter to
L{warnings.warn}.
@ivar category: The L{Warning} subclass which was passed as the category
parameter to L{warnings.warn}.
@ivar filename: The name of the file containing the definition of the code
object which was C{stacklevel} frames above the call to
L{warnings.warn}, where C{stacklevel} is the value of the C{stacklevel}
parameter passed to L{warnings.warn}.
@ivar lineno: The source line associated with the active instruction of the
code object which was C{stacklevel} frames above the call to
L{warnings.warn}, where C{stacklevel} is the value of the C{stacklevel}
parameter passed to L{warnings.warn}.
"""
def __init__(self, message, category, filename, lineno):
self.message = message
self.category = category
self.filename = filename
self.lineno = lineno
def _collectWarnings(observeWarning, f, *args, **kwargs):
"""
Call C{f} with C{args} positional arguments and C{kwargs} keyword arguments
and collect all warnings which are emitted as a result in a list.
@param observeWarning: A callable which will be invoked with a L{_Warning}
instance each time a warning is emitted.
@return: The return value of C{f(*args, **kwargs)}.
"""
def showWarning(message, category, filename, lineno, file=None, line=None):
assert isinstance(message, Warning)
observeWarning(_Warning(
message.args[0], category, filename, lineno))
# Disable the per-module cache for every module otherwise if the warning
# which the caller is expecting us to collect was already emitted it won't
# be re-emitted by the call to f which happens below.
for v in sys.modules.itervalues():
if v is not None:
try:
v.__warningregistry__ = None
except:
# Don't specify a particular exception type to handle in case
# some wacky object raises some wacky exception in response to
# the setattr attempt.
pass
origFilters = warnings.filters[:]
origShow = warnings.showwarning
warnings.simplefilter('always')
try:
warnings.showwarning = showWarning
result = f(*args, **kwargs)
finally:
warnings.filters[:] = origFilters
warnings.showwarning = origShow
return result
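# Illustrative usage sketch only:
#
#     collected = []
#     result = _collectWarnings(collected.append,
#                               warnings.warn, "deprecated call", DeprecationWarning)
#     # 'collected' now holds one _Warning whose category is DeprecationWarning,
#     # and 'result' is the return value of warnings.warn (None).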
class _Assertions(pyunit.TestCase, object):
"""
Replaces many of the built-in TestCase assertions. In general, these
assertions provide better error messages and are easier to use in
callbacks. Also provides new assertions such as L{failUnlessFailure}.
Although the tests are defined as 'failIf*' and 'failUnless*', they can
also be called as 'assertNot*' and 'assert*'.
"""
def fail(self, msg=None):
"""
Absolutely fail the test. Do not pass go, do not collect $200.
@param msg: the message that will be displayed as the reason for the
failure
"""
raise self.failureException(msg)
def failIf(self, condition, msg=None):
"""
Fail the test if C{condition} evaluates to True.
@param condition: any object that defines __nonzero__
"""
if condition:
raise self.failureException(msg)
return condition
assertNot = assertFalse = failUnlessFalse = failIf
def failUnless(self, condition, msg=None):
"""
Fail the test if C{condition} evaluates to False.
@param condition: any object that defines __nonzero__
"""
if not condition:
raise self.failureException(msg)
return condition
assert_ = assertTrue = failUnlessTrue = failUnless
def failUnlessRaises(self, exception, f, *args, **kwargs):
"""
Fail the test unless calling the function C{f} with the given
C{args} and C{kwargs} raises C{exception}. The failure will report
the traceback and call stack of the unexpected exception.
@param exception: exception type that is to be expected
@param f: the function to call
@return: The raised exception instance, if it is of the given type.
@raise self.failureException: Raised if the function call does
not raise an exception or if it raises an exception of a
different type.
"""
try:
result = f(*args, **kwargs)
except exception, inst:
return inst
except:
raise self.failureException('%s raised instead of %s:\n %s'
% (sys.exc_info()[0],
exception.__name__,
failure.Failure().getTraceback()))
else:
raise self.failureException('%s not raised (%r returned)'
% (exception.__name__, result))
assertRaises = failUnlessRaises
def failUnlessEqual(self, first, second, msg=''):
"""
Fail the test if C{first} and C{second} are not equal.
@param msg: A string describing the failure that's included in the
exception.
"""
if not first == second:
if msg is None:
msg = ''
if len(msg) > 0:
msg += '\n'
raise self.failureException(
'%snot equal:\na = %s\nb = %s\n'
% (msg, pformat(first), pformat(second)))
return first
assertEqual = assertEquals = failUnlessEquals = failUnlessEqual
def failUnlessIdentical(self, first, second, msg=None):
"""
Fail the test if C{first} is not C{second}. This is an
object-identity equality test, not an object equality
(i.e. C{__eq__}) test.
@param msg: if msg is None, then the failure message will be
'%r is not %r' % (first, second)
"""
if first is not second:
raise self.failureException(msg or '%r is not %r' % (first, second))
return first
assertIdentical = failUnlessIdentical
def failIfIdentical(self, first, second, msg=None):
"""
Fail the test if C{first} is C{second}. This is an
object-identity equality test, not an object equality
(i.e. C{__eq__}) test.
@param msg: if msg is None, then the failure message will be
'%r is %r' % (first, second)
"""
if first is second:
raise self.failureException(msg or '%r is %r' % (first, second))
return first
assertNotIdentical = failIfIdentical
def failIfEqual(self, first, second, msg=None):
"""
Fail the test if C{first} == C{second}.
@param msg: if msg is None, then the failure message will be
'%r == %r' % (first, second)
"""
if not first != second:
raise self.failureException(msg or '%r == %r' % (first, second))
return first
assertNotEqual = assertNotEquals = failIfEquals = failIfEqual
def failUnlessIn(self, containee, container, msg=None):
"""
Fail the test if C{containee} is not found in C{container}.
@param containee: the value that should be in C{container}
@param container: a sequence type, or in the case of a mapping type,
will follow semantics of 'if key in dict.keys()'
@param msg: if msg is None, then the failure message will be
'%r not in %r' % (first, second)
"""
if containee not in container:
raise self.failureException(msg or "%r not in %r"
% (containee, container))
return containee
assertIn = failUnlessIn
def failIfIn(self, containee, container, msg=None):
"""
Fail the test if C{containee} is found in C{container}.
@param containee: the value that should not be in C{container}
@param container: a sequence type, or in the case of a mapping type,
will follow semantics of 'if key in dict.keys()'
@param msg: if msg is None, then the failure message will be
'%r in %r' % (first, second)
"""
if containee in container:
raise self.failureException(msg or "%r in %r"
% (containee, container))
return containee
assertNotIn = failIfIn
def failIfAlmostEqual(self, first, second, places=7, msg=None):
"""
Fail if the two objects are equal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero.
@note: decimal places (from zero) are usually not the same
as significant digits (measured from the most
significant digit).
@note: included for compatibility with PyUnit test cases
"""
if round(second-first, places) == 0:
raise self.failureException(msg or '%r == %r within %r places'
% (first, second, places))
return first
assertNotAlmostEqual = assertNotAlmostEquals = failIfAlmostEqual
failIfAlmostEquals = failIfAlmostEqual
def failUnlessAlmostEqual(self, first, second, places=7, msg=None):
"""
Fail if the two objects are unequal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero.
@note: decimal places (from zero) are usually not the same
as significant digits (measured from the most
significant digit).
@note: included for compatibility with PyUnit test cases
"""
if round(second-first, places) != 0:
raise self.failureException(msg or '%r != %r within %r places'
% (first, second, places))
return first
assertAlmostEqual = assertAlmostEquals = failUnlessAlmostEqual
failUnlessAlmostEquals = failUnlessAlmostEqual
def failUnlessApproximates(self, first, second, tolerance, msg=None):
"""
Fail if C{first} - C{second} > C{tolerance}
@param msg: if msg is None, then the failure message will be
'%r ~== %r' % (first, second)
"""
if abs(first - second) > tolerance:
raise self.failureException(msg or "%s ~== %s" % (first, second))
return first
assertApproximates = failUnlessApproximates
def failUnlessFailure(self, deferred, *expectedFailures):
"""
Fail if C{deferred} does not errback with one of C{expectedFailures}.
Returns the original Deferred with callbacks added. You will need
to return this Deferred from your test case.
"""
def _cb(ignore):
raise self.failureException(
"did not catch an error, instead got %r" % (ignore,))
def _eb(failure):
if failure.check(*expectedFailures):
return failure.value
else:
output = ('\nExpected: %r\nGot:\n%s'
% (expectedFailures, str(failure)))
raise self.failureException(output)
return deferred.addCallbacks(_cb, _eb)
assertFailure = failUnlessFailure
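# Illustrative usage sketch only (inside a TestCase method):
#
#     def test_reports_failure(self):
#         d = defer.fail(ValueError("boom"))
#         return self.assertFailure(d, ValueError)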
def failUnlessSubstring(self, substring, astring, msg=None):
"""
Fail if C{substring} does not exist within C{astring}.
"""
return self.failUnlessIn(substring, astring, msg)
assertSubstring = failUnlessSubstring
def failIfSubstring(self, substring, astring, msg=None):
"""
Fail if C{astring} contains C{substring}.
"""
return self.failIfIn(substring, astring, msg)
assertNotSubstring = failIfSubstring
def failUnlessWarns(self, category, message, filename, f,
*args, **kwargs):
"""
Fail if the given function doesn't generate the specified warning when
called. It calls the function, checks the warning, and forwards the
result of the function if everything is fine.
@param category: the category of the warning to check.
@param message: the output message of the warning to check.
@param filename: the filename where the warning should come from.
@param f: the function which is supposed to generate the warning.
@type f: any callable.
@param args: the arguments to C{f}.
@param kwargs: the keywords arguments to C{f}.
@return: the result of the original function C{f}.
"""
warningsShown = []
result = _collectWarnings(warningsShown.append, f, *args, **kwargs)
if not warningsShown:
self.fail("No warnings emitted")
first = warningsShown[0]
for other in warningsShown[1:]:
if ((other.message, other.category)
!= (first.message, first.category)):
self.fail("Can't handle different warnings")
self.assertEqual(first.message, message)
self.assertIdentical(first.category, category)
# Use starts with because of .pyc/.pyo issues.
self.failUnless(
filename.startswith(first.filename),
'Warning in %r, expected %r' % (first.filename, filename))
# It would be nice to be able to check the line number as well, but
# different configurations actually end up reporting different line
# numbers (generally the variation is only 1 line, but that's enough
# to fail the test erroneously...).
# self.assertEqual(lineno, xxx)
return result
assertWarns = failUnlessWarns
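# Illustrative usage sketch only (inside a TestCase method):
#
#     def test_emits_warning(self):
#         def offender():
#             warnings.warn("old API", DeprecationWarning)
#             return 7
#         result = self.assertWarns(DeprecationWarning, "old API", __file__, offender)
#         self.assertEqual(result, 7)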
def failUnlessIsInstance(self, instance, classOrTuple):
"""
Fail if C{instance} is not an instance of the given class or of
one of the given classes.
@param instance: the object to test the type (first argument of the
C{isinstance} call).
@type instance: any.
@param classOrTuple: the class or classes to test against (second
argument of the C{isinstance} call).
@type classOrTuple: class, type, or tuple.
"""
if not isinstance(instance, classOrTuple):
self.fail("%r is not an instance of %s" % (instance, classOrTuple))
assertIsInstance = failUnlessIsInstance
def failIfIsInstance(self, instance, classOrTuple):
"""
Fail if C{instance} is an instance of the given class or of
one of the given classes.
@param instance: the object to test the type (first argument of the
C{isinstance} call).
@type instance: any.
@param classOrTuple: the class or classes to test against (second
argument of the C{isinstance} call).
@type classOrTuple: class, type, or tuple.
"""
if isinstance(instance, classOrTuple):
self.fail("%r is an instance of %s" % (instance, classOrTuple))
assertNotIsInstance = failIfIsInstance
class _LogObserver(object):
"""
Observes the Twisted logs and catches any errors.
@ivar _errors: A C{list} of L{Failure} instances which were received as
error events from the Twisted logging system.
@ivar _added: A C{int} giving the number of times C{_add} has been called
less the number of times C{_remove} has been called; used to only add
this observer to the Twisted logging system once, regardless of the
number of calls to the add method.
@ivar _ignored: A C{list} of exception types which will not be recorded.
"""
def __init__(self):
self._errors = []
self._added = 0
self._ignored = []
def _add(self):
if self._added == 0:
log.addObserver(self.gotEvent)
self._oldFE, log._flushErrors = (log._flushErrors, self.flushErrors)
self._oldIE, log._ignore = (log._ignore, self._ignoreErrors)
self._oldCI, log._clearIgnores = (log._clearIgnores,
self._clearIgnores)
self._added += 1
def _remove(self):
self._added -= 1
if self._added == 0:
log.removeObserver(self.gotEvent)
log._flushErrors = self._oldFE
log._ignore = self._oldIE
log._clearIgnores = self._oldCI
def _ignoreErrors(self, *errorTypes):
"""
Do not store any errors with any of the given types.
"""
self._ignored.extend(errorTypes)
def _clearIgnores(self):
"""
Stop ignoring any errors we might currently be ignoring.
"""
self._ignored = []
def flushErrors(self, *errorTypes):
"""
Flush errors from the list of caught errors. If no arguments are
specified, remove all errors. If arguments are specified, only remove
errors of those types from the stored list.
"""
if errorTypes:
flushed = []
remainder = []
for f in self._errors:
if f.check(*errorTypes):
flushed.append(f)
else:
remainder.append(f)
self._errors = remainder
else:
flushed = self._errors
self._errors = []
return flushed
def getErrors(self):
"""
Return a list of errors caught by this observer.
"""
return self._errors
def gotEvent(self, event):
"""
The actual observer method. Called whenever a message is logged.
@param event: A dictionary containing the log message. Actual
structure undocumented (see source for L{twisted.python.log}).
"""
if event.get('isError', False) and 'failure' in event:
f = event['failure']
if len(self._ignored) == 0 or not f.check(*self._ignored):
self._errors.append(f)
_logObserver = _LogObserver()
_wait_is_running = []
_classFixturesDeprecationMessage = (
"%(method)s, deprecated since Twisted 8.2.0, was overridden by "
"%(class)s. Use %(replace)s instead.")
class TestCase(_Assertions):
"""
A unit test. The atom of the unit testing universe.
This class extends C{unittest.TestCase} from the standard library. The
main feature is the ability to return C{Deferred}s from tests and fixture
methods and to have the suite wait for those C{Deferred}s to fire.
To write a unit test, subclass C{TestCase} and define a method (say,
'test_foo') on the subclass. To run the test, instantiate your subclass
with the name of the method, and call L{run} on the instance, passing a
L{TestResult} object.
The C{trial} script will automatically find any C{TestCase} subclasses
defined in modules beginning with 'test_' and construct test cases for all
methods beginning with 'test'.
If an error is logged during the test run, the test will fail with an
error. See L{log.err}.
@ivar failureException: An exception class, defaulting to C{FailTest}. If
the test method raises this exception, it will be reported as a failure,
rather than an exception. All of the assertion methods raise this if the
assertion fails.
@ivar skip: C{None} or a string explaining why this test is to be
skipped. If defined, the test will not be run. Instead, it will be
reported to the result object as 'skipped' (if the C{TestResult} supports
skipping).
@ivar suppress: C{None} or a list of tuples of C{(args, kwargs)} to be
passed to C{warnings.filterwarnings}. Use these to suppress warnings
raised in a test. Useful for testing deprecated code. See also
L{util.suppress}.
@ivar timeout: C{None} or a real number of seconds. If set, the test will
raise an error if it takes longer than C{timeout} seconds.
@ivar todo: C{None}, a string or a tuple of C{(errors, reason)} where
C{errors} is either an exception class or an iterable of exception
classes, and C{reason} is a string. See L{Todo} or L{makeTodo} for more
information.
@ivar _suppressUpDownWarning: Private flag used by tests for C{setUpClass}
and C{tearDownClass} to suppress the deprecation warnings for these
methods. This is necessary since the normal warning suppression
mechanism does not work for these warnings. No code should use this
flag aside from tests for these methods. When support for the methods
is removed altogether, so should this flag be removed.
"""
implements(itrial.ITestCase)
failureException = FailTest
def __init__(self, methodName='runTest'):
"""
Construct an asynchronous test case for C{methodName}.
@param methodName: The name of a method on C{self}. This method should
be a unit test. That is, it should be a short method that calls some of
the assert* methods. If C{methodName} is unspecified, L{runTest} will
be used as the test method. This is mostly useful for testing Trial.
"""
super(TestCase, self).__init__(methodName)
self._testMethodName = methodName
testMethod = getattr(self, methodName)
self._parents = [testMethod, self]
self._parents.extend(util.getPythonContainers(testMethod))
self._shared = (hasattr(self, 'setUpClass') or
hasattr(self, 'tearDownClass'))
if self._shared:
self._prepareClassFixture()
if not hasattr(self.__class__, '_instances'):
self._initInstances()
self.__class__._instances.add(self)
self._passed = False
self._cleanups = []
def _initInstances(cls):
cls._instances = set()
cls._instancesRun = set()
_initInstances = classmethod(_initInstances)
if sys.version_info >= (2, 6):
# Override the comparison defined by the base TestCase which considers
# instances of the same class with the same _testMethodName to be
# equal. Since trial puts TestCase instances into a set, that
# definition of comparison makes it impossible to run the same test
# method twice. Most likely, trial should stop using a set to hold
# tests, but until it does, this is necessary on Python 2.6. Only
# __eq__ and __ne__ are required here, not __hash__, since the
# inherited __hash__ is compatible with these equality semantics. A
# different __hash__ might be slightly more efficient (by reducing
# collisions), but who cares? -exarkun
def __eq__(self, other):
return self is other
def __ne__(self, other):
return self is not other
def _isFirst(self):
return len(self.__class__._instancesRun) == 0
def _isLast(self):
return self.__class__._instancesRun == self.__class__._instances
def _prepareClassFixture(self):
"""Lots of tests assume that test methods all run in the same instance
of TestCase. This isn't true. Calling this method ensures that
self.__class__._testCaseInstance contains an instance of this class
that will remain the same for all tests from this class.
"""
if not hasattr(self.__class__, '_testCaseInstance'):
self.__class__._testCaseInstance = self
if self.__class__._testCaseInstance.__class__ != self.__class__:
self.__class__._testCaseInstance = self
def _run(self, methodName, result):
from twisted.internet import reactor
timeout = self.getTimeout()
def onTimeout(d):
e = defer.TimeoutError("%r (%s) still running at %s secs"
% (self, methodName, timeout))
f = failure.Failure(e)
# try to errback the deferred that the test returns (for no gorram
# reason) (see issue1005 and test_errorPropagation in
# test_deferred)
try:
d.errback(f)
except defer.AlreadyCalledError:
# if the deferred has been called already but the *back chain
# is still unfinished, crash the reactor and report timeout
# error ourself.
reactor.crash()
self._timedOut = True # see self._wait
todo = self.getTodo()
if todo is not None and todo.expected(f):
result.addExpectedFailure(self, f, todo)
else:
result.addError(self, f)
onTimeout = utils.suppressWarnings(
onTimeout, util.suppress(category=DeprecationWarning))
if self._shared:
test = self.__class__._testCaseInstance
else:
test = self
method = getattr(test, methodName)
d = defer.maybeDeferred(utils.runWithWarningsSuppressed,
self.getSuppress(), method)
call = reactor.callLater(timeout, onTimeout, d)
d.addBoth(lambda x : call.active() and call.cancel() or x)
return d
def shortDescription(self):
desc = super(TestCase, self).shortDescription()
if desc is None:
return self._testMethodName
return desc
def __call__(self, *args, **kwargs):
return self.run(*args, **kwargs)
def deferSetUpClass(self, result):
"""
Run the per-class set up fixture, C{setUpClass}, for this test case.
This must be called only once per TestCase subclass, since it will
run the fixture unconditionally.
@type result: L{IReporter} provider
@param result: The result which will be used to report any problems
encountered in C{setUpClass}.
@return: A L{Deferred} which will fire with the result of
C{setUpClass} or with C{None} if there is no C{setUpClass}
defined.
"""
if not hasattr(self, 'setUpClass'):
d = defer.succeed(None)
d.addCallback(self.deferSetUp, result)
return d
if not getattr(self, '_suppressUpDownWarning', None):
warn = deprecate.getWarningMethod()
warn(_classFixturesDeprecationMessage % {
'method': 'setUpClass',
'class': qual(self.__class__),
'replace': 'setUp'},
category=DeprecationWarning,
stacklevel=0)
d = self._run('setUpClass', result)
d.addCallbacks(self.deferSetUp, self._ebDeferSetUpClass,
callbackArgs=(result,),
errbackArgs=(result,))
return d
def _ebDeferSetUpClass(self, error, result):
if error.check(SkipTest):
result.addSkip(self, self._getReason(error))
self.__class__._instancesRun.remove(self)
elif error.check(KeyboardInterrupt):
result.stop()
else:
result.addError(self, error)
self.__class__._instancesRun.remove(self)
def deferSetUp(self, ignored, result):
d = self._run('setUp', result)
d.addCallbacks(self.deferTestMethod, self._ebDeferSetUp,
callbackArgs=(result,),
errbackArgs=(result,))
return d
def _ebDeferSetUp(self, failure, result):
if failure.check(SkipTest):
result.addSkip(self, self._getReason(failure))
else:
result.addError(self, failure)
if failure.check(KeyboardInterrupt):
result.stop()
return self.deferRunCleanups(None, result)
def deferTestMethod(self, ignored, result):
d = self._run(self._testMethodName, result)
d.addCallbacks(self._cbDeferTestMethod, self._ebDeferTestMethod,
callbackArgs=(result,),
errbackArgs=(result,))
d.addBoth(self.deferRunCleanups, result)
d.addBoth(self.deferTearDown, result)
if self._shared and hasattr(self, 'tearDownClass') and self._isLast():
d.addBoth(self.deferTearDownClass, result)
return d
def _cbDeferTestMethod(self, ignored, result):
if self.getTodo() is not None:
result.addUnexpectedSuccess(self, self.getTodo())
else:
self._passed = True
return ignored
def _ebDeferTestMethod(self, f, result):
todo = self.getTodo()
if todo is not None and todo.expected(f):
result.addExpectedFailure(self, f, todo)
elif f.check(self.failureException, FailTest):
result.addFailure(self, f)
elif f.check(KeyboardInterrupt):
result.addError(self, f)
result.stop()
elif f.check(SkipTest):
result.addSkip(self, self._getReason(f))
else:
result.addError(self, f)
def deferTearDown(self, ignored, result):
d = self._run('tearDown', result)
d.addErrback(self._ebDeferTearDown, result)
return d
def _ebDeferTearDown(self, failure, result):
result.addError(self, failure)
if failure.check(KeyboardInterrupt):
result.stop()
self._passed = False
def deferRunCleanups(self, ignored, result):
"""
Run any scheduled cleanups and report errors (if any) to the result
object.
"""
d = self._runCleanups()
d.addCallback(self._cbDeferRunCleanups, result)
return d
def _cbDeferRunCleanups(self, cleanupResults, result):
for flag, failure in cleanupResults:
if flag == defer.FAILURE:
result.addError(self, failure)
if failure.check(KeyboardInterrupt):
result.stop()
self._passed = False
def deferTearDownClass(self, ignored, result):
"""
Run the per-class tear down fixture, C{tearDownClass}, for this test
case.
This must be called only once per TestCase subclass, since it will
run the fixture unconditionally. This must not be called if there
is no C{tearDownClass} method.
@param ignored: An ignored parameter.
@type result: L{IReporter} provider
@param result: The result which will be used to report any problems
encountered in C{tearDownClass}.
@return: A L{Deferred} which will fire with the result of
C{tearDownClass}.
"""
if not getattr(self, '_suppressUpDownWarning', None):
warn = deprecate.getWarningMethod()
warn(_classFixturesDeprecationMessage % {
'method': 'tearDownClass',
'class': qual(self.__class__),
'replace': 'tearDown'},
category=DeprecationWarning,
stacklevel=1)
d = self._run('tearDownClass', result)
d.addErrback(self._ebTearDownClass, result)
return d
def _ebTearDownClass(self, error, result):
if error.check(KeyboardInterrupt):
result.stop()
result.addError(self, error)
def _cleanUp(self, result):
try:
clean = util._Janitor(self, result).postCaseCleanup()
if not clean:
self._passed = False
except:
result.addError(self, failure.Failure())
self._passed = False
for error in self._observer.getErrors():
result.addError(self, error)
self._passed = False
self.flushLoggedErrors()
self._removeObserver()
if self._passed:
result.addSuccess(self)
def _classCleanUp(self, result):
try:
util._Janitor(self, result).postClassCleanup()
except:
result.addError(self, failure.Failure())
def _makeReactorMethod(self, name):
"""
Create a method which wraps the reactor method C{name}. The new
method issues a deprecation warning and calls the original.
"""
def _(*a, **kw):
warnings.warn("reactor.%s cannot be used inside unit tests. "
"In the future, using %s will fail the test and may "
"crash or hang the test run."
% (name, name),
stacklevel=2, category=DeprecationWarning)
return self._reactorMethods[name](*a, **kw)
return _
def _deprecateReactor(self, reactor):
"""
Deprecate C{iterate}, C{crash} and C{stop} on C{reactor}. That is,
each method is wrapped in a function that issues a deprecation
warning, then calls the original.
@param reactor: The Twisted reactor.
"""
self._reactorMethods = {}
for name in ['crash', 'iterate', 'stop']:
self._reactorMethods[name] = getattr(reactor, name)
setattr(reactor, name, self._makeReactorMethod(name))
def _undeprecateReactor(self, reactor):
"""
Restore the deprecated reactor methods. Undoes what
L{_deprecateReactor} did.
@param reactor: The Twisted reactor.
"""
for name, method in self._reactorMethods.iteritems():
setattr(reactor, name, method)
self._reactorMethods = {}
def _installObserver(self):
self._observer = _logObserver
self._observer._add()
def _removeObserver(self):
self._observer._remove()
def flushLoggedErrors(self, *errorTypes):
"""
Remove stored errors received from the log.
C{TestCase} stores each error logged during the run of the test and
reports them as errors during the cleanup phase (after C{tearDown}).
@param *errorTypes: If unspecified, flush all errors. Otherwise, only
flush errors that match the given types.
@return: A list of failures that have been removed.
"""
return self._observer.flushErrors(*errorTypes)
def flushWarnings(self, offendingFunctions=None):
"""
Remove stored warnings from the list of captured warnings and return
them.
@param offendingFunctions: If C{None}, all warnings issued during the
currently running test will be flushed. Otherwise, only warnings
which I{point} to a function included in this list will be flushed.
All warnings include a filename and source line number; if these
parts of a warning point to a source line which is part of a
function, then the warning I{points} to that function.
@type offendingFunctions: L{NoneType} or L{list} of functions or methods.
@raise ValueError: If C{offendingFunctions} is not C{None} and includes
an object which is not a L{FunctionType} or L{MethodType} instance.
@raise IOError: If the source file (.py) for one of the functions in
C{offendingFunctions} is not available, its lines cannot be
determined and L{IOError} will be raised. (It may be possible to
implement this function without requiring source files by using the
co_lnotab attribute of code objects.)
@return: A C{list}, each element of which is a C{dict} giving
information about one warning which was flushed by this call. The
keys of each C{dict} are:
- C{'message'}: The string which was passed as the I{message}
parameter to L{warnings.warn}.
- C{'category'}: The warning subclass which was passed as the
I{category} parameter to L{warnings.warn}.
- C{'filename'}: The name of the file containing the definition
of the code object which was C{stacklevel} frames above the
call to L{warnings.warn}, where C{stacklevel} is the value of
the C{stacklevel} parameter passed to L{warnings.warn}.
- C{'lineno'}: The source line associated with the active
instruction of the code object which was C{stacklevel}
frames above the call to L{warnings.warn}, where
C{stacklevel} is the value of the C{stacklevel} parameter
passed to L{warnings.warn}.
"""
if offendingFunctions is None:
toFlush = self._warnings[:]
self._warnings[:] = []
else:
toFlush = []
for w in self._warnings:
for aFunction in offendingFunctions:
if not isinstance(aFunction, (
types.FunctionType, types.MethodType)):
raise ValueError("%r is not a function or method" % (
aFunction,))
filename = inspect.getabsfile(aFunction)
if os.path.normcase(filename) != os.path.normcase(w.filename):
continue
lines, start = inspect.getsourcelines(aFunction)
if not (start <= w.lineno <= start + len(lines)):
continue
# The warning points to this function, flush it and move on
# to the next warning.
toFlush.append(w)
break
# Remove everything which is being flushed.
map(self._warnings.remove, toFlush)
return [
{'message': w.message, 'category': w.category,
'filename': w.filename, 'lineno': w.lineno}
for w in toFlush]
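# Illustrative sketch of flushWarnings in a test method; someDeprecatedHelper
# is a hypothetical function assumed to emit a DeprecationWarning when called.
#
#     def test_deprecatedHelper(self):
#         someDeprecatedHelper()
#         warningsShown = self.flushWarnings([someDeprecatedHelper])
#         self.assertEqual(len(warningsShown), 1)
#         self.assertIdentical(warningsShown[0]['category'], DeprecationWarning)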
def addCleanup(self, f, *args, **kwargs):
"""
Add the given function to a list of functions to be called after the
test has run, but before C{tearDown}.
Functions will be run in reverse order of being added. This helps
ensure that tear down complements set up.
The function C{f} may return a Deferred. If so, C{TestCase} will wait
until the Deferred has fired before proceeding to the next function.
"""
self._cleanups.append((f, args, kwargs))
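# Illustrative sketch of addCleanup; acquireResource is a hypothetical helper
# returning an object with release() and isOpen() methods.
#
#     def test_usesResource(self):
#         resource = acquireResource()
#         self.addCleanup(resource.release)  # runs after the test, before tearDown
#         self.assertTrue(resource.isOpen())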
def callDeprecated(self, version, f, *args, **kwargs):
"""
Call a function that was deprecated at a specific version.
@param version: The version that the function was deprecated in.
@param f: The deprecated function to call.
@return: Whatever the function returns.
"""
result = f(*args, **kwargs)
warningsShown = self.flushWarnings([self.callDeprecated])
if len(warningsShown) == 0:
self.fail('%r is not deprecated.' % (f,))
observedWarning = warningsShown[0]['message']
expectedWarning = getDeprecationWarningString(f, version)
self.assertEqual(expectedWarning, observedWarning)
return result
def _runCleanups(self):
"""
Run the cleanups added with L{addCleanup} in order.
@return: A C{Deferred} that fires when all cleanups are run.
"""
def _makeFunction(f, args, kwargs):
return lambda: f(*args, **kwargs)
callables = []
while len(self._cleanups) > 0:
f, args, kwargs = self._cleanups.pop()
callables.append(_makeFunction(f, args, kwargs))
return util._runSequentially(callables)
def patch(self, obj, attribute, value):
"""
Monkey patch an object for the duration of the test.
The monkey patch will be reverted at the end of the test using the
L{addCleanup} mechanism.
The L{MonkeyPatcher} is returned so that users can restore and
re-apply the monkey patch within their tests.
@param obj: The object to monkey patch.
@param attribute: The name of the attribute to change.
@param value: The value to set the attribute to.
@return: A L{monkey.MonkeyPatcher} object.
"""
monkeyPatch = monkey.MonkeyPatcher((obj, attribute, value))
monkeyPatch.patch()
self.addCleanup(monkeyPatch.restore)
return monkeyPatch
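# Illustrative sketch of patch; the config object and its DEBUG attribute are
# assumptions, not part of this module.
#
#     def test_debugDisabled(self):
#         self.patch(config, 'DEBUG', False)  # reverted automatically via addCleanup
#         self.assertFalse(config.DEBUG)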
def runTest(self):
"""
If no C{methodName} argument is passed to the constructor, L{run} will
treat this method as the thing with the actual test inside.
"""
def run(self, result):
"""
Run the test case, storing the results in C{result}.
First runs C{setUp} on self, then runs the test method (defined in the
constructor), then runs C{tearDown}. Any of these may return
L{Deferred}s. After they complete, does some reactor cleanup.
@param result: A L{TestResult} object.
"""
log.msg("--> %s <--" % (self.id()))
from twisted.internet import reactor
new_result = itrial.IReporter(result, None)
if new_result is None:
result = PyUnitResultAdapter(result)
else:
result = new_result
self._timedOut = False
if self._shared and self not in self.__class__._instances:
self.__class__._instances.add(self)
result.startTest(self)
if self.getSkip(): # don't run test methods that are marked as .skip
result.addSkip(self, self.getSkip())
result.stopTest(self)
return
self._installObserver()
# All the code inside runThunk will be run such that warnings emitted
# by it will be collected and retrievable by flushWarnings.
def runThunk():
self._passed = False
first = False
if self._shared:
first = self._isFirst()
self.__class__._instancesRun.add(self)
self._deprecateReactor(reactor)
try:
if first:
d = self.deferSetUpClass(result)
else:
d = self.deferSetUp(None, result)
try:
self._wait(d)
finally:
self._cleanUp(result)
if self._shared and self._isLast():
self._initInstances()
self._classCleanUp(result)
if not self._shared:
self._classCleanUp(result)
finally:
self._undeprecateReactor(reactor)
self._warnings = []
_collectWarnings(self._warnings.append, runThunk)
# Any collected warnings which the test method didn't flush get
# re-emitted so they'll be logged or show up on stdout or whatever.
for w in self.flushWarnings():
try:
warnings.warn_explicit(**w)
except:
result.addError(self, failure.Failure())
result.stopTest(self)
def _getReason(self, f):
if len(f.value.args) > 0:
reason = f.value.args[0]
else:
warnings.warn(("Do not raise unittest.SkipTest with no "
"arguments! Give a reason for skipping tests!"),
stacklevel=2)
reason = f
return reason
def getSkip(self):
"""
Return the skip reason set on this test, if any is set. Checks on the
instance first, then the class, then the module, then packages. As
soon as it finds something with a C{skip} attribute, returns that.
Returns C{None} if it cannot find anything. See L{TestCase} docstring
for more details.
"""
return util.acquireAttribute(self._parents, 'skip', None)
def getTodo(self):
"""
Return a L{Todo} object if the test is marked todo. Checks on the
instance first, then the class, then the module, then packages. As
soon as it finds something with a C{todo} attribute, returns that.
Returns C{None} if it cannot find anything. See L{TestCase} docstring
for more details.
"""
todo = util.acquireAttribute(self._parents, 'todo', None)
if todo is None:
return None
return makeTodo(todo)
def getTimeout(self):
"""
Returns the timeout value set on this test. Checks on the instance
first, then the class, then the module, then packages. As soon as it
finds something with a C{timeout} attribute, returns that. Returns
L{util.DEFAULT_TIMEOUT_DURATION} if it cannot find anything. See
L{TestCase} docstring for more details.
"""
timeout = util.acquireAttribute(self._parents, 'timeout',
util.DEFAULT_TIMEOUT_DURATION)
try:
return float(timeout)
except (ValueError, TypeError):
# XXX -- this is here because sometimes people will have methods
# called 'timeout', or set timeout to 'orange', or something
# Particularly, test_news.NewsTestCase and ReactorCoreTestCase
# both do this.
warnings.warn("'timeout' attribute needs to be a number.",
category=DeprecationWarning)
return util.DEFAULT_TIMEOUT_DURATION
def getSuppress(self):
"""
Returns any warning suppressions set for this test. Checks on the
instance first, then the class, then the module, then packages. As
soon as it finds something with a C{suppress} attribute, returns that.
Returns an empty list (i.e. suppress no warnings) if it cannot find
anything. See L{TestCase} docstring for more details.
"""
return util.acquireAttribute(self._parents, 'suppress', [])
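# Illustrative sketch of how the skip/todo/timeout/suppress attributes looked
# up by the methods above are typically declared (names and reasons below are
# assumptions, not part of this module):
#
#     class ExampleTests(TestCase):
#         timeout = 10  # error out if any test takes longer than 10 seconds
#         suppress = [util.suppress(category=DeprecationWarning)]
#
#         def test_notReadyYet(self):
#             self.fail("not implemented")
#         test_notReadyYet.todo = "Feature not implemented yet"
#
#         def test_requiresNetwork(self):
#             self.assertTrue(canReachNetwork())  # hypothetical helper
#         test_requiresNetwork.skip = "No network access in this environment"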
def visit(self, visitor):
"""
Visit this test case. Call C{visitor} with C{self} as a parameter.
Deprecated in Twisted 8.0.
@param visitor: A callable which expects a single parameter: a test
case.
@return: None
"""
warnings.warn("Test visitors deprecated in Twisted 8.0",
category=DeprecationWarning)
visitor(self)
def mktemp(self):
"""Returns a unique name that may be used as either a temporary
directory or filename.
@note: you must call os.mkdir on the value returned from this
method if you wish to use it as a directory!
"""
MAX_FILENAME = 32 # some platforms limit lengths of filenames
base = os.path.join(self.__class__.__module__[:MAX_FILENAME],
self.__class__.__name__[:MAX_FILENAME],
self._testMethodName[:MAX_FILENAME])
if not os.path.exists(base):
os.makedirs(base)
dirname = tempfile.mkdtemp('', '', base)
return os.path.join(dirname, 'temp')
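# Illustrative sketch of mktemp in a test; each call yields a path unique to
# the running test method (assumes os is imported in the test module).
#
#     def test_writesFile(self):
#         path = self.mktemp()
#         f = open(path, 'w')
#         f.write('some data')
#         f.close()
#         self.assertTrue(os.path.exists(path))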
def _wait(self, d, running=_wait_is_running):
"""Take a Deferred that only ever callbacks. Block until it happens.
"""
from twisted.internet import reactor
if running:
raise RuntimeError("_wait is not reentrant")
results = []
def append(any):
if results is not None:
results.append(any)
def crash(ign):
if results is not None:
reactor.crash()
crash = utils.suppressWarnings(
crash, util.suppress(message=r'reactor\.crash cannot be used.*',
category=DeprecationWarning))
def stop():
reactor.crash()
stop = utils.suppressWarnings(
stop, util.suppress(message=r'reactor\.crash cannot be used.*',
category=DeprecationWarning))
running.append(None)
try:
d.addBoth(append)
if results:
# d might have already been fired, in which case append is
# called synchronously. Avoid any reactor stuff.
return
d.addBoth(crash)
reactor.stop = stop
try:
reactor.run()
finally:
del reactor.stop
# If the reactor was crashed elsewhere due to a timeout, hopefully
# that crasher also reported an error. Just return.
# _timedOut is most likely to be set when d has fired but hasn't
# completed its callback chain (see self._run)
if results or self._timedOut: #defined in run() and _run()
return
# If the timeout didn't happen, and we didn't get a result or
# a failure, then the user probably aborted the test, so let's
# just raise KeyboardInterrupt.
# FIXME: imagine this:
# web/test/test_webclient.py:
# exc = self.assertRaises(error.Error, wait, method(url))
#
# wait() will raise KeyboardInterrupt, and assertRaises will
# swallow it. Therefore, wait() raising KeyboardInterrupt is
# insufficient to stop trial. A suggested solution is to have
# this code set a "stop trial" flag, or otherwise notify trial
# that it should really try to stop as soon as possible.
raise KeyboardInterrupt()
finally:
results = None
running.pop()
class UnsupportedTrialFeature(Exception):
"""A feature of twisted.trial was used that pyunit cannot support."""
class PyUnitResultAdapter(object):
"""
Wrap a C{TestResult} from the standard library's C{unittest} so that it
supports the extended result types from Trial, and also supports
L{twisted.python.failure.Failure}s being passed to L{addError} and
L{addFailure}.
"""
def __init__(self, original):
"""
@param original: A C{TestResult} instance from C{unittest}.
"""
self.original = original
def _exc_info(self, err):
if isinstance(err, failure.Failure):
# Unwrap the Failure into a exc_info tuple.
err = (err.type, err.value, err.getTracebackObject())
return err
def startTest(self, method):
self.original.startTest(method)
def stopTest(self, method):
self.original.stopTest(method)
def addFailure(self, test, fail):
self.original.addFailure(test, self._exc_info(fail))
def addError(self, test, error):
self.original.addError(test, self._exc_info(error))
def _unsupported(self, test, feature, info):
self.original.addFailure(
test,
(UnsupportedTrialFeature,
UnsupportedTrialFeature(feature, info),
None))
def addSkip(self, test, reason):
"""
Report the skip as a failure.
"""
self._unsupported(test, 'skip', reason)
def addUnexpectedSuccess(self, test, todo):
"""
Report the unexpected success as a failure.
"""
self._unsupported(test, 'unexpected success', todo)
def addExpectedFailure(self, test, error):
"""
Report the expected failure (i.e. todo) as a failure.
"""
self._unsupported(test, 'expected failure', error)
def addSuccess(self, test):
self.original.addSuccess(test)
def upDownError(self, method, error, warn, printStatus):
pass
def suiteVisit(suite, visitor):
"""
Visit each test in C{suite} with C{visitor}.
Deprecated in Twisted 8.0.
@param visitor: A callable which takes a single argument, the L{TestCase}
instance to visit.
@return: None
"""
warnings.warn("Test visitors deprecated in Twisted 8.0",
category=DeprecationWarning)
for case in suite._tests:
visit = getattr(case, 'visit', None)
if visit is not None:
visit(visitor)
elif isinstance(case, pyunit.TestCase):
case = itrial.ITestCase(case)
case.visit(visitor)
elif isinstance(case, pyunit.TestSuite):
suiteVisit(case, visitor)
else:
case.visit(visitor)
class TestSuite(pyunit.TestSuite):
"""
Extend the standard library's C{TestSuite} with support for the visitor
pattern and a consistently overrideable C{run} method.
"""
visit = suiteVisit
def __call__(self, result):
return self.run(result)
def run(self, result):
"""
Call C{run} on every member of the suite.
"""
# we implement this because Python 2.3 unittest defines this code
# in __call__, whereas 2.4 defines the code in run.
for test in self._tests:
if result.shouldStop:
break
test(result)
return result
class TestDecorator(components.proxyForInterface(itrial.ITestCase,
"_originalTest")):
"""
Decorator for test cases.
@param _originalTest: The wrapped instance of test.
@type _originalTest: A provider of L{itrial.ITestCase}
"""
implements(itrial.ITestCase)
def __call__(self, result):
"""
Run the unit test.
@param result: A TestResult object.
"""
return self.run(result)
def run(self, result):
"""
Run the unit test.
@param result: A TestResult object.
"""
return self._originalTest.run(
reporter._AdaptedReporter(result, self.__class__))
def _clearSuite(suite):
"""
Clear all tests from C{suite}.
This messes with the internals of C{suite}. In particular, it assumes that
the suite keeps all of its tests in a list in an instance variable called
C{_tests}.
"""
suite._tests = []
def decorate(test, decorator):
"""
Decorate all test cases in C{test} with C{decorator}.
C{test} can be a test case or a test suite. If it is a test suite, then the
structure of the suite is preserved.
L{decorate} tries to preserve the class of the test suites it finds, but
assumes the presence of the C{_tests} attribute on the suite.
@param test: The C{TestCase} or C{TestSuite} to decorate.
@param decorator: A unary callable used to decorate C{TestCase}s.
@return: A decorated C{TestCase} or a C{TestSuite} containing decorated
C{TestCase}s.
"""
try:
tests = iter(test)
except TypeError:
return decorator(test)
# At this point, we know that 'test' is a test suite.
_clearSuite(test)
for case in tests:
test.addTest(decorate(case, decorator))
return test
class _PyUnitTestCaseAdapter(TestDecorator):
"""
Adapt from pyunit.TestCase to ITestCase.
"""
def visit(self, visitor):
"""
Deprecated in Twisted 8.0.
"""
warnings.warn("Test visitors deprecated in Twisted 8.0",
category=DeprecationWarning)
visitor(self)
class _BrokenIDTestCaseAdapter(_PyUnitTestCaseAdapter):
"""
Adapter for pyunit-style C{TestCase} subclasses that have undesirable id()
methods. That is L{pyunit.FunctionTestCase} and L{pyunit.DocTestCase}.
"""
def id(self):
"""
Return the fully-qualified Python name of the doctest.
"""
testID = self._originalTest.shortDescription()
if testID is not None:
return testID
return self._originalTest.id()
class _ForceGarbageCollectionDecorator(TestDecorator):
"""
Forces garbage collection to be run before and after the test. Any errors
logged during the post-test collection are added to the test result as
errors.
"""
def run(self, result):
gc.collect()
TestDecorator.run(self, result)
_logObserver._add()
gc.collect()
for error in _logObserver.getErrors():
result.addError(self, error)
_logObserver.flushErrors()
_logObserver._remove()
components.registerAdapter(
_PyUnitTestCaseAdapter, pyunit.TestCase, itrial.ITestCase)
components.registerAdapter(
_BrokenIDTestCaseAdapter, pyunit.FunctionTestCase, itrial.ITestCase)
_docTestCase = getattr(doctest, 'DocTestCase', None)
if _docTestCase:
components.registerAdapter(
_BrokenIDTestCaseAdapter, _docTestCase, itrial.ITestCase)
def _iterateTests(testSuiteOrCase):
"""
Iterate through all of the test cases in C{testSuiteOrCase}.
"""
try:
suite = iter(testSuiteOrCase)
except TypeError:
yield testSuiteOrCase
else:
for test in suite:
for subtest in _iterateTests(test):
yield subtest
# Support for Python 2.3
try:
iter(pyunit.TestSuite())
except TypeError:
# Python 2.3's TestSuite doesn't support iteration. Let's monkey patch it!
def __iter__(self):
return iter(self._tests)
pyunit.TestSuite.__iter__ = __iter__
class _SubTestCase(TestCase):
def __init__(self):
TestCase.__init__(self, 'run')
_inst = _SubTestCase()
def _deprecate(name):
"""
Internal method used to deprecate top-level assertions. Do not use this.
"""
def _(*args, **kwargs):
warnings.warn("unittest.%s is deprecated. Instead use the %r "
"method on unittest.TestCase" % (name, name),
stacklevel=2, category=DeprecationWarning)
return getattr(_inst, name)(*args, **kwargs)
return _
_assertions = ['fail', 'failUnlessEqual', 'failIfEqual', 'failIfEquals',
'failUnless', 'failUnlessIdentical', 'failUnlessIn',
'failIfIdentical', 'failIfIn', 'failIf',
'failUnlessAlmostEqual', 'failIfAlmostEqual',
'failUnlessRaises', 'assertApproximates',
'assertFailure', 'failUnlessSubstring', 'failIfSubstring',
'assertAlmostEqual', 'assertAlmostEquals',
'assertNotAlmostEqual', 'assertNotAlmostEquals', 'assertEqual',
'assertEquals', 'assertNotEqual', 'assertNotEquals',
'assertRaises', 'assert_', 'assertIdentical',
'assertNotIdentical', 'assertIn', 'assertNotIn',
'failUnlessFailure', 'assertSubstring', 'assertNotSubstring']
for methodName in _assertions:
globals()[methodName] = _deprecate(methodName)
__all__ = ['TestCase', 'wait', 'FailTest', 'SkipTest']
|
hortonworks/hortonworks-sandbox
|
desktop/core/ext-py/Twisted/twisted/trial/unittest.py
|
Python
|
apache-2.0
| 62,088
|
[
"VisIt"
] |
03f9a53f2de53c32487e3c7d32b2cac72631a1d3290f0c73d00c554390a82ec3
|
#
# Copyright 2017 Russell Smiley
#
# This file is part of timetools.
#
# timetools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# timetools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with timetools. If not, see <http://www.gnu.org/licenses/>.
#
import matplotlib.pyplot as mpp
import unittest
import timetools.synchronization.clock as sc
import timetools.synchronization.oscillator as tso
import timetools.synchronization.oscillator.noise.gaussian as tsong
import timetools.synchronization.time as st
import timetools.synchronization.compliance.visualization as tscv
import timetools.synchronization.compliance.ituTG8263.compute as tscg8263
import timetools.synchronization.compliance.ituTG8263.wanderGeneration as tscg8263wg
import timetools.synchronization.compliance.ituTG8263.holdoverTransient as tscg8263h
class TestItuTG8263( unittest.TestCase ) :
def testConstantTemperatureWanderGenerationMask( self ) :
thisMask = tscg8263wg.constantTemperatureMtieNs
figureHandle = mpp.figure( )
# Set the plot limits before the mask plot so that it will figure out
# appropriate ranges in the absence of signal data
mpp.xlim( (0.01, 20e3) )
mpp.ylim( (100, 200e3) )
thisMask.addToPlot( figureHandle.number )
mpp.yscale( 'log' )
mpp.xscale( 'log' )
mpp.grid( which = 'minor' )
mpp.title( self.testConstantTemperatureWanderGenerationMask.__name__ )
def testVariableTemperatureWanderGenerationMask( self ) :
constTempMask = tscg8263wg.constantTemperatureMtieNs
thisMask = tscg8263wg.variableTemperatureMtieNs
figureHandle = mpp.figure( )
# Set the plot limits before the mask plot so that it will figure out
# appropriate ranges in the absence of signal data
mpp.xlim( (0.01, 20e3) )
mpp.ylim( (100, 200e3) )
thisMask.addToPlot( figureHandle.number, color = 'b' )
constTempMask.addToPlot( figureHandle.number, color = 'r', linestyle = '--' )
mpp.yscale( 'log' )
mpp.xscale( 'log' )
mpp.grid( which = 'minor' )
mpp.title( self.testVariableTemperatureWanderGenerationMask.__name__ )
def testHoldoverTransientPhaseErrorMask( self ) :
thisMask = tscg8263h.phaseErrorNs
figureHandle = mpp.figure( )
mpp.title( self.testHoldoverTransientPhaseErrorMask.__name__ )
# Set the plot limits before the mask plot so that it will figure out
# appropriate ranges in the absence of signal data
mpp.xlim( (0, 100) )
mpp.ylim( (-1000, 1000) )
mpp.grid( )
thisMask.addToPlot( figureHandle.number, linewidth = 4, color = 'b', marker = 'o' )
mpp.grid( which = 'minor' )
def testHoldoverTransientFfoMask( self ) :
thisMask = tscg8263h.ffoPpb
figureHandle = mpp.figure( )
mpp.title( self.testHoldoverTransientFfoMask.__name__ )
# Set the plot limits before the mask plot so that it will figure out
# appropriate ranges in the absence of signal data
mpp.xlim( (0, 24 * 3600) )
mpp.ylim( (-15, 15) )
mpp.grid( )
thisMask.addToPlot( figureHandle.number, linewidth = 4, color = 'b', marker = 'o' )
mpp.grid( which = 'minor' )
def testHoldoverTransientFfoRateMask( self ) :
thisMask = tscg8263h.ffoRatePpbPerSecond
figureHandle = mpp.figure( )
mpp.title( self.testHoldoverTransientFfoRateMask.__name__ )
# Set the plot limits before the mask plot so that it will figure out
# appropriate ranges in the absence of signal data
mpp.xlim( (0, 3600) )
mpp.ylim( (-2e-5, 2e-5) )
mpp.grid( )
thisMask.addToPlot( figureHandle.number, linewidth = 4, color = 'b', marker = 'o' )
mpp.grid( which = 'minor' )
def testWanderGenerationConstantTemperatureNs1( self ) :
timeStepSeconds = 1
numberSamples = 10000
desiredNumberObservations = 15
clockFfoPpb = 0.5
clockRmsJitterPpb = 2
referenceTimeGenerator = st.referenceGenerator( timeStepSeconds )
referenceTimeSeconds = referenceTimeGenerator.generate( numberSamples )
clockModel = sc.ClockModel( tso.OscillatorModel( initialFfoPpb = clockFfoPpb,
noiseModel = tsong.GaussianNoise(
standardDeviationPpb = clockRmsJitterPpb,
seed = 1459 ) ) )
localTimeSeconds, instantaneousLoFfoPpb = clockModel.generate( referenceTimeSeconds )
analysisResult, thisMask, mtieData = tscg8263.analyzeItuTG8263Mask( localTimeSeconds, referenceTimeSeconds,
timeStepSeconds, desiredNumberObservations )
thisPlot = tscv.plot( )
thisPlot.addMask( thisMask, linewidth = 4, color = 'r', linestyle = '--', marker = 'o' )
thisPlot.addSignal( mtieData )
thisPlot.go( )
mpp.yscale( 'log' )
mpp.xscale( 'log' )
mpp.grid( which = 'minor' )
mpp.title( self.testWanderGenerationConstantTemperatureNs1.__name__ )
self.assertTrue( analysisResult, 'Failed 16 ppb mask when should not have' )
def testWanderGenerationConstantTemperatureNs2( self ) :
timeStepSeconds = 1
numberSamples = 10000
desiredNumberObservations = 15
clockFfoPpb = 5
clockRmsJitterPpb = 2
referenceTimeGenerator = st.referenceGenerator( timeStepSeconds )
referenceTimeSeconds = referenceTimeGenerator.generate( numberSamples )
clockModel = sc.ClockModel( tso.OscillatorModel( initialFfoPpb = clockFfoPpb,
noiseModel = tsong.GaussianNoise(
standardDeviationPpb = clockRmsJitterPpb,
seed = 1459 ) ) )
localTimeSeconds, instantaneousLoFfoPpb = clockModel.generate( referenceTimeSeconds )
analysisResult, thisMask, mtieData = tscg8263.analyzeItuTG8263Mask( localTimeSeconds, referenceTimeSeconds,
timeStepSeconds, desiredNumberObservations )
thisPlot = tscv.plot( )
thisPlot.addMask( thisMask, linewidth = 4, color = 'r', linestyle = '--' )
thisPlot.addSignal( mtieData, linestyle = '--', marker = 'o' )
thisPlot.go( )
mpp.yscale( 'log' )
mpp.xscale( 'log' )
mpp.grid( which = 'minor' )
mpp.title( self.testWanderGenerationConstantTemperatureNs2.__name__ )
self.assertFalse( analysisResult, 'Passed 16 ppb mask when should not have' )
def tearDown( self ) :
if __name__ == "__main__" :
mpp.show( )
if __name__ == "__main__" :
unittest.main( )
|
blueskyjunkie/timeTools
|
timetools/synchronization/compliance/tests/testItuTG8263.py
|
Python
|
gpl-3.0
| 7,483
|
[
"Gaussian"
] |
41249eb06fcf458905c0810c5eaaee80141959a05f44e63b850927f3b8bffbdc
|
# -*- coding: utf-8 -*-
## perform apodization of echoes using external python script
## rewritten in a module-like fashion so that it can be called from another python script
def apodize_echoes(gb=None, cycle=None, echo_position=None, dead_pts=None, noDialog=False, dataset=None):
"""
This function applies a gaussian apodization to each echo of a cpmg acquisition
that used qcpmg.jt pulse sequence by calling an external python program.
It may be used for other pulse sequences.
If a variable is set to None a default value is chosen
gb: gaussian broadening (Hz) (default value stored in USERP1)
echo_position: Position of echo with respect to start of FID (not including digital filter)
Position is read from/stored in USERP2. If None, it defaults to D3+D6. The stored value is
reset to default if set to 0.
cycle: Cycle time of CPMG sequence
"""
import sys
import os
import os.path
import subprocess
# if this function is called from imported module then one needs to import TOPSPIN functions
# so that they are available in the current namespace
from TopCmds import CURDATA, GETPAR, GETPARSTAT, PUTPAR, RE, INPUT_DIALOG, MSG, EXIT
import JTutils
# whether CURDATA should be called here or specific dataset should be provided as argument is not clear
if dataset == None:
dataset = CURDATA()
RE(dataset)
# process the arguments that are handled by this script
if gb == None:
gb = GETPAR("USERP1")
try :
test_gb = float(gb)
except ValueError:
noDialog = False
D3 = float(GETPARSTAT("D 3"))*1e6
D6 = float(GETPARSTAT("D 6"))*1e6
if cycle == None:
cycle = float(GETPARSTAT("P 60"))
if cycle < 1:
# P60 is not likely to have stored the cycle time then uses historic calculation
P4 = float(GETPARSTAT("P 4"))
cycle = 2*(D3+D6) + P4
cycle = str(cycle)
if dead_pts == None:
dead_pts = GETPAR("TDoff")
if echo_position == None:
echo_position = GETPAR("USERP2")
try :
echo_position = float(echo_position)
if echo_position > 0:
echo_position = str(echo_position)
else:
echo_position = str(D3+D6)
except ValueError:
MSG("""Warning!
echo position from USERP2 could not be converted to float: %s.
Using default D3+D6.""" % (echo_position,))
echo_position = str(D3+D6)
if not noDialog:
result = INPUT_DIALOG("processing parameters",
"""Please provide:
- the gaussian broadening (GB) applied to each echo,
- the cycle time of the sequence
- the position of first echo with respect to start of FID
(not including digital filter) Echo position defaults to D3+D6,
setting it to 0 resets it to default.
- the number of dead pts to zero at start of echo (stored as TDoff)
""",
["GB (Hz)=", "cycle time (us)", "echo position (default=D3+D6) (us)", "dead points"],
[gb, cycle, echo_position, dead_pts])
try :
(gb, cycle, echo_position, dead_pts) = result
except TypeError:
EXIT()
PUTPAR("TDoff", dead_pts)
PUTPAR("USERP1", gb)
PUTPAR("USERP2", echo_position)
fulldataPATH = JTutils.fullpath(dataset)
opt_args = " -g %s -c %s -s %s -t %s" % (gb, cycle, echo_position, dead_pts)
JTutils.run_CpyBin_script("qcpmgapod_.py", opt_args.split() + [fulldataPATH])
RE(dataset)
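# Illustrative usage sketch from another TopSpin script (the module name and
# parameter values below are assumptions, not defaults of this function):
#     import qcpmgapod
#     qcpmgapod.apodize_echoes(gb=50, noDialog=True)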
if __name__ == '__main__':
class dummy():
def __init__(self):
self.gb = None
self.c = None
self.t = None
self.echo_position = None
self.noDialog = False
try :
import argparse
parser = argparse.ArgumentParser(description='Add echoes in a qcpmg bruker experiment')
parser.add_argument('-g', '--gb', help='Gaussian broadening applied to each echo', default=None)
parser.add_argument('-c', help='qcpmg cycle in us', default=None)
parser.add_argument('-t', help='number of point to discard at start of echo to remove dead time.', default=None)
parser.add_argument('--echo_position', default=None,
help='echo position from start of FID (digital filter excluded) in us')
parser.add_argument('--noDialog', action='store_true', help='Do not show dialog : use default or provided optional arguments')
args = parser.parse_args(sys.argv[1:])
except ImportError :
if len(sys.argv) > 1:
MSG("Argparse module not found!\n Arguments won't be processed")
args = dummy()
except SystemExit:
MSG(""" Script is exiting : either you asked for help or there is an argument error.
Check console for additional information
""" + parser.format_help() )
EXIT()
dataset = CURDATA()
apodize_echoes(gb=args.gb, cycle=args.c, echo_position=args.echo_position, dead_pts=args.t, noDialog=args.noDialog, dataset=dataset)
|
jtrebosc/JTutils
|
TSpy/qcpmgapod.py
|
Python
|
bsd-3-clause
| 5,184
|
[
"Gaussian"
] |
799e6b4cc7f4671c6c08ea997ad3cd0c22d0442d3151235fe6fd6837373a99fe
|
>>> from ftplib import FTP
>>> ftp = FTP('ftp.ncbi.nih.gov')
>>> ftp.login()
'230-Anonymous access granted, restrictions apply.\n Please read the file README.ftp\n230 it was last modified on Wed Apr 7 10:18:00 2010 - 193 days ago'
>>> ftp.retrlines('LIST')
dr-xr-xr-x 3 ftp anonymous 4096 Sep 7 21:03 1000genomes
-r--r--r-- 1 ftp anonymous 10737418240 May 17 19:21 10GB
-r--r--r-- 1 ftp anonymous 1073741824 May 17 19:19 1GB
-r--r--r-- 1 ftp anonymous 1868 Apr 7 2010 README.ftp
lr--r--r-- 1 ftp anonymous 29 Apr 7 2010 asn1-converters -> toolbox/ncbi_tools/converters
dr-xr-xr-x 8 ftp anonymous 4096 Sep 29 2004 blast
dr-xr-xr-x 3 ftp anonymous 4096 Sep 13 2004 cgap
dr-xr-xr-x 4 ftp anonymous 4096 Jan 8 2009 cn3d
dr-xr-xr-x 27 ftp anonymous 4096 Sep 13 20:05 dbgap
dr-xr-xr-x 3 ftp anonymous 4096 Aug 23 19:52 dra0
dr-xr-xr-x 11 ftp anonymous 4096 Jun 4 2006 entrez
dr-xr-xr-x 7 ftp anonymous 4096 Jun 2 15:21 epigenomics
dr-xr-xr-x 0 ftp anonymous 0 Dec 4 2009 era0
dr-xr-xr-x 3 ftp anonymous 4096 May 3 19:46 era1
dr-xr-xr-x 6 ftp anonymous 4096 Aug 4 2006 fa2htgs
dr-xr-xr-x 10 ftp anonymous 155648 Oct 1 20:20 genbank
dr-xr-xr-x 6 ftp anonymous 4096 Aug 11 17:09 gene
dr-xr-xr-x 63 ftp anonymous 8192 Sep 21 14:02 genomes
dr-xr-xr-x 24 ftp anonymous 4096 Aug 18 03:08 hapmap
dr-xr-xr-x 12 ftp anonymous 4096 Oct 14 16:39 mmdb
dr-xr-xr-x 5 ftp anonymous 135168 Aug 19 03:53 ncbi-asn1
dr-xr-xr-x 147 ftp anonymous 12288 Jul 26 20:07 pub
dr-xr-xr-x 10 ftp anonymous 4096 Oct 15 18:28 pubchem
dr-xr-xr-x 2 ftp anonymous 4096 Oct 16 01:15 pubmed
dr-xr-xr-x 15 ftp anonymous 4096 Mar 24 2010 refseq
dr-xr-xr-x 57 ftp anonymous 8192 Aug 20 2008 repository
dr-xr-xr-x 5 ftp anonymous 4096 Oct 8 18:51 sequin
dr-xr-xr-x 9 ftp anonymous 4096 May 24 20:03 sky-cgh
dr-xr-xr-x 16 ftp anonymous 12288 May 18 02:50 snp
dr-xr-xr-x 13 ftp anonymous 4096 Jul 9 16:55 sra
dr-xr-xr-x 4 ftp anonymous 4096 Apr 13 2010 sra0
dr-xr-xr-x 5 ftp anonymous 4096 May 5 00:11 sra1
dr-xr-xr-x 4 ftp anonymous 4096 May 24 21:46 sra2
dr-xr-xr-x 2 ftp anonymous 4096 Sep 29 2004 tech-reports
dr-xr-xr-x 13 ftp anonymous 4096 Oct 16 2006 toolbox
dr-xr-xr-x 5 ftp anonymous 4096 Apr 24 2009 tpa
dr-xr-xr-x 2 ftp anonymous 4096 Sep 2 16:04 varpipe-intqc
'226 Transfer complete.'
>>> ftp.close()
|
abhishektiwari/ToyProjects
|
Python/BioRelated/ncbi_ftp.py
|
Python
|
mit
| 2,682
|
[
"BLAST"
] |
38dc8c82507c094e601017ee0db15f340cc12eda5a3009e600a0bae465e8345d
|
#!/usr/bin/env python
"""
clim.py
ROMS climatology utilities
Written by Brian Powell on 08/15/15
Copyright (c)2017 University of Hawaii under the MIT-License.
"""
import seapy
import numpy as np
import netCDF4
def gen_bry_clim(clim_file, grid, bry, clobber=False, cdl=None):
"""
Taking the results of gen_ncks and interpolation, stitch together
climatology files that were interpolated using only the boundary regions
into a single climatology (with no data where interpolation wasn't
performed).
Parameters
----------
clim_file: str,
The name of the output climate file
grid: seapy.model.grid or str,
The output ROMS grid
bry: dict,
A dictionary prescribing the climatology file interpolated for each
boundary side.
{"west":filename, "south":filename}, ...}
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
Returns
-------
None
"""
grid = seapy.model.asgrid(grid)
# Grab the first dictionary record and use it to determine the number
# of times in the new climatology file
nc = netCDF4.Dataset(bry[list(bry.keys())[0]])
reftime, time = seapy.roms.get_reftime(nc)
times = nc.variables[time][:]
nc.close()
# Create the new climatology file
ncout = seapy.roms.ncgen.create_clim(clim_file,
eta_rho=grid.ln, xi_rho=grid.lm, s_rho=grid.n,
reftime=reftime, clobber=clobber, cdl=cdl,
title="stitched from boundary interpolation")
ncout.variables["clim_time"][:] = times
for side in bry:
if bry[side] is None:
continue
ncin = netCDF4.Dataset(bry[side])
for fld in seapy.roms.fields:
idx = [np.s_[:] for i in range(seapy.roms.fields[fld]["dims"] + 1)]
dat = ncin.variables[fld][:]
shp = dat.shape
if side == "west":
idx[-1] = np.s_[:shp[-1]]
pass
elif side == "east":
idx[-1] = np.s_[-shp[-1]:]
pass
elif side == "north":
idx[-2] = np.s_[-shp[-2]:]
pass
elif side == "south":
idx[-2] = np.s_[:shp[-2]]
pass
ncout.variables[fld][idx] = dat
ncout.sync()
ncin.close()
ncout.close()
|
ocefpaf/seapy
|
seapy/roms/clim.py
|
Python
|
mit
| 2,643
|
[
"Brian",
"NetCDF"
] |
cf6592ccdda5c4af643e3dd572736e2a2ae41729bdd9ab1e98ccca95857aafd7
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import math
import warnings
from math import pi
import torch
from torch.distributions import VonMises
from torch.distributions.utils import broadcast_all, lazy_property
from pyro.distributions import constraints
from pyro.distributions.torch_distribution import TorchDistribution
from pyro.distributions.util import broadcast_shape
from pyro.ops.special import log_I1
class SineBivariateVonMises(TorchDistribution):
r"""Unimodal distribution of two dependent angles on the 2-torus (S^1 ⨂ S^1) given by
.. math::
C^{-1}\exp(\kappa_1\cos(x_1-\mu_1) + \kappa_2\cos(x_2 -\mu_2) + \rho\sin(x_1 - \mu_1)\sin(x_2 - \mu_2))
and
.. math::
C = (2\pi)^2 \sum_{i=0}^\infty {2i \choose i}
\left(\frac{\rho^2}{4\kappa_1\kappa_2}\right)^i I_i(\kappa_1)I_i(\kappa_2),
where I_i(\cdot) is the modified Bessel function of the first kind, mu's are the locations of the distribution,
kappa's are the concentration and rho gives the correlation between angles x_1 and x_2.
This distribution is a submodel of the Bivariate von Mises distribution, called the Sine Distribution [2] in
directional statistics.
This distribution is helpful for modeling coupled angles such as torsion angles in peptide chains.
To infer parameters, use :class:`~pyro.infer.NUTS` or :class:`~pyro.infer.HMC` with priors that
avoid parameterizations where the distribution becomes bimodal; see note below.
.. note:: Sample efficiency drops as
.. math::
\frac{\rho^2}{\kappa_1\kappa_2} \rightarrow 1
because the distribution becomes increasingly bimodal.
.. note:: The correlation and weighted_correlation params are mutually exclusive.
.. note:: In the context of :class:`~pyro.infer.SVI`, this distribution can be used as a likelihood but not for
latent variables.
** References: **
1. Probabilistic model for two dependent circular variables Singh, H., Hnizdo, V., and Demchuck, E. (2002)
2. Protein Bioinformatics and Mixtures of Bivariate von Mises Distributions for Angular Data,
Mardia, K. V, Taylor, T. C., and Subramaniam, G. (2007)
:param torch.Tensor phi_loc: location of first angle
:param torch.Tensor psi_loc: location of second angle
:param torch.Tensor phi_concentration: concentration of first angle
:param torch.Tensor psi_concentration: concentration of second angle
:param torch.Tensor correlation: correlation between the two angles
:param torch.Tensor weighted_correlation: set correlation to weighted_corr * sqrt(phi_conc*psi_conc)
to avoid bimodality (see note).
"""
arg_constraints = {
"phi_loc": constraints.real,
"psi_loc": constraints.real,
"phi_concentration": constraints.positive,
"psi_concentration": constraints.positive,
"correlation": constraints.real,
}
support = constraints.independent(constraints.real, 1)
max_sample_iter = 1000
def __init__(
self,
phi_loc,
psi_loc,
phi_concentration,
psi_concentration,
correlation=None,
weighted_correlation=None,
validate_args=None,
):
assert (correlation is None) != (weighted_correlation is None)
if weighted_correlation is not None:
sqrt_ = (
torch.sqrt if isinstance(phi_concentration, torch.Tensor) else math.sqrt
)
correlation = (
weighted_correlation * sqrt_(phi_concentration * psi_concentration)
+ 1e-8
)
(
phi_loc,
psi_loc,
phi_concentration,
psi_concentration,
correlation,
) = broadcast_all(
phi_loc, psi_loc, phi_concentration, psi_concentration, correlation
)
self.phi_loc = phi_loc
self.psi_loc = psi_loc
self.phi_concentration = phi_concentration
self.psi_concentration = psi_concentration
self.correlation = correlation
event_shape = torch.Size([2])
batch_shape = phi_loc.shape
super().__init__(batch_shape, event_shape, validate_args)
if self._validate_args and torch.any(
phi_concentration * psi_concentration <= correlation ** 2
):
warnings.warn(
f"{self.__class__.__name__} bimodal due to concentration-correlation relation, "
f"sampling will likely fail.",
UserWarning,
)
@lazy_property
def norm_const(self):
corr = self.correlation.view(1, -1) + 1e-8
conc = torch.stack(
(self.phi_concentration, self.psi_concentration), dim=-1
).view(-1, 2)
m = torch.arange(50, device=self.phi_loc.device).view(-1, 1)
fs = (
SineBivariateVonMises._lbinoms(m.max() + 1).view(-1, 1)
+ 2 * m * torch.log(corr)
- m * torch.log(4 * torch.prod(conc, dim=-1))
)
fs += log_I1(m.max(), conc, 51).sum(-1)
mfs = fs.max()
norm_const = 2 * torch.log(torch.tensor(2 * pi)) + mfs + (fs - mfs).logsumexp(0)
return norm_const.reshape(self.phi_loc.shape)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
indv = self.phi_concentration * torch.cos(
value[..., 0] - self.phi_loc
) + self.psi_concentration * torch.cos(value[..., 1] - self.psi_loc)
corr = (
self.correlation
* torch.sin(value[..., 0] - self.phi_loc)
* torch.sin(value[..., 1] - self.psi_loc)
)
return indv + corr - self.norm_const
def sample(self, sample_shape=torch.Size()):
"""
** References: **
1. A New Unified Approach for the Simulation of a Wide Class of Directional Distributions
John T. Kent, Asaad M. Ganeiber & Kanti V. Mardia (2018)
"""
assert not torch._C._get_tracing_state(), "jit not supported"
sample_shape = torch.Size(sample_shape)
corr = self.correlation
conc = torch.stack((self.phi_concentration, self.psi_concentration))
eig = 0.5 * (conc[0] - corr ** 2 / conc[1])
eig = torch.stack((torch.zeros_like(eig), eig))
eigmin = torch.where(
eig[1] < 0, eig[1], torch.zeros_like(eig[1], dtype=eig.dtype)
)
eig = eig - eigmin
b0 = self._bfind(eig)
total = sample_shape.numel()
missing = total * torch.ones(
(self.batch_shape.numel(),), dtype=torch.int, device=conc.device
)
start = torch.zeros_like(missing, device=conc.device)
phi = torch.empty(
(2, *missing.shape, total), dtype=corr.dtype, device=conc.device
)
max_iter = SineBivariateVonMises.max_sample_iter
# flatten batch_shape
conc = conc.view(2, -1, 1)
eigmin = eigmin.view(-1, 1)
corr = corr.reshape(-1, 1)
eig = eig.view(2, -1)
b0 = b0.view(-1)
phi_den = log_I1(0, conc[1]).view(-1, 1)
lengths = torch.arange(total, device=conc.device).view(1, -1)
while torch.any(missing > 0) and max_iter:
curr_conc = conc[:, missing > 0, :]
curr_corr = corr[missing > 0]
curr_eig = eig[:, missing > 0]
curr_b0 = b0[missing > 0]
x = (
torch.distributions.Normal(0.0, torch.sqrt(1 + 2 * curr_eig / curr_b0))
.sample((missing[missing > 0].min(),))
.view(2, -1, missing[missing > 0].min())
)
x /= x.norm(dim=0)[None, ...] # Angular Central Gaussian distribution
lf = (
curr_conc[0] * (x[0] - 1)
+ eigmin[missing > 0]
+ log_I1(
0, torch.sqrt(curr_conc[1] ** 2 + (curr_corr * x[1]) ** 2)
).squeeze(0)
- phi_den[missing > 0]
)
assert lf.shape == ((missing > 0).sum(), missing[missing > 0].min())
lg_inv = (
1.0
- curr_b0.view(-1, 1) / 2
+ torch.log(
curr_b0.view(-1, 1) / 2 + (curr_eig.view(2, -1, 1) * x ** 2).sum(0)
)
)
assert lg_inv.shape == lf.shape
accepted = (
torch.distributions.Uniform(
0.0, torch.ones((), device=conc.device)
).sample(lf.shape)
< (lf + lg_inv).exp()
)
phi_mask = torch.zeros(
(*missing.shape, total), dtype=torch.bool, device=conc.device
)
phi_mask[missing > 0] = torch.logical_and(
lengths < (start[missing > 0] + accepted.sum(-1)).view(-1, 1),
lengths >= start[missing > 0].view(-1, 1),
)
phi[:, phi_mask] = x[:, accepted]
start[missing > 0] += accepted.sum(-1)
missing[missing > 0] -= accepted.sum(-1)
max_iter -= 1
if max_iter == 0 or torch.any(missing > 0):
raise ValueError(
"maximum number of iterations exceeded; "
"try increasing `SineBivariateVonMises.max_sample_iter`"
)
phi = torch.atan2(phi[1], phi[0])
alpha = torch.sqrt(conc[1] ** 2 + (corr * torch.sin(phi)) ** 2)
beta = torch.atan(corr / conc[1] * torch.sin(phi))
psi = VonMises(beta, alpha).sample()
phi_psi = torch.stack(
(
(phi + self.phi_loc.reshape((-1, 1)) + pi) % (2 * pi) - pi,
(psi + self.psi_loc.reshape((-1, 1)) + pi) % (2 * pi) - pi,
),
dim=-1,
).permute(1, 0, 2)
return phi_psi.reshape(*sample_shape, *self.batch_shape, *self.event_shape)
@property
def mean(self):
return torch.stack((self.phi_loc, self.psi_loc), dim=-1)
@classmethod
def infer_shapes(cls, **arg_shapes):
batch_shape = torch.Size(broadcast_shape(*arg_shapes.values()))
return batch_shape, torch.Size([2])
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(SineBivariateVonMises, _instance)
batch_shape = torch.Size(batch_shape)
for k in SineBivariateVonMises.arg_constraints.keys():
setattr(new, k, getattr(self, k).expand(batch_shape))
new.norm_const = self.norm_const.expand(batch_shape)
super(SineBivariateVonMises, new).__init__(
batch_shape, self.event_shape, validate_args=False
)
new._validate_args = self._validate_args
return new
def _bfind(self, eig):
b = (
eig.shape[0]
/ 2
* torch.ones(self.batch_shape, dtype=eig.dtype, device=eig.device)
)
g1 = torch.sum(1 / (b + 2 * eig) ** 2, dim=0)
g2 = torch.sum(-2 / (b + 2 * eig) ** 3, dim=0)
return torch.where(eig.norm(0) != 0, b - g1 / g2, b)
@staticmethod
def _lbinoms(n):
ns = torch.arange(n, device=n.device)
num = torch.lgamma(2 * ns + 1)
den = torch.lgamma(ns + 1)
return num - 2 * den
|
uber/pyro
|
pyro/distributions/sine_bivariate_von_mises.py
|
Python
|
apache-2.0
| 11,333
|
[
"Gaussian"
] |
68c7c4a3c4cd8c6bb5d33f5b58417b3cea1b04cbb88d74207083281426a7f56d
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
from collections import OrderedDict
from io import StringIO
import itertools
import re
import warnings
import numpy as np
import pandas as pd
from monty.json import MSONable
from ruamel.yaml import YAML
from six import string_types
from pymatgen.util.io_utils import clean_lines
from pymatgen.core.structure import SiteCollection
from pymatgen import Molecule, Element, Lattice, Structure
"""
This module implements a core class LammpsData for generating/parsing
LAMMPS data file, and other bridging classes to build LammpsData from
molecules.
Only point particle styles are supported for now (atom_style in angle,
atomic, bond, charge, full and molecular only). See the pages below for
more info.
http://lammps.sandia.gov/doc/atom_style.html
http://lammps.sandia.gov/doc/read_data.html
"""
__author__ = "Kiran Mathew, Zhi Deng"
__email__ = "kmathew@lbl.gov, z4deng@eng.ucsd.edu"
__credits__ = "Brandon Wood"
SECTION_KEYWORDS = {"atom": ["Atoms", "Velocities", "Masses",
"Ellipsoids", "Lines", "Triangles", "Bodies"],
"topology": ["Bonds", "Angles", "Dihedrals", "Impropers"],
"ff": ["Pair Coeffs", "PairIJ Coeffs", "Bond Coeffs",
"Angle Coeffs", "Dihedral Coeffs",
"Improper Coeffs"],
"class2": ["BondBond Coeffs", "BondAngle Coeffs",
"MiddleBondTorsion Coeffs",
"EndBondTorsion Coeffs", "AngleTorsion Coeffs",
"AngleAngleTorsion Coeffs",
"BondBond13 Coeffs", "AngleAngle Coeffs"]}
CLASS2_KEYWORDS = {"Angle Coeffs": ["BondBond Coeffs", "BondAngle Coeffs"],
"Dihedral Coeffs": ["MiddleBondTorsion Coeffs",
"EndBondTorsion Coeffs",
"AngleTorsion Coeffs",
"AngleAngleTorsion Coeffs",
"BondBond13 Coeffs"],
"Improper Coeffs": ["AngleAngle Coeffs"]}
SECTION_HEADERS = {"Masses": ["mass"],
"Velocities": ["vx", "vy", "vz"],
"Bonds": ["type", "atom1", "atom2"],
"Angles": ["type", "atom1", "atom2", "atom3"],
"Dihedrals": ["type", "atom1", "atom2", "atom3", "atom4"],
"Impropers": ["type", "atom1", "atom2", "atom3", "atom4"]}
ATOMS_HEADERS = {"angle": ["molecule-ID", "type", "x", "y", "z"],
"atomic": ["type", "x", "y", "z"],
"bond": ["molecule-ID", "type", "x", "y", "z"],
"charge": ["type", "q", "x", "y", "z"],
"full": ["molecule-ID", "type", "q", "x", "y", "z"],
"molecular": ["molecule-ID", "type", "x", "y", "z"]}
class LammpsData(MSONable):
"""
Object for representing the data in a LAMMPS data file.
"""
def __init__(self, masses, atoms, box_bounds, box_tilt=None,
velocities=None, force_field=None, topology=None,
atom_style="full"):
"""
This is a low level constructor designed to work with parsed
data or other bridging objects (ForceField and Topology). Not
recommended to use directly.
Args:
masses (pandas.DataFrame): DataFrame with one column
["mass"] for Masses section.
atoms (pandas.DataFrame): DataFrame with multiple columns
for Atoms section. Column names vary with atom_style.
box_bounds: A (3, 2) array/list of floats setting the
boundaries of simulation box.
box_tilt: A (3,) array/list of floats setting the tilt of
simulation box. Default to None, i.e., use an
orthogonal box.
velocities (pandas.DataFrame): DataFrame with three columns
["vx", "vy", "vz"] for Velocities section. Optional
with default to None. If not None, its index should be
consistent with atoms.
force_field (dict): Data for force field sections. Optional
with default to None. Only keywords in force field and
class 2 force field are valid keys, and each value is a
DataFrame.
topology (dict): Data for topology sections. Optional with
default to None. Only keywords in topology are valid
keys, and each value is a DataFrame.
atom_style (str): Output atom_style. Default to "full".
"""
bounds_arr = np.array(box_bounds)
bounds_shape = bounds_arr.shape
assert bounds_shape == (3, 2), \
"Expecting a (3, 2) array for box_bounds," \
" got {}".format(bounds_shape)
box_bounds = bounds_arr.tolist()
if box_tilt is not None:
tilt_arr = np.array(box_tilt)
tilt_shape = tilt_arr.shape
assert tilt_shape == (3,),\
"Expecting a (3,) array for box_tilt," \
" got {}".format(tilt_shape)
box_tilt = tilt_arr.tolist()
if velocities is not None:
assert len(velocities) == len(atoms),\
"Inconsistency found between atoms and velocities"
if force_field:
all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]
force_field = {k: v for k, v in force_field.items()
if k in all_ff_kws}
if topology:
topology = {k: v for k, v in topology.items()
if k in SECTION_KEYWORDS["topology"]}
self.masses = masses
self.atoms = atoms
self.box_bounds = box_bounds
self.box_tilt = box_tilt
self.velocities = velocities
self.force_field = force_field
self.topology = topology
self.atom_style = atom_style
def __str__(self):
return self.get_string()
@property
def structure(self):
"""
Export a periodic structure object representing the simulation box.
Returns:
A pymatgen structure object
"""
masses = self.masses
atoms = self.atoms
atoms["molecule-ID"] = 1
box_bounds = np.array(self.box_bounds)
box_tilt = self.box_tilt if self.box_tilt else [0.0] * 3
ld_copy = self.__class__(masses, atoms, box_bounds, box_tilt)
_, topologies = ld_copy.disassemble()
molecule = topologies[0].sites
coords = molecule.cart_coords - box_bounds[:, 0]
species = molecule.species
matrix = np.diag(box_bounds[:, 1] - box_bounds[:, 0])
matrix[1, 0] = box_tilt[0]
matrix[2, 0] = box_tilt[1]
matrix[2, 1] = box_tilt[2]
latt = Lattice(matrix)
site_properties = None if self.velocities is None \
else {"velocities": self.velocities.values}
return Structure(latt, species, coords, coords_are_cartesian=True,
site_properties=site_properties)
def get_string(self, distance=6, velocity=8, charge=3):
"""
Returns the string representation of LammpsData, essentially
the string to be written to a file.
Args:
distance (int): No. of decimal places to output for
box settings (bounds and tilt) and atomic coordinates.
Default to 6.
velocity (int): No. of decimal places to output for
velocities. Default to 8.
charge (int): No. of decimal places to output for
charges. Default to 3.
Returns:
String representation
"""
file_template = """Generated by pymatgen.io.lammps.data.LammpsData
{stats}
{box}
{body}
"""
box_ph = "{:.%df}" % distance
box_lines = []
for bound, d in zip(self.box_bounds, "xyz"):
fillers = bound + [d] * 2
bound_format = " ".join([box_ph] * 2 + [" {}lo {}hi"])
box_lines.append(bound_format.format(*fillers))
if self.box_tilt:
tilt_format = " ".join([box_ph] * 3 + [" xy xz yz"])
box_lines.append(tilt_format.format(*self.box_tilt))
box = "\n".join(box_lines)
body_dict = OrderedDict()
body_dict["Masses"] = self.masses
types = OrderedDict()
types["atom"] = len(self.masses)
if self.force_field:
all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]
ff_kws = [k for k in all_ff_kws if k in self.force_field]
for kw in ff_kws:
body_dict[kw] = self.force_field[kw]
if kw in SECTION_KEYWORDS["ff"][2:]:
types[kw.lower()[:-7]] = len(self.force_field[kw])
body_dict["Atoms"] = self.atoms
counts = OrderedDict()
counts["atoms"] = len(self.atoms)
if self.velocities is not None:
body_dict["Velocities"] = self.velocities
if self.topology:
for kw in SECTION_KEYWORDS["topology"]:
if kw in self.topology:
body_dict[kw] = self.topology[kw]
counts[kw.lower()] = len(self.topology[kw])
all_stats = list(counts.values()) + list(types.values())
stats_template = "{:>%d} {}" % len(str(max(all_stats)))
count_lines = [stats_template.format(v, k) for k, v in counts.items()]
type_lines = [stats_template.format(v, k + " types")
for k, v in types.items()]
stats = "\n".join(count_lines + [""] + type_lines)
map_coords = lambda q: ("{:.%df}" % distance).format(q)
map_velos = lambda q: ("{:.%df}" % velocity).format(q)
map_charges = lambda q: ("{:.%df}" % charge).format(q)
formatters = {"x": map_coords, "y": map_coords, "z": map_coords,
"vx": map_velos, "vy": map_velos, "vz": map_velos,
"q": map_charges}
section_template = "{kw}\n\n{df}\n"
parts = []
for k, v in body_dict.items():
index = True if k != "PairIJ Coeffs" else False
df_string = v.to_string(header=False, formatters=formatters,
index_names=False, index=index)
parts.append(section_template.format(kw=k, df=df_string))
body = "\n".join(parts)
return file_template.format(stats=stats, box=box, body=body)
def write_file(self, filename, distance=6, velocity=8, charge=3):
"""
Writes LammpsData to file.
Args:
filename (str): Filename.
distance (int): No. of decimal places to output for
box settings (bounds and tilt) and atomic coordinates.
Default to 6.
velocity (int): No. of decimal places to output for
velocities. Default to 8.
charge (int): No. of decimal places to output for
charges. Default to 3.
"""
with open(filename, "w") as f:
f.write(self.get_string(distance=distance, velocity=velocity,
charge=charge))
def disassemble(self, atom_labels=None, guess_element=True,
ff_label="ff_map"):
"""
Breaks down LammpsData to ForceField and a series of Topology.
RESTRICTIONS APPLIED:
1. No complex force field defined beyond atom types, i.e.,
no case where the same type or equivalent types of a topology
have more than one set of coefficients.
2. No intermolecular topologies (with atoms from different
molecule-IDs), since a Topology object includes data for ONE
molecule or structure only.
Args:
atom_labels ([str]): List of strings (must be different
from one another) for labelling each atom type found in
Masses section. Default to None, where the labels are
automatically added based on either element guess or
dummy specie assignment.
guess_element (bool): Whether to guess the element based on
its atomic mass. Default to True, otherwise dummy
species "Qa", "Qb", ... will be assigned to various
atom types. The guessed or assigned elements will be
reflected on atom labels if atom_labels is None, as
well as on the species of molecule in each Topology.
ff_label (str): Site property key for labeling atoms of
different types. Default to "ff_map".
Returns:
ForceField, [Topology]
"""
atoms_df = self.atoms.copy()
if "nx" in atoms_df.columns:
box_dim = np.ptp(self.box_bounds, axis=1)
atoms_df[["x", "y", "z"]] += atoms_df[["nx", "ny", "nz"]].values \
* box_dim
atoms_df = pd.concat([atoms_df, self.velocities], axis=1)
mids = atoms_df.get("molecule-ID")
if mids is None:
unique_mids = [1]
data_by_mols = {1: {"Atoms": atoms_df}}
else:
unique_mids = np.unique(mids)
data_by_mols = {}
for k in unique_mids:
df = atoms_df[atoms_df["molecule-ID"] == k]
data_by_mols[k] = {"Atoms": df}
masses = self.masses.copy()
masses["label"] = atom_labels
unique_masses = np.unique(masses["mass"])
if guess_element:
ref_masses = sorted([el.atomic_mass.real for el in Element])
diff = np.abs(np.array(ref_masses) - unique_masses[:, None])
atomic_numbers = np.argmin(diff, axis=1) + 1
symbols = [Element.from_Z(an).symbol for an in atomic_numbers]
else:
symbols = ["Q%s" % a for a in
map(chr, range(97, 97 + len(unique_masses)))]
for um, s in zip(unique_masses, symbols):
masses.loc[masses["mass"] == um, "element"] = s
if atom_labels is None: # add unique labels based on elements
for el, vc in masses["element"].value_counts().iteritems():
masses.loc[masses["element"] == el, "label"] = \
["%s%d" % (el, c) for c in range(1, vc + 1)]
assert masses["label"].nunique(dropna=False) == len(masses), \
"Expecting unique atom label for each type"
mass_info = [tuple([r["label"], r["mass"]])
for _, r in masses.iterrows()]
nonbond_coeffs, topo_coeffs = None, None
if self.force_field:
if "PairIJ Coeffs" in self.force_field:
nbc = self.force_field["PairIJ Coeffs"]
nbc = nbc.sort_values(["id1", "id2"]).drop(["id1", "id2"], axis=1)
nonbond_coeffs = [list(t) for t in nbc.itertuples(False, None)]
elif "Pair Coeffs" in self.force_field:
nbc = self.force_field["Pair Coeffs"].sort_index()
nonbond_coeffs = [list(t) for t in nbc.itertuples(False, None)]
topo_coeffs = {k: [] for k in SECTION_KEYWORDS["ff"][2:]
if k in self.force_field}
for kw in topo_coeffs.keys():
class2_coeffs = {k: list(v.itertuples(False, None))
for k, v in self.force_field.items()
if k in CLASS2_KEYWORDS.get(kw, [])}
ff_df = self.force_field[kw]
for t in ff_df.itertuples(True, None):
d = {"coeffs": list(t[1:]), "types": []}
if class2_coeffs:
d.update({k: list(v[t[0] - 1])
for k, v in class2_coeffs.items()})
topo_coeffs[kw].append(d)
if self.topology:
label_topo = lambda t: tuple(masses.loc[atoms_df.loc[t, "type"],
"label"])
for k, v in self.topology.items():
ff_kw = k[:-1] + " Coeffs"
for topo in v.itertuples(False, None):
topo_idx = topo[0] - 1
indices = topo[1:]
mids = atoms_df.loc[indices, "molecule-ID"].unique()
assert len(mids) == 1, \
"Do not support intermolecular topology formed " \
"by atoms with different molecule-IDs"
label = label_topo(indices)
topo_coeffs[ff_kw][topo_idx]["types"].append(label)
if data_by_mols[mids[0]].get(k):
data_by_mols[mids[0]][k].append(indices)
else:
data_by_mols[mids[0]][k] = [indices]
if topo_coeffs:
for v in topo_coeffs.values():
for d in v:
d["types"] = list(set(d["types"]))
ff = ForceField(mass_info=mass_info, nonbond_coeffs=nonbond_coeffs,
topo_coeffs=topo_coeffs)
topo_list = []
for mid in unique_mids:
data = data_by_mols[mid]
atoms = data["Atoms"]
shift = min(atoms.index)
type_ids = atoms["type"]
species = masses.loc[type_ids, "element"]
labels = masses.loc[type_ids, "label"]
coords = atoms[["x", "y", "z"]]
m = Molecule(species.values, coords.values,
site_properties={ff_label: labels.values})
charges = atoms.get("q")
velocities = atoms[["vx", "vy", "vz"]] if "vx" in atoms.columns \
else None
topologies = {}
for kw in SECTION_KEYWORDS["topology"]:
if data.get(kw):
topologies[kw] = (np.array(data[kw]) - shift).tolist()
topologies = None if not topologies else topologies
topo_list.append(Topology(sites=m, ff_label=ff_label,
charges=charges, velocities=velocities,
topologies=topologies))
return ff, topo_list
@classmethod
def from_file(cls, filename, atom_style="full", sort_id=False):
"""
Constructor that parses a file.
Args:
filename (str): Filename to read.
atom_style (str): Associated atom_style. Default to "full".
sort_id (bool): Whether to sort each section by id. Default to
False.
"""
with open(filename) as f:
lines = f.readlines()
kw_pattern = r"|".join(itertools.chain(*SECTION_KEYWORDS.values()))
section_marks = [i for i, l in enumerate(lines)
if re.search(kw_pattern, l)]
parts = np.split(lines, section_marks)
float_group = r"([0-9eE.+-]+)"
header_pattern = dict()
header_pattern["counts"] = r"^\s*(\d+)\s+([a-zA-Z]+)$"
header_pattern["types"] = r"^\s*(\d+)\s+([a-zA-Z]+)\s+types$"
header_pattern["bounds"] = r"^\s*{}$".format(r"\s+".join(
[float_group] * 2 + [r"([xyz])lo \3hi"]))
header_pattern["tilt"] = r"^\s*{}$".format(r"\s+".join(
[float_group] * 3 + ["xy xz yz"]))
header = {"counts": {}, "types": {}}
bounds = {}
for l in clean_lines(parts[0][1:]): # skip the 1st line
match = None
for k, v in header_pattern.items():
match = re.match(v, l)
if match:
break
else:
continue
if match and k in ["counts", "types"]:
header[k][match.group(2)] = int(match.group(1))
elif match and k == "bounds":
g = match.groups()
bounds[g[2]] = [float(i) for i in g[:2]]
elif match and k == "tilt":
header["tilt"] = [float(i) for i in match.groups()]
header["bounds"] = [bounds.get(i, [-0.5, 0.5]) for i in "xyz"]
def parse_section(sec_lines):
title_info = sec_lines[0].split("#", 1)
kw = title_info[0].strip()
sio = StringIO("".join(sec_lines[2:])) # skip the 2nd line
df = pd.read_csv(sio, header=None, comment="#",
delim_whitespace=True)
if kw.endswith("Coeffs") and not kw.startswith("PairIJ"):
names = ["id"] + ["coeff%d" % i
for i in range(1, df.shape[1])]
elif kw == "PairIJ Coeffs":
names = ["id1", "id2"] + ["coeff%d" % i
for i in range(1, df.shape[1] - 1)]
df.index.name = None
elif kw in SECTION_HEADERS:
names = ["id"] + SECTION_HEADERS[kw]
elif kw == "Atoms":
names = ["id"] + ATOMS_HEADERS[atom_style]
if df.shape[1] == len(names):
pass
elif df.shape[1] == len(names) + 3:
names += ["nx", "ny", "nz"]
else:
raise ValueError("Format in Atoms section inconsistent"
" with atom_style %s" % atom_style)
else:
raise NotImplementedError("Parser for %s section"
" not implemented" % kw)
df.columns = names
if sort_id:
sort_by = "id" if kw != "PairIJ Coeffs" else ["id1", "id2"]
df.sort_values(sort_by, inplace=True)
if "id" in df.columns:
df.set_index("id", drop=True, inplace=True)
df.index.name = None
return kw, df
err_msg = "Bad LAMMPS data format where "
body = {}
seen_atoms = False
for part in parts[1:]:
name, section = parse_section(part)
if name == "Atoms":
seen_atoms = True
if name in ["Velocities"] + SECTION_KEYWORDS["topology"] and \
not seen_atoms: # Atoms must appear earlier than these
raise RuntimeError(err_msg + "%s section appears before"
" Atoms section" % name)
body.update({name: section})
err_msg += "Nos. of {} do not match between header and {} section"
assert len(body["Masses"]) == header["types"]["atom"], \
err_msg.format("atom types", "Masses")
atom_sections = ["Atoms", "Velocities"] \
if "Velocities" in body else ["Atoms"]
for s in atom_sections:
assert len(body[s]) == header["counts"]["atoms"], \
err_msg.format("atoms", s)
for s in SECTION_KEYWORDS["topology"]:
if header["counts"].get(s.lower(), 0) > 0:
assert len(body[s]) == header["counts"][s.lower()], \
err_msg.format(s.lower(), s)
items = {k.lower(): body[k] for k in ["Masses", "Atoms"]}
items["box_bounds"] = header["bounds"]
items["box_tilt"] = header.get("tilt")
items["velocities"] = body.get("Velocities")
ff_kws = [k for k in body if k
in SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]]
items["force_field"] = {k: body[k] for k in ff_kws} if ff_kws \
else None
topo_kws = [k for k in body if k in SECTION_KEYWORDS["topology"]]
items["topology"] = {k: body[k] for k in topo_kws} \
if topo_kws else None
items["atom_style"] = atom_style
return cls(**items)
@classmethod
def from_ff_and_topologies(cls, ff, topologies, box_bounds, box_tilt=None,
atom_style="full"):
"""
Constructor building LammpsData from a ForceField object and a
list of Topology objects. Do not support intermolecular
topologies since a Topology object includes data for ONE
molecule or structure only.
Args:
ff (ForceField): ForceField object with data for Masses and
force field sections.
topologies ([Topology]): List of Topology objects with data
for Atoms, Velocities and topology sections.
box_bounds: A (3, 2) array/list of floats setting the
boundaries of simulation box.
box_tilt: A (3,) array/list of floats setting the tilt of
simulation box. Default to None, i.e., use an
orthogonal box.
atom_style (str): Output atom_style. Default to "full".
"""
atom_types = set.union(*[t.species for t in topologies])
assert atom_types.issubset(ff.maps["Atoms"].keys()),\
"Unknown atom type found in topologies"
items = dict(box_bounds=box_bounds, box_tilt=box_tilt,
atom_style=atom_style, masses=ff.masses,
force_field=ff.force_field)
mol_ids, charges, coords, labels = [], [], [], []
v_collector = [] if topologies[0].velocities else None
topo_collector = {"Bonds": [], "Angles": [], "Dihedrals": [],
"Impropers": []}
topo_labels = {"Bonds": [], "Angles": [], "Dihedrals": [],
"Impropers": []}
for i, topo in enumerate(topologies):
if topo.topologies:
shift = len(labels)
for k, v in topo.topologies.items():
topo_collector[k].append(np.array(v) + shift + 1)
topo_labels[k].extend([tuple([topo.type_by_sites[j]
for j in t]) for t in v])
if isinstance(v_collector, list):
v_collector.append(topo.velocities)
mol_ids.extend([i + 1] * len(topo.sites))
labels.extend(topo.type_by_sites)
coords.append(topo.sites.cart_coords)
q = [0.0] * len(topo.sites) if not topo.charges else topo.charges
charges.extend(q)
atoms = pd.DataFrame(np.concatenate(coords), columns=["x", "y", "z"])
atoms["molecule-ID"] = mol_ids
atoms["q"] = charges
atoms["type"] = list(map(ff.maps["Atoms"].get, labels))
atoms.index += 1
atoms = atoms[ATOMS_HEADERS[atom_style]]
velocities = None
if v_collector:
velocities = pd.DataFrame(np.concatenate(v_collector),
columns=SECTION_HEADERS["Velocities"])
velocities.index += 1
topology = {k: None for k, v in topo_labels.items() if len(v) > 0}
for k in topology:
df = pd.DataFrame(np.concatenate(topo_collector[k]),
columns=SECTION_HEADERS[k][1:])
df["type"] = list(map(ff.maps[k].get, topo_labels[k]))
if any(pd.isnull(df["type"])): # Throw away undefined topologies
warnings.warn("Undefined %s detected and removed" % k.lower())
df.dropna(subset=["type"], inplace=True)
df.reset_index(drop=True, inplace=True)
df.index += 1
topology[k] = df[SECTION_HEADERS[k]]
topology = {k: v for k, v in topology.items() if not v.empty}
items.update({"atoms": atoms, "velocities": velocities,
"topology": topology})
return cls(**items)
@classmethod
def from_dict(cls, d):
decode_df = lambda s: pd.read_json(s, orient="split")
items = dict()
items["masses"] = decode_df(d["masses"])
items["atoms"] = decode_df(d["atoms"])
items["box_bounds"] = d["box_bounds"]
items["box_tilt"] = d["box_tilt"]
items["atom_style"] = d["atom_style"]
velocities = d["velocities"]
if velocities:
velocities = decode_df(velocities)
items["velocities"] = velocities
force_field = d["force_field"]
if force_field:
force_field = {k: decode_df(v) for k, v in force_field.items()}
items["force_field"] = force_field
topology = d["topology"]
if topology:
topology = {k: decode_df(v) for k, v in topology.items()}
items["topology"] = topology
return cls(**items)
def as_dict(self):
encode_df = lambda df: df.to_json(orient="split")
d = dict()
d["@module"] = self.__class__.__module__
d["class"] = self.__class__.__name__
d["masses"] = encode_df(self.masses)
d["atoms"] = encode_df(self.atoms)
d["box_bounds"] = self.box_bounds
d["box_tilt"] = self.box_tilt
d["atom_style"] = self.atom_style
d["velocities"] = None if self.velocities is None \
else encode_df(self.velocities)
d["force_field"] = None if not self.force_field \
else {k: encode_df(v) for k, v in self.force_field.items()}
d["topology"] = None if not self.topology \
else {k: encode_df(v) for k, v in self.topology.items()}
return d
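# A minimal usage sketch (not part of the original module). It assumes a
# LAMMPS data file named "polymer.data" written with atom_style "full"
# exists on disk; the filename is purely illustrative.
def _example_lammpsdata_roundtrip():
    # Parse an existing data file into a LammpsData object.
    ld = LammpsData.from_file("polymer.data", atom_style="full")
    # The parsed sections are pandas DataFrames.
    print(ld.masses.head())
    print(ld.atoms.head())
    # Break the data down into a ForceField and per-molecule Topology objects.
    ff, topologies = ld.disassemble()
    print(len(topologies), "molecule(s) found")
    # Write the (unchanged) data back out with the default precision settings.
    ld.write_file("polymer_copy.data")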
class Topology(MSONable):
"""
Class carrying most data in Atoms, Velocities and molecular
topology sections for ONE SINGLE Molecule or Structure
object, or a plain list of Sites.
"""
def __init__(self, sites, ff_label=None, charges=None, velocities=None,
topologies=None):
"""
Args:
sites ([Site] or SiteCollection): A group of sites in a
list or as a Molecule/Structure.
ff_label (str): Site property key for labeling atoms of
different types. Default to None, i.e., use
site.species_string.
charges ([q, ...]): Charge of each site in a (n,)
array/list, where n is the No. of sites. Default to
None, i.e., search site property for charges.
velocities ([[vx, vy, vz], ...]): Velocity of each site in
a (n, 3) array/list, where n is the No. of sites.
Default to None, i.e., search site property for
velocities.
topologies (dict): Bonds, angles, dihedrals and improper
dihedrals defined by site indices. Default to None,
i.e., no additional topology. All four valid keys
listed below are optional.
{
"Bonds": [[i, j], ...],
"Angles": [[i, j, k], ...],
"Dihedrals": [[i, j, k, l], ...],
"Impropers": [[i, j, k, l], ...]
}
"""
if not isinstance(sites, SiteCollection):
sites = Molecule.from_sites(sites)
if ff_label:
type_by_sites = sites.site_properties.get(ff_label)
else:
type_by_sites = [site.species_string for site in sites]
# search for site property if not override
if charges is None:
charges = sites.site_properties.get("charge")
if velocities is None:
velocities = sites.site_properties.get("velocities")
# validate shape
if charges is not None:
charge_arr = np.array(charges)
assert charge_arr.shape == (len(sites),),\
"Wrong format for charges"
charges = charge_arr.tolist()
if velocities is not None:
velocities_arr = np.array(velocities)
assert velocities_arr.shape == (len(sites), 3), \
"Wrong format for velocities"
velocities = velocities_arr.tolist()
if topologies:
topologies = {k: v for k, v in topologies.items()
if k in SECTION_KEYWORDS["topology"]}
self.sites = sites
self.ff_label = ff_label
self.charges = charges
self.velocities = velocities
self.topologies = topologies
self.type_by_sites = type_by_sites
self.species = set(type_by_sites)
@classmethod
def from_bonding(cls, molecule, bond=True, angle=True, dihedral=True,
ff_label=None, charges=None, velocities=None, tol=0.1):
"""
Another constructor that creates an instance from a molecule.
Covalent bonds and other bond-based topologies (angles and
dihedrals) can be automatically determined. Cannot be used for
non bond-based topologies, e.g., improper dihedrals.
Args:
molecule (Molecule): Input molecule.
bond (bool): Whether to find bonds. If set to False, angle and
dihedral searching will be skipped. Default to True.
angle (bool): Whether to find angles. Default to True.
dihedral (bool): Whether to find dihedrals. Default to True.
ff_label (str): Site property key for labeling atoms of
different types. Default to None, i.e., use
site.species_string.
charges ([q, ...]): Charge of each site in a (n,)
array/list, where n is the No. of sites. Default to
None, i.e., search site property for charges.
velocities ([[vx, vy, vz], ...]): Velocity of each site in
a (n, 3) array/list, where n is the No. of sites.
Default to None, i.e., search site property for
velocities.
tol (float): Bond distance tolerance. Default to 0.1.
Not recommended to alter.
"""
real_bonds = molecule.get_covalent_bonds(tol=tol)
bond_list = [list(map(molecule.index, [b.site1, b.site2]))
for b in real_bonds]
if not all((bond, bond_list)):
# do not search for others if not searching for bonds or no bonds
return cls(sites=molecule, ff_label=ff_label, charges=charges,
velocities=velocities)
else:
angle_list, dihedral_list = [], []
dests, freq = np.unique(bond_list, return_counts=True)
hubs = dests[np.where(freq > 1)]
bond_arr = np.array(bond_list)
if len(hubs) > 0:
hub_spokes = {}
for hub in hubs:
ix = np.any(np.isin(bond_arr, hub), axis=1)
bonds = list(np.unique(bond_arr[ix]))
bonds.remove(hub)
hub_spokes[hub] = bonds
# skip angle or dihedral searching if too few bonds or hubs
dihedral = False if len(bond_list) < 3 or len(hubs) < 2 \
else dihedral
angle = False if len(bond_list) < 2 or len(hubs) < 1 else angle
if angle:
for k, v in hub_spokes.items():
angle_list.extend([[i, k, j] for i, j in
itertools.combinations(v, 2)])
if dihedral:
hub_cons = bond_arr[np.all(np.isin(bond_arr, hubs), axis=1)]
for i, j in hub_cons:
ks = [k for k in hub_spokes[i] if k != j]
ls = [l for l in hub_spokes[j] if l != i]
dihedral_list.extend([[k, i, j, l] for k,l in
itertools.product(ks, ls)
if k != l])
topologies = {k: v for k, v
in zip(SECTION_KEYWORDS["topology"][:3],
[bond_list, angle_list, dihedral_list])
if len(v) > 0}
topologies = None if len(topologies) == 0 else topologies
return cls(sites=molecule, ff_label=ff_label, charges=charges,
velocities=velocities, topologies=topologies)
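# A short sketch showing how Topology.from_bonding infers bonds and angles
# from geometry (Molecule is already imported at the top of this module);
# the methane coordinates below are illustrative.
def _example_topology_from_bonding():
    coords = [[0.000, 0.000, 0.000],    # C
              [1.089, 0.000, 0.000],    # H
              [-0.363, 1.027, 0.000],   # H
              [-0.363, -0.513, 0.889],  # H
              [-0.363, -0.513, -0.889]] # H
    methane = Molecule(["C", "H", "H", "H", "H"], coords)
    topo = Topology.from_bonding(methane)
    # Bonds and angles are stored as lists of site indices; methane has no
    # dihedrals, so no "Dihedrals" key is created.
    print(topo.topologies["Bonds"])
    print(topo.topologies["Angles"])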
class ForceField(MSONable):
"""
Class carrying most data in Masses and force field sections.
Attributes:
masses (pandas.DataFrame): DataFrame for Masses section.
force_field (dict): Force field section keywords (keys) and
data (values) as DataFrames.
maps (dict): Dict for labeling atoms and topologies.
"""
_is_valid = lambda self, df: not pd.isnull(df).values.any()
def __init__(self, mass_info, nonbond_coeffs=None, topo_coeffs=None):
"""
Args:
mass_info (list): List of atomic mass info. Elements,
strings (symbols) and floats are all acceptable for the
values, with the first two converted to the atomic mass
of an element. It is recommended to use
OrderedDict.items() to prevent key duplications.
[("C", 12.01), ("H", Element("H")), ("O", "O"), ...]
nonbond_coeffs [coeffs]: List of Pair or PairIJ
coefficients, of which the sequence must be sorted
according to the species in mass_info. Whether they are
Pair or PairIJ coefficients is determined by the length
of the list. Optional with default to None.
topo_coeffs (dict): Dict with force field coefficients for
molecular topologies. Optional with default
to None. All four valid keys listed below are optional.
Each value is a list of dicts with non optional keys
"coeffs" and "types", and related class2 force field
keywords as optional keys.
{
"Bond Coeffs":
[{"coeffs": [coeff],
"types": [("C", "C"), ...]}, ...],
"Angle Coeffs":
[{"coeffs": [coeff],
"BondBond Coeffs": [coeff],
"types": [("H", "C", "H"), ...]}, ...],
"Dihedral Coeffs":
[{"coeffs": [coeff],
"BondBond13 Coeffs": [coeff],
"types": [("H", "C", "C", "H"), ...]}, ...],
"Improper Coeffs":
[{"coeffs": [coeff],
"AngleAngle Coeffs": [coeff],
"types": [("H", "C", "C", "H"), ...]}, ...],
}
Topologies of the same type or equivalent types (e.g.,
("C", "H") and ("H", "C") bonds) are NOT ALLOWED to
be defined MORE THAN ONCE with DIFFERENT coefficients.
"""
map_mass = lambda v: v.atomic_mass.real if isinstance(v, Element) \
else Element(v).atomic_mass.real if isinstance(v, string_types) \
else v
index, masses, self.mass_info, atoms_map = [], [], [], {}
for i, m in enumerate(mass_info):
index.append(i + 1)
mass = map_mass(m[1])
masses.append(mass)
self.mass_info.append((m[0], mass))
atoms_map[m[0]] = i + 1
self.masses = pd.DataFrame({"mass": masses}, index=index)
self.maps = {"Atoms": atoms_map}
ff_dfs = {}
self.nonbond_coeffs = nonbond_coeffs
if self.nonbond_coeffs:
ff_dfs.update(self._process_nonbond())
self.topo_coeffs = topo_coeffs
if self.topo_coeffs:
self.topo_coeffs = {k: v for k, v in self.topo_coeffs.items()
if k in SECTION_KEYWORDS["ff"][2:]}
for k in self.topo_coeffs.keys():
coeffs, mapper = self._process_topo(k)
ff_dfs.update(coeffs)
self.maps.update(mapper)
self.force_field = None if len(ff_dfs) == 0 else ff_dfs
def _process_nonbond(self):
pair_df = pd.DataFrame(self.nonbond_coeffs)
assert self._is_valid(pair_df), \
"Invalid nonbond coefficients with rows varying in length"
npair, ncoeff = pair_df.shape
pair_df.columns = ["coeff%d" % i for i in range(1, ncoeff + 1)]
nm = len(self.mass_info)
ncomb = int(nm * (nm + 1) / 2)
if npair == nm:
kw = "Pair Coeffs"
pair_df.index = range(1, nm + 1)
elif npair == ncomb:
kw = "PairIJ Coeffs"
ids = list(itertools.
combinations_with_replacement(range(1, nm + 1), 2))
id_df = pd.DataFrame(ids, columns=["id1", "id2"])
pair_df = pd.concat([id_df, pair_df], axis=1)
else:
raise ValueError("Expecting {} Pair Coeffs or "
"{} PairIJ Coeffs for {} atom types,"
" got {}".format(nm, ncomb, nm, npair))
return {kw: pair_df}
def _process_topo(self, kw):
def find_eq_types(label, section):
if section.startswith("Improper"):
label_arr = np.array(label)
seqs = [[0, 1, 2, 3], [0, 2, 1, 3],
[3, 1, 2, 0], [3, 2, 1, 0]]
return [tuple(label_arr[s]) for s in seqs]
else:
return [label] + [label[::-1]]
main_data, distinct_types = [], []
class2_data = {k: [] for k in self.topo_coeffs[kw][0].keys()
if k in CLASS2_KEYWORDS.get(kw, [])}
for i, d in enumerate(self.topo_coeffs[kw]):
main_data.append(d["coeffs"])
distinct_types.append(d["types"])
for k in class2_data.keys():
class2_data[k].append(d[k])
distinct_types = [set(itertools.
chain(*[find_eq_types(t, kw)
for t in dt])) for dt in distinct_types]
type_counts = sum([len(dt) for dt in distinct_types])
type_union = set.union(*distinct_types)
assert len(type_union) == type_counts, "Duplicated items found " \
"under different coefficients in %s" % kw
atoms = set(np.ravel(list(itertools.chain(*distinct_types))))
assert atoms.issubset(self.maps["Atoms"].keys()), \
"Undefined atom type found in %s" % kw
mapper = {}
for i, dt in enumerate(distinct_types):
for t in dt:
mapper[t] = i + 1
def process_data(data):
df = pd.DataFrame(data)
assert self._is_valid(df),\
"Invalid coefficients with rows varying in length"
n, c = df.shape
df.columns = ["coeff%d" % i for i in range(1, c + 1)]
df.index = range(1, n + 1)
return df
all_data = {kw: process_data(main_data)}
if class2_data:
all_data.update({k: process_data(v) for k, v
in class2_data.items()})
return all_data, {kw[:-7] + "s": mapper}
def to_file(self, filename):
"""
Saves object to a file in YAML format.
Args:
filename (str): Filename.
"""
d = {"mass_info": self.mass_info,
"nonbond_coeffs": self.nonbond_coeffs,
"topo_coeffs": self.topo_coeffs}
yaml = YAML(typ="safe")
with open(filename, "w") as f:
yaml.dump(d, f)
@classmethod
def from_file(cls, filename):
"""
Constructor that reads in a file in YAML format.
Args:
filename (str): Filename.
"""
yaml = YAML(typ="safe")
with open(filename, "r") as f:
d = yaml.load(f)
return cls.from_dict(d)
@classmethod
def from_dict(cls, d):
d["mass_info"] = [tuple(m) for m in d["mass_info"]]
if d.get("topo_coeffs"):
for v in d["topo_coeffs"].values():
for c in v:
c["types"] = [tuple(t) for t in c["types"]]
return cls(d["mass_info"], d["nonbond_coeffs"], d["topo_coeffs"])
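# An illustrative sketch only: the Lennard-Jones and harmonic numbers below
# are made up (they are not real force-field parameters) and merely show the
# expected shapes of mass_info, nonbond_coeffs and topo_coeffs.
def _example_forcefield():
    mass_info = [("H", 1.00794), ("O", 15.9994)]
    # One "Pair Coeffs" row per atom type (two types -> two rows).
    nonbond_coeffs = [[0.0460, 0.40], [0.1521, 3.15]]
    topo_coeffs = {
        "Bond Coeffs": [{"coeffs": [450.0, 0.9572], "types": [("H", "O")]}],
        "Angle Coeffs": [{"coeffs": [55.0, 104.52], "types": [("H", "O", "H")]}],
    }
    ff = ForceField(mass_info, nonbond_coeffs=nonbond_coeffs,
                    topo_coeffs=topo_coeffs)
    print(ff.masses)               # Masses section as a DataFrame
    print(sorted(ff.force_field))  # Angle Coeffs, Bond Coeffs, Pair Coeffs
    print(ff.maps["Bonds"])        # {("H", "O"): 1, ("O", "H"): 1}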
def structure_2_lmpdata(structure, ff_elements=None, atom_style="charge"):
"""
Converts a structure to a LammpsData object with no force field
parameters and topologies.
Args:
structure (Structure): Input structure.
ff_elements ([str]): List of strings of elements that must be
present due to force field settings but not necessarily in
the structure. Default to None.
atom_style (str): Choose between "atomic" (neutral) and
"charge" (charged). Default to "charge".
Returns:
LammpsData
"""
s = structure.get_sorted_structure()
a, b, c = s.lattice.abc
m = s.lattice.matrix
xhi = a
xy = np.dot(m[1], m[0] / xhi)
yhi = np.sqrt(b ** 2 - xy ** 2)
xz = np.dot(m[2], m[0] / xhi)
yz = (np.dot(m[1], m[2]) - xy * xz) / yhi
zhi = np.sqrt(c ** 2 - xz ** 2 - yz ** 2)
box_bounds = [[0.0, xhi], [0.0, yhi], [0.0, zhi]]
box_tilt = [xy, xz, yz]
box_tilt = None if not any(box_tilt) else box_tilt
new_latt = Lattice([[xhi, 0, 0], [xy, yhi, 0], [xz, yz, zhi]])
s.modify_lattice(new_latt)
symbols = list(s.symbol_set)
if ff_elements:
symbols.extend(ff_elements)
elements = sorted(Element(el) for el in set(symbols))
mass_info = [tuple([i.symbol] * 2) for i in elements]
ff = ForceField(mass_info)
topo = Topology(s)
return LammpsData.from_ff_and_topologies(ff=ff, topologies=[topo],
box_bounds=box_bounds,
box_tilt=box_tilt,
atom_style=atom_style)
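# A hedged end-to-end sketch: build a rock-salt NaCl structure (Lattice and
# Structure are already imported at the top of this module; the 5.64 A
# lattice constant is approximate) and dump it as a charge-style data file.
def _example_structure_to_lammps():
    nacl = Structure.from_spacegroup(225, Lattice.cubic(5.64), ["Na", "Cl"],
                                     [[0, 0, 0], [0.5, 0.5, 0.5]])
    ld = structure_2_lmpdata(nacl, atom_style="charge")
    ld.write_file("nacl.data")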
| johnson1228/pymatgen | pymatgen/io/lammps/data.py | Python | mit | 45,831 | ["LAMMPS", "pymatgen"] | 7092db4e675761e7580ba99e34265d2bbd1d094c609d18e865008e876f5e3624 |
import logging
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter('{levelname}:{name}:{msg}', style='{'))
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
root_logger.addHandler(handler)
from fluggo.media import process, libav, x264, matroska, faac
import sys
import datetime
import struct
import fractions
import collections
import math
process.enable_glib_logging(True)
if not process.check_context_supported():
print("Sorry, your drivers don't support the minimum")
print("requirements for this library. Consider using a")
print("software driver.")
exit()
packet_source = libav.AVDemuxer(sys.argv[1], 0)
dv_decoder = libav.AVVideoDecoder(packet_source, 'dvvideo')
dv_reconstruct = process.DVReconstructionFilter(dv_decoder)
mpeg2_subsample = process.MPEG2SubsampleFilter(dv_reconstruct)
audio_packet_source = libav.AVDemuxer(sys.argv[1], 1)
audio_decoder = libav.AVAudioDecoder(audio_packet_source, 'pcm_s16le', 2)
params = x264.X264EncoderParams(preset='ultrafast', width=720, height=480,
frame_rate=fractions.Fraction(30000, 1001), constant_ratefactor=23.0,
sample_aspect_ratio=fractions.Fraction(10, 11), annex_b=False, repeat_headers=False, interlaced=True)
encoder = x264.X264VideoEncoder(mpeg2_subsample, 0, 1000, params)
with open('test.mkv', mode='wb') as myfile:
writer = matroska.MatroskaWriter(myfile)
# Matroska test writing; much of this is based on the x264 Matroska muxer
ns = 1000000000
timescale = 1000000
frame_rate = fractions.Fraction(30000, 1001)
sar = 40.0 / 33.0
writer.write_start(
writing_app='Brian\'s test muxer',
duration=0.0,
timecode_scale=timescale)
# You want a rhyme or reason for this, ask the x264 devs
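    # (Hedged note: this appears to assemble the "avcC" decoder configuration
    #  record -- configuration version 1, profile/compatibility/level copied
    #  from the SPS, a reserved byte carrying the 4-byte NAL length size,
    #  then one SPS and one PPS each prefixed by a 2-byte big-endian length.)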
private = bytearray()
sps = encoder.sps[4:]
pps = encoder.pps[4:]
private.append(1)
private.extend(sps[1:4])
private.extend(b'\xFF\xE1') # One SPS
private.extend(len(sps).to_bytes(2, byteorder='big'))
private.extend(sps)
private.append(1) # One PPS
private.extend(len(pps).to_bytes(2, byteorder='big'))
private.extend(pps)
video_track = matroska.Track(
number=1,
uid=1,
type_=matroska.TrackType.VIDEO,
codec_id='V_MPEG4/ISO/AVC',
codec_private=private,
lacing=False,
default_duration_ns=int(float(ns) / float(frame_rate)),
video=matroska.TrackVideo(720, 480,
interlaced=True,
display_width=int(round(720 * sar)),
display_height=480,
display_unit=matroska.DisplayUnit.PIXELS))
writer.write_tracks([video_track])
# Time to actually code stuff!
cluster = None
cluster_time = 0
cluster_size = 0
first_frame = True
frames_written = 0
try:
packet = encoder.get_next_packet()
while packet:
print('Writing packet pts {0} dts {1}'.format(packet.pts, packet.dts))
scaled_pts = (packet.pts * ns * frame_rate.denominator) // (frame_rate.numerator * timescale)
data = packet.data
# Stick the SEI in the first frame
if first_frame:
data = encoder.sei + packet.data
first_frame = False
# Write the block
# Note that if we know which frames are B-frames, we can set skippable
writer.write_simple_block(1, scaled_pts, data, keyframe=packet.keyframe)
frames_written += 1
packet = encoder.get_next_packet()
finally:
writer.write_end(duration=float(ns * frames_written)/(float(frame_rate) * float(timescale)))
| fluggo/Canvas | scripts/encode_x264_mkv.py | Python | gpl-3.0 | 3,696 | ["Brian"] | 1db5df1eb7882f4ec480610ea7b9105cd2b3596de9f46d8c17f59afdd1a9f832 |
#-------------------------------------------------------------------------------
# . File : TeraChemOutputFile.py
# . Program : MolarisTools
# . Copyright : USC, Mikolaj Feliks (2015-2018)
# . License : GNU GPL v3.0 (http://www.gnu.org/licenses/gpl-3.0.en.html)
#-------------------------------------------------------------------------------
import collections, exceptions, os
from MolarisTools.Units import HARTREE_TO_KCAL_MOL, HARTREE_BOHR_TO_KCAL_MOL_ANGSTROM, GRADIENT_TO_FORCE
from MolarisTools.Utilities import TokenizeLine
Atom = collections.namedtuple ("Atom", "symbol x y z charge")
Force = collections.namedtuple ("Force", "x y z")
class TeraChemOutputFile (object):
"""A class to read a TeraChem output file."""
def __init__ (self, filename="tc.out", deep=True):
"""Constructor."""
self.inputfile = filename
self.deep = deep
self._Parse ()
def _Parse (self):
lines = open (self.inputfile)
try:
while True:
line = next (lines)
if line.startswith ("FINAL ENERGY:"):
tokens = TokenizeLine (line, converters=[None, None, float, None])
self.Efinal = tokens[2] * HARTREE_TO_KCAL_MOL
elif line.startswith ("Scratch directory:"):
tokens = TokenizeLine (line, converters=[None, None, None])
self.scratchFolder = tokens[2]
elif line.startswith ("Total atoms:"):
tokens = TokenizeLine (line, converters=[None, None, int])
natoms = tokens[2]
# if (natoms != len (tempatoms)):
# pass
# ****** QM coordinates ******
# C -0.0178447840 0.0103903440 -0.0015978260
# H -0.0463346130 0.0459578690 1.0997854660
# H 1.0186858510 0.0532897140 -0.3692509610
# H -0.5543873770 -0.8695391090 -0.3892902580
# Cl -0.8673247020 1.4796754130 -0.6190902640
# Cl 1.5376616510 -2.4337214350 0.8399845050
#
elif line.startswith ("****** QM coordinates ******"):
tempatoms = []
while True:
line = next (lines)
if (line.strip () == ""):
break
tokens = TokenizeLine (line, converters=[None, float, float, float])
atom = Atom (symbol=tokens[0], x=tokens[1], y=tokens[2], z=tokens[3], charge=0.0)
tempatoms.append (atom)
# ESP unrestraint charges:
# Atom X Y Z Charge Exposure
# -----------------------------------------------------------
# C -0.033722 0.019635 -0.003019 -0.161628 0.0529
# H -0.087560 0.086848 2.078293 0.135770 0.5000
# H 1.925037 0.100703 -0.697783 0.150080 0.4130
# H -1.047640 -1.643191 -0.735652 0.144291 0.4348
# Cl -1.639006 2.796181 -1.169911 -0.328851 0.8444
# Cl 2.905759 -4.599067 1.587341 -0.939662 0.9270
# -----------------------------------------------------------
elif line.startswith ("ESP unrestraint charges:"):
next (lines)
next (lines)
self.espcharges = []
while True:
line = next (lines)
if line.startswith ("----"):
break
tokens = TokenizeLine (line, converters=[None, float, float, float, float, float])
(symbol, charge) = (tokens[0], tokens[4])
self.espcharges.append (charge)
# Gradient units are Hartree/Bohr
# ---------------------------------------------------
# dE/dX dE/dY dE/dZ
# -0.0094514443 0.0141415195 -0.0068751376
# -0.0018028819 0.0035487049 0.0113054990
# 0.0100877721 0.0051162387 -0.0050948764
# -0.0088062409 -0.0065296736 -0.0054152317
# 0.0115697382 -0.0189485242 0.0072745597
# -0.0015969432 0.0026717342 -0.0011948132
# ---------------------------------------------------
elif line.startswith ("Gradient units are Hartree/Bohr"):
next (lines)
next (lines)
self.forces = []
while True:
line = next (lines)
if (line.startswith ("----") or line.strip () == ""):
break
tokens = TokenizeLine (line, converters=[float, float, float])
(fx, fy, fz) = tokens
force = Force (x=(fx * GRADIENT_TO_FORCE * HARTREE_BOHR_TO_KCAL_MOL_ANGSTROM), y=(fy * GRADIENT_TO_FORCE * HARTREE_BOHR_TO_KCAL_MOL_ANGSTROM), z=(fz * GRADIENT_TO_FORCE * HARTREE_BOHR_TO_KCAL_MOL_ANGSTROM))
self.forces.append (force)
# . Get job time in seconds
elif line.count ("Total processing time:"):
tokens = TokenizeLine (line, converters=[None, None, None, float, None])
self.jobtime = tokens[3]
except exceptions.StopIteration:
pass
lines.close ()
if (self.deep):
# . Deep parsing (aka parse files in the scratch folder)
if hasattr (self, "scratchFolder"):
# . Collect Mulliken charges
lines = open (os.path.join (self.scratchFolder, "charge_mull.xls"))
self.charges = []
for i in range (natoms):
line = next (lines)
tokens = TokenizeLine (line, converters=[int, None, float])
self.charges.append (tokens[2])
lines.close ()
# . Collect coordinates
fileGeometry = os.path.join (self.scratchFolder, "xyz.xyz")
if (os.path.exists (fileGeometry)):
lines = open (fileGeometry)
next (lines)
next (lines)
self.atoms = []
for i in range (natoms):
line = next (lines)
tokens = TokenizeLine (line, converters=[None, float, float, float])
atom = Atom (symbol=tokens[0], x=tokens[1], y=tokens[2], z=tokens[3], charge=self.charges[i])
self.atoms.append (atom)
lines.close ()
# . Finalize after reading all files
# if (not hasattr (self, "atoms")):
# self.atoms = tempatoms
@property
def natoms (self):
if (hasattr (self, "atoms")):
return len (self.atoms)
return 0
@property
def ncharges (self):
if (hasattr (self, "pointCharges")):
return len (self.pointCharges)
return 0
def WriteMolarisForces (self, filename="forces.out", Eref=0., useESPCharges=False):
"""Write a file in the Molaris-suitable format."""
pass
#===============================================================================
# . Main program
#===============================================================================
if (__name__ == "__main__"): pass
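# Usage sketch (illustrative, not part of MolarisTools): parse a TeraChem log
# named "tc.out"; deep=False skips the scratch-folder files. Efinal and forces
# are only set if the corresponding sections were found in the log.
def _ExampleUsage ():
    tc = TeraChemOutputFile (filename="tc.out", deep=False)
    print (tc.Efinal)          # final SCF energy in kcal/mol
    for force in tc.forces:    # forces in kcal/(mol*A)
        print (force.x, force.y, force.z)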
| mfx9/molaris-tools | MolarisTools/Parser/TeraChemOutputFile.py | Python | gpl-3.0 | 7,929 | ["TeraChem"] | 37698b43e87ac6c9da3110b3f0d64ab173f3698ca9ed5409b5e44c565b6c8ca8 |
version_name = "OzFluxQC"
version_number = "V2.9.6f"
# V2.9.6f - re-write of qcio.xl_read_series() to trap different
# number of rows on different worksheets
# V2.9.6e - bug fix in qcts.MassmanStandard()
# - fixed calculation of effective time constant
# V2.9.6d - bug fix in gfSOLO_main
# - variable was being pulled from L4 data structure prior
# to running QC checks before gap filling so any u* filtering
# at L5 was lost
# - variable now pulled from L5 data structure
# - bug introduced shortly after return from Berlin in August 2016
# - rationalised code that calls L1 processing
# - implemented qcls.l1qc()
# - call for L1 processing and API for call now follows calling
# and API for L2 to L6
# - done to make implementation of Kepler version easier
# V2.9.6c - bug fix in qcfunc.DateTimeFromTimeStamp()
# - datetimes in non-ISO format were interpreted incorrectly
# - added format option to routine call to specify order
# in which year, month and day appear in datetime string
# - reinstated calculation of synthetic Fsd at L1
# V2.9.6b - bug fix of ustar implementation at L5
# - gfSOLO was picking the target data from the L4 (not filtered)
# data structure not the L5 (filtered) data structure so
# filtered data in L5 was being overwritten by gap filled,
# unfiltered data
# - changed source of target data in gfSOLO_runsolo,
# gfSOLO_runseqsolo and gfSOLO_plot from dsa to dsb
# V2.9.6a - implementation of ustar filtering at L5 or L6
# - previous versions applied the ustar filter at L6
# after gap filling at L5 which meant the NN used
# for gap filling at L5 was being trained on Fc
# observations from periods when ustar was below the
# threshold.
# V2.9.5 - implementation of new respiration options
# - removed NN related code from qcrp.py and placed this
# in a stand-alone module qcrpNN.py.
# - implemented Ian McHugh's code for Lloyd-Taylor
# V2.9.4 - major bug fix
# - a bug was introduced in V2.8.7 on 15/04/2015 that caused
# Fg corrected for heat storage in the layer above the
# ground heat flux plates to be replaced with uncorrected
# Fg during the L3 processing
# - this release fixes the bug
# V2.9.3 - updates and bug fixes
# - implemented batch processing for L1 to L6 including climatology,
# CPD, concatenation
# - completed implementation of plot_path in control files
# - fixed bug that caused the units of NEE, NEP, GPP and ER
# in the L6 output file to be gC/m2
# - fixed bug on gfalternate_matchstartendtimes
# V2.9.2 - updates and bug fixes
# - implemented summary output to Excel file and plots at L6
# - implemented "ols_thru0", "rma" and "odr" fit types at L4
# - fixed bug in qcrp.GetERFromFc that let gap filled Fc data
# through when estimating ecosystem respiration (ER) from
# u*-filtered, nocturnal Fc
# V2.9.1 - hopefully completed the major re-write of the gap filling
# routines for L4 and L5
# - much testing and tweaking of gfalternate_autocomplete
# to get it to run, the logic is rather tortuous at present
# and needs a re-working, there is a gfalternate_autocomplete_rewrite
# routine, just needs completion
# - updated QCCPD code with Ian McHugh's latest version and added
# code to trap empty results data frame before plotting histograms
# V2.9.0 - major re-write of gap filling routines to simplify workflow
# - will document later
# V2.8.7 - fixed several bugs in the gap filling routine and improved
# the gap filling workflow, implemented ability to split a
# netCDF file at specified dates
# V2.8.6 - added a new switch "UseL2Fluxes" to the L3 processing:
# - if true, skip calculating fluxes from covariances and skip corrections
# - if false (default), use covariances as normal
# V2.8.5 - miscellaneous changes arising from use of V2.8.4 at the
# 2014 OzFlux Data Workshop
# V2.8.4 - changes as follows:
# - split gap filling into L4 (meteorological drivers) and
# L5 (fluxes), partitioning is now L6
# - associated changes to the template control files
# - implemented gap filling from BIOS2
# - implemented "Import" at L4 to allow importing MODIS data
# into OzFluxQC data path
# V2.8.3 - implemented estimation of u* threshold by CPD (Barr et al)
# V2.8.2 - implemented;
# - estimation of ecosystem respiration from nocturnal Fc
# using SOLO and FFNET.
# - several bug fixes
# V2.8.1 - refactor of gfACCESS_plotdetailed and associated code
# V2.8.0 - implemented;
# - gap filling using ACCESS data (works for any alternate site file)
# - menu at top of OzFluxQC GUI
# V2.7.2 - several enhancements, mainly to do with gap filling
# - implemented "interpolated daily" method of
# gap filling from climatology
# - implemented gap filling at L3
# V2.7.1 - fixed bug in CorrectFcForStorage, Fc_storage_in typo
# V2.7.0 - major bug fixes as for V2.6.3 above
# - minor bug fixes to clean up use of switches in ['Options'] section
# - minor fixes to check that requested files exist
# - implemented compare_ep.py to automate comparison of OzFluxQC and
# EddyPro results
# V2.6.3 - clean up of code after comparing EddyPro and OzFluxQC
# - fix bugs in mf.vapourpressure and mf.RHfromabsolutehumidity
# - deprecated WPLcov after finding an error in Fc_WPLcov (units of wA term)
# - rationalised calculation of dry and moist air densities and partial
# density of water vapour
# - implemented EddyPro method of calculating rho*Cp
# - tidied up use of densities in WPL correction and used flux
# form (WPL80 Eqn 42a and 44) for Fe and Fc
# - implemented EddyPro method of calculating Fh from Fhv
# - rationalised use of densities and rho*Cp when calculating fluxes
# V2.6.2 - implement Lloyd-Taylor ER
# V2.6.1 - fix 2D coordinate rotation for momentum covariances
# V2.6.0 - fix ConvertCO2 units bug
# V2.5.2 - implement sofm/solo/seqsolo
# V2.5.1 - post-Cairns 2013 version
| OzFlux/OzFluxQC | scripts/cfg.py | Python | gpl-3.0 | 6,755 | ["NetCDF"] | dbd932db11b1b300a33ade7632ffff1edf0e2086dc408e4da4afc527bf443a82 |
# Copyright (C) 2016 Collin Capano, Christopher M. Biwer, Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This modules provides classes and functions for drawing and calculating the
probability density function of distributions.
"""
# imports needed for functions below
from pycbc.workflow import ConfigParser as _ConfigParser
from pycbc.distributions import constraints
from pycbc import VARARGS_DELIM as _VARARGS_DELIM
# Promote some classes/functions to the distributions name space
from pycbc.distributions.angular import UniformAngle, SinAngle, CosAngle, \
UniformSolidAngle
from pycbc.distributions.arbitrary import Arbitrary, FromFile
from pycbc.distributions.gaussian import Gaussian
from pycbc.distributions.power_law import UniformPowerLaw, UniformRadius
from pycbc.distributions.sky_location import UniformSky
from pycbc.distributions.uniform import Uniform
from pycbc.distributions.uniform_log import UniformLog10
from pycbc.distributions.spins import IndependentChiPChiEff
from pycbc.distributions.qnm import UniformF0Tau
from pycbc.distributions.joint import JointDistribution
# a dict of all available distributions
distribs = {
IndependentChiPChiEff.name : IndependentChiPChiEff,
Arbitrary.name : Arbitrary,
FromFile.name : FromFile,
Gaussian.name : Gaussian,
UniformPowerLaw.name : UniformPowerLaw,
UniformRadius.name : UniformRadius,
Uniform.name : Uniform,
UniformAngle.name : UniformAngle,
CosAngle.name : CosAngle,
SinAngle.name : SinAngle,
UniformSolidAngle.name : UniformSolidAngle,
UniformSky.name : UniformSky,
UniformLog10.name : UniformLog10,
UniformF0Tau.name : UniformF0Tau,
}
def read_distributions_from_config(cp, section="prior"):
"""Returns a list of PyCBC distribution instances for a section in the
given configuration file.
Parameters
----------
cp : WorkflowConfigParser
An open config file to read.
section : {"prior", string}
Prefix on section names from which to retrieve the distributions.
Returns
-------
list
A list of the parsed distributions.
"""
dists = []
variable_args = []
for subsection in cp.get_subsections(section):
name = cp.get_opt_tag(section, "name", subsection)
dist = distribs[name].from_config(cp, section, subsection)
if set(dist.params).isdisjoint(variable_args):
dists.append(dist)
variable_args += dist.params
else:
raise ValueError("Same parameter in more than one distribution.")
return dists
def _convert_liststring_to_list(lstring):
"""Checks if an argument of the configuration file is a string of a list
and returns the corresponding list (of strings).
The argument is considered to be a list if it starts with '[' and ends
with ']'. List elements should be comma separated. For example, passing
`'[foo bar, cat]'` will result in `['foo bar', 'cat']` being returned. If
the argument does not start and end with '[' and ']', the argument will
just be returned as is.
"""
if lstring[0]=='[' and lstring[-1]==']':
lstring = [str(lstring[1:-1].split(',')[n].strip().strip("'"))
for n in range(len(lstring[1:-1].split(',')))]
return lstring
def read_params_from_config(cp, prior_section='prior',
vargs_section='variable_args',
sargs_section='static_args'):
"""Loads static and variable parameters from a configuration file.
Parameters
----------
cp : WorkflowConfigParser
An open config parser to read from.
prior_section : str, optional
Check that priors exist in the given section. Default is 'prior.'
vargs_section : str, optional
The section to get the parameters that will be varied/need priors
defined for them. Default is 'variable_args'.
sargs_section : str, optional
The section to get the parameters that will remain fixed. Default is
'static_args'.
Returns
-------
variable_args : list
The names of the parameters to vary in the PE run.
static_args : dict
Dictionary of names -> values giving the parameters to keep fixed.
"""
# sanity check that each parameter in [variable_args] has a priors section
variable_args = cp.options(vargs_section)
subsections = cp.get_subsections(prior_section)
tags = set([p for tag in subsections for p in tag.split('+')])
missing_prior = set(variable_args) - tags
if any(missing_prior):
raise KeyError("You are missing a priors section in the config file "
"for parameter(s): {}".format(', '.join(missing_prior)))
# sanity check that each parameter with a priors section is in
# [variable_args]
missing_variable = tags - set(variable_args)
if any(missing_variable):
raise KeyError("Prior section found for parameter(s) {} but not "
"listed as variable parameter(s)."
.format(', '.join(missing_variable)))
# get static args
try:
static_args = dict([(key, cp.get_opt_tags(sargs_section, key, []))
for key in cp.options(sargs_section)])
except _ConfigParser.NoSectionError:
static_args = {}
# sanity check that each parameter in [variable_args]
# is not repeated in [static_args]
for arg in variable_args:
if arg in static_args:
raise KeyError("Parameter {} found both in static_args and in "
"variable_args sections.".format(arg))
# try converting values to float
for key in static_args:
val = static_args[key]
try:
# the following will raise a ValueError if it cannot be cast to
# float (as we would expect for string arguments)
static_args[key] = float(val)
except ValueError:
# try converting to a list of strings; this function will just
# return val if it does not begin (end) with [ (])
static_args[key] = _convert_liststring_to_list(val)
return variable_args, static_args
def read_constraints_from_config(cp, transforms=None,
constraint_section='constraint'):
"""Loads parameter constraints from a configuration file.
Parameters
----------
cp : WorkflowConfigParser
An open config parser to read from.
transforms : list, optional
List of transforms to apply to parameters before applying constraints.
constraint_section : str, optional
The section to get the constraints from. Default is 'constraint'.
Returns
-------
list
List of ``Constraint`` objects. Empty if no constraints were provided.
"""
cons = []
for subsection in cp.get_subsections(constraint_section):
name = cp.get_opt_tag(constraint_section, "name", subsection)
constraint_arg = cp.get_opt_tag(
constraint_section, "constraint_arg", subsection)
# get any other keyword arguments
kwargs = {}
section = constraint_section + "-" + subsection
extra_opts = [key for key in cp.options(section)
if key not in ["name", "constraint_arg"]]
for key in extra_opts:
val = cp.get(section, key)
if key == "required_parameters":
val = val.split(_VARARGS_DELIM)
else:
try:
val = float(val)
except ValueError:
pass
kwargs[key] = val
cons.append(constraints.constraints[name](constraint_arg,
transforms=transforms,
**kwargs))
return cons
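# A hedged usage sketch (not part of the original module). It assumes an ini
# file laid out along the usual pycbc prior conventions, for example:
#
#   [variable_args]
#   mass1 =
#   mass2 =
#
#   [static_args]
#   approximant = IMRPhenomD
#
#   [prior-mass1]
#   name = uniform
#   min-mass1 = 10.
#   max-mass1 = 80.
#
#   [prior-mass2]
#   name = uniform
#   min-mass2 = 10.
#   max-mass2 = 80.
#
# The min-/max- option names follow the common convention for bounded
# distributions, and the workflow ConfigParser is assumed to accept a list of
# ini files; exact spellings may differ between pycbc versions.
def _example_read_prior(config_file="prior.ini"):
    """Illustrative only: read the variable/static parameters and their
    priors from ``config_file`` and draw a few samples from the joint
    prior."""
    cp = _ConfigParser([config_file])
    variable_args, static_args = read_params_from_config(cp)
    dists = read_distributions_from_config(cp)
    prior = JointDistribution(variable_args, *dists)
    return prior.rvs(size=10)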
| stevereyes01/pycbc | pycbc/distributions/__init__.py | Python | gpl-3.0 | 8,568 | ["Gaussian"] | 17008eddbe4a0162e4a3e487016d3caa8eff2b65ada8276cdd0ab2b2b942e95e |