| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
"""
The B{0install list-feeds} command-line interface.
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
from zeroinstall import _
from zeroinstall.cmd import UsageError
from zeroinstall.injector import model, reader
syntax = "URI"
def add_options(parser):
pass
def handle(config, options, args):
"""@type args: [str]"""
if len(args) != 1: raise UsageError()
uri = model.canonical_iface_uri(args[0])
iface = config.iface_cache.get_interface(uri)
if iface.extra_feeds:
for f in iface.extra_feeds:
print(f.uri)
else:
print(_("(no feeds)"))
| rammstein/0install | zeroinstall/cmd/list_feeds.py | Python | lgpl-2.1 | 653 | ["VisIt"] | 04b4ed3b814a47fbdd45786515a7def8d3f4edd56e215ad5fc061bd7fbc142b2 |
#! /usr/bin/env python
"""
Usage:
calc-blast-cover.py reference.fa query.x.reference.blastn minmatch query.fa
calc-blast-cover calculates the fraction of bases in 'reference.fa' that are
covered by BLAST matches from 'query.fa', for sequences in 'query.fa' that are
longer than 'minmatch'.
"""
import sys
import blastparser
import screed
if len(sys.argv) != 5:
print >>sys.stderr, "Usage: calc-blast-cover.py reference.fa query.x.reference.blastn minmatch query.fa"
sys.exit(-1)
MIN_SCORE = 200
MIN_QUERY_LEN = int(sys.argv[3])
# load the names of sufficiently long query sequences into a set
query_seqs = set([ record.name for record in screed.open(sys.argv[4]) \
if len(record.sequence) >= MIN_QUERY_LEN ])
# create a zeroed per-base coverage list for each sequence in the reference
covs = {}
for n, record in enumerate(screed.open(sys.argv[1])):
if n % 1000 == 0:
sys.stdout.write('+')
sys.stdout.flush()
covs[record.name] = [0] * len(record.sequence)
# run through the BLAST records in the query, and calculate how much of
# the reference is covered by the query.
for n, record in enumerate(blastparser.parse_fp(open(sys.argv[2]))):
if n % 100 == 0:
sys.stdout.write('.')
sys.stdout.flush()
if record.query_name not in query_seqs:
continue
for hit in record.hits:
for match in hit.matches:
if match.score < MIN_SCORE:
continue
cov = covs.get(hit.subject_name)
if not cov:
continue
start = min(match.subject_start, match.subject_end) - 1
end = max(match.subject_start, match.subject_end)
for i in range(start, end):
cov[i] = 1
print ''
# print out summary statistics for the reference.
coved = 0
total = 0
for name in covs:
coved += sum(covs[name])
total += len(covs[name])
f = sum(covs[name]) / float(len(covs[name]))
#print name, sum(covs[name]), len(covs[name]), f
print 'total bases in reference:', total
print 'total ref bases covered :', coved
print 'fraction :', coved / float(total)
print 'reference :', sys.argv[1]
print 'blast file :', sys.argv[2]
print 'query sequences :', sys.argv[4]
#print coved, total, coved / float(total), sys.argv[1], sys.argv[2], MIN_QUERY_LEN
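# Editor's sketch (not part of the original script): the interval-marking
# arithmetic above, applied to a toy coverage vector with the same 1-based
# inclusive match-coordinate convention.
def _coverage_fraction_example():
    cov = [0] * 10                       # a reference of length 10
    for start, end in [(1, 4), (3, 7)]:  # two overlapping BLAST matches
        for i in range(start - 1, end):
            cov[i] = 1
    return sum(cov) / float(len(cov))    # -> 0.7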
| jrherr/bioinformatics_scripts | python_scripts/calc_blast_cover.py | Python | mit | 2,343 | ["BLAST"] | 30a8f802487b1a99da0eebe9d6c17e4b8962ae6f980d83a93bc15c711976e6fc |
import warnings
from datetime import datetime
from numbers import Number as NumberBase
from itertools import chain
from functools import partial
from . import controlled_vocabulary
from lxml import etree
def make_counter(start=1):
'''
Create a functor whose only internal piece of data is a mutable container
with a reference to an integer, `start`. When the functor is called, it returns
the current `int` value of `start` and increments the stored value by one.
Parameters
----------
start: int, optional
The number to start counting from. Defaults to `1`.
Returns
-------
callable:
A zero-argument function returning the next number in the count progression.
'''
start = [start]
def count_up():
ret_val = start[0]
start[0] += 1
return ret_val
return count_up
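# Editor's sketch (illustrative): each counter returned by make_counter()
# advances independently of every other counter.
def _make_counter_example():
    first = make_counter()
    second = make_counter(start=10)
    return first(), first(), second()  # -> (1, 2, 10)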
def camelize(name):
parts = name.split("_")
if len(parts) > 1:
return parts[0] + ''.join(part.title() if part != "ref" else "_ref" for part in parts[1:])
else:
return name
def id_maker(type_name, id_number):
return "%s_%d" % (type_name.upper(), id_number)
NO_TRACK = object()
class CountedType(type):
_cache = {}
def __new__(cls, name, parents, attrs):
new_type = type.__new__(cls, name, parents, attrs)
tag_name = attrs.get("tag_name")
new_type.counter = staticmethod(make_counter())
if attrs.get("_track") is NO_TRACK:
return new_type
cls._cache[name] = new_type
if tag_name is not None:
cls._cache[tag_name] = new_type
return new_type
class TagBase(object):
__metaclass__ = CountedType
type_attrs = {}
def __init__(self, tag_name=None, text="", **attrs):
self.tag_name = tag_name or self.tag_name
_id = attrs.pop('id', None)
self.attrs = {}
self.attrs.update(self.type_attrs)
self.text = text
self.attrs.update(attrs)
if _id is None:
self._id_number = self.counter()
self._id_string = None
elif isinstance(_id, int):
self._id_number = _id
self._id_string = None
elif isinstance(_id, basestring):
self._id_number = None
self._id_string = _id
def __getattr__(self, key):
try:
return self.attrs[key]
except KeyError:
try:
return self.attrs[camelize(key)]
except KeyError:
raise AttributeError("%s has no attribute %s" % (self.__class__.__name__, key))
@property
def id(self):
if self._id_string is None:
self._id_string = id_maker(self.tag_name, self._id_number)
return self._id_string
def element(self, xml_file=None, with_id=False):
attrs = {k: str(v) for k, v in self.attrs.items()}
if with_id:
attrs['id'] = self.id
if xml_file is None:
return etree.Element(self.tag_name, **attrs)
else:
return xml_file.element(self.tag_name, **attrs)
def write(self, xml_file, with_id=False):
el = self.element(with_id=with_id)
xml_file.write(el)
__call__ = element
def __repr__(self):
return "<%s id=\"%s\" %s>" % (self.tag_name, self.id, " ".join("%s=\"%s\"" % (
k, str(v)) for k, v in self.attrs.items()))
def __eq__(self, other):
try:
return self.attrs == other.attrs
except AttributeError:
return False
def __ne__(self, other):
try:
return self.attrs != other.attrs
except AttributeError:
return True
def __hash__(self):
return hash((self.tag_name, frozenset(self.attrs.items())))
class MzIdentML(TagBase):
type_attrs = {
"xmlns": "http://psidev.info/psi/pi/mzIdentML/1.1",
"version": "1.1.0",
"xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
"xsi:schemaLocation": "http://psidev.info/psi/pi/mzIdentML/1.1 ../../schema/mzIdentML1.1.0.xsd"
}
def __init__(self, **attrs):
attrs.setdefault('creationDate', datetime.utcnow())
super(MzIdentML, self).__init__("MzIdentML", **attrs)
class CVParam(TagBase):
tag_name = "cvParam"
@classmethod
def param(cls, name, value=None):
if isinstance(name, cls):
return name.write
else:
if value is None:
return cls(name=name).write
else:
return cls(name=name, value=value).write
def __init__(self, accession=None, name=None, ref=None, value=None, **attrs):
if ref is not None:
attrs["cvRef"] = ref
if accession is not None:
attrs["accession"] = accession
if name is not None:
attrs["name"] = name
if value is not None:
attrs['value'] = value
super(CVParam, self).__init__(self.tag_name, **attrs)
self.patch_accession(accession, ref)
@property
def value(self):
return self.attrs.get("value")
@value.setter
def value(self, value):
self.attrs['value'] = value
@property
def ref(self):
return self.attrs['cvRef']
@property
def name(self):
return self.attrs['name']
@property
def accession(self):
return self.attrs['accession']
def __call__(self, *args, **kwargs):
self.write(*args, **kwargs)
def patch_accession(self, accession, ref):
if isinstance(accession, int):
accession = "%s:%d" % (ref, accession)
self.attrs['accession'] = accession
class UserParam(CVParam):
tag_name = "userParam"
class CV(TagBase):
tag_name = 'cv'
def __init__(self, id, uri, **kwargs):
super(CV, self).__init__(id=id, uri=uri, **kwargs)
self._vocabulary = None
def load(self, handle=None):
if handle is None:
fp = controlled_vocabulary.obo_cache.resolve(self.uri)
cv = controlled_vocabulary.ControlledVocabulary.from_obo(fp)
else:
cv = controlled_vocabulary.ControlledVocabulary.from_obo(handle)
try:
cv.id = self.id
except Exception:
pass
return cv
def __getitem__(self, key):
if self._vocabulary is None:
self._vocabulary = self.load()
return self._vocabulary[key]
class ProvidedCV(CV):
_track = NO_TRACK
def __init__(self, id, uri, **kwargs):
super(ProvidedCV, self).__init__(id, uri, **kwargs)
self._provider = None
def load(self, handle=None):
cv = controlled_vocabulary.obo_cache.resolve(self.uri)
try:
cv.id = self.id
except Exception:
pass
return cv
def __getitem__(self, key):
if self._provider is None:
self._provider = self.load()
return self._provider[key]
def _make_tag_type(name, **attrs):
return type(name, (TagBase,), {"tag_name": name, "type_attrs": attrs})
def _element(_tag_name, *args, **kwargs):
try:
eltype = CountedType._cache[_tag_name]
except KeyError:
eltype = _make_tag_type(_tag_name)
return eltype(*args, **kwargs)
def element(xml_file, _tag_name, *args, **kwargs):
with_id = kwargs.pop("with_id", False)
if isinstance(_tag_name, basestring):
el = _element(_tag_name, *args, **kwargs)
else:
el = _tag_name
return el.element(xml_file=xml_file, with_id=with_id)
default_cv_list = [
_element(
"cv", id="PSI-MS",
uri=("http://psidev.cvs.sourceforge.net/viewvc/*checkout*/psidev"
"/psi/psi-ms/mzML/controlledVocabulary/psi-ms.obo"),
version="2.25.0", fullName="PSI-MS"),
_element(
"cv", id="UO",
uri="http://obo.cvs.sourceforge.net/*checkout*/obo/obo/ontology/phenotype/unit.obo",
fullName="UNIT-ONTOLOGY"),
ProvidedCV(id="UNIMOD", uri="http://www.unimod.org/obo/unimod.obo", fullName="UNIMOD")
]
common_units = {
"parts per million": "UO:0000169",
"dalton": "UO:0000221"
}
class ChildTrackingMeta(type):
def __new__(cls, name, parents, attrs):
if not hasattr(cls, "_cache"):
cls._cache = dict()
new_type = type.__new__(cls, name, parents, attrs)
cls._cache[name] = new_type
return new_type
class SpecializedContextCache(dict):
def __init__(self, type_name):
self.type_name = type_name
def __getitem__(self, key):
try:
item = dict.__getitem__(self, key)
return item
except KeyError:
warnings.warn("No reference was found for %d in %s" % (key, self.type_name), stacklevel=3)
new_value = id_maker(self.type_name, key)
self[key] = new_value
return new_value
def __repr__(self):
return '%s\n%s' % (self.type_name, dict.__repr__(self))
class VocabularyResolver(object):
def __init__(self, vocabularies=None):
if vocabularies is None:
vocabularies = default_cv_list
self.vocabularies = vocabularies
def param(self, name, value=None, cv_ref=None, **kwargs):
accession = kwargs.get("accession")
if isinstance(name, CVParam):
return name
else:
if cv_ref is None:
for cv in self.vocabularies:
try:
term = cv[name]
name = term["name"]
accession = term["id"]
cv_ref = cv.id
except Exception:
pass
if cv_ref is None:
return UserParam(name=name, value=value, **kwargs)
else:
return CVParam(name=name, accession=accession, value=value, ref=cv_ref, **kwargs)
def term(self, name):
for cv in self.vocabularies:
try:
term = cv[name]
return term
except Exception:
pass
else:
raise KeyError(name)
class DocumentContext(dict, VocabularyResolver):
def __init__(self, vocabularies=None):
dict.__init__(self)
VocabularyResolver.__init__(self, vocabularies)
def __missing__(self, key):
self[key] = SpecializedContextCache(key)
return self[key]
NullMap = DocumentContext()
class ReprBorrowingPartial(partial):
"""
Create a partial instance that uses the wrapped callable's
`__repr__` method instead of the generic `partial` representation.
"""
def __init__(self, func, *args, **kwargs):
super(ReprBorrowingPartial, self).__init__(func, *args, **kwargs)
def __repr__(self):
return repr(self.func)
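# Editor's sketch (illustrative): the partial reports the wrapped callable's
# repr instead of functools.partial's generic one.
def _repr_borrowing_example():
    wrapped = ReprBorrowingPartial(id_maker, "Peptide")
    return repr(wrapped)  # same string as repr(id_maker)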
class ComponentDispatcher(object):
"""
A container for a :class:`DocumentContext` which provides
an automatically parameterized version of all :class:`ComponentBase`
types which use this instance's context.
Attributes
----------
context : :class:`DocumentContext`
The mapping responsible for managing the global
state of all created components.
"""
def __init__(self, context=None, vocabularies=None):
if context is None:
context = DocumentContext(vocabularies=vocabularies)
else:
if vocabularies is not None:
context.vocabularies.extend(vocabularies)
self.context = context
def __getattr__(self, name):
"""
Provide access to an automatically parameterized
version of all :class:`ComponentBase` types which
use this instance's context.
Parameters
----------
name : str
Component Name
Returns
-------
ReprBorrowingPartial
A partially parameterized instance constructor for
the :class:`ComponentBase` type requested.
"""
component = ChildTrackingMeta._cache[name]
return ReprBorrowingPartial(component, context=self.context)
def register(self, entity_type, id):
"""
Pre-declare an entity in the document context. Ensures that
a reference look up will be satisfied.
Parameters
----------
entity_type : str
An entity type, either a tag name or a component name
id : int
The unique id number for the thing registered
Returns
-------
str
The constructed reference id
"""
value = id_maker(entity_type, id)
self.context[entity_type][id] = value
return value
@property
def vocabularies(self):
return self.context.vocabularies
def param(self, *args, **kwargs):
return self.context.param(*args, **kwargs)
def term(self, *args, **kwargs):
return self.context.term(*args, **kwargs)
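# Editor's sketch (illustrative): pre-registering an id guarantees that a
# later reference lookup succeeds without emitting a warning.
def _dispatcher_register_example():
    dispatcher = ComponentDispatcher()
    return dispatcher.register("SearchDatabase", 1)  # -> "SEARCHDATABASE_1"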
# ------------------------------------------
# Base Component Definitions
class ComponentBase(object):
__metaclass__ = ChildTrackingMeta
def __init__(self, *args, **kwargs):
pass
def __getattr__(self, key):
try:
return self.element.attrs[key]
except KeyError:
raise AttributeError(key)
def write(self, xml_file):
raise NotImplementedError()
def __call__(self, xml_file):
self.write(xml_file)
class GenericCollection(ComponentBase):
def __init__(self, tag_name, members, context=NullMap):
self.members = members
self.tag_name = tag_name
self.element = _element(tag_name, xmlns="http://psidev.info/psi/pi/mzIdentML/1.1")
def write(self, xml_file):
with self.element.element(xml_file, with_id=False):
for member in self.members:
member.write(xml_file)
class IDGenericCollection(GenericCollection):
def __init__(self, tag_name, members, id, context=NullMap):
self.members = members
self.tag_name = tag_name
self.element = _element(tag_name, xmlns="http://psidev.info/psi/pi/mzIdentML/1.1", id=id)
context[tag_name][id] = self.element.id
def write(self, xml_file):
with self.element.element(xml_file, with_id=True):
for member in self.members:
member.write(xml_file)
# --------------------------------------------------
# Input File Information
class SourceFile(ComponentBase):
def __init__(self, location, file_format, id=None, context=NullMap):
self.file_format = file_format
self.element = _element("SourceFile", location=location, id=id)
self.context = context
context["SourceFile"][id] = self.element.id
def write(self, xml_file):
with self.element.element(xml_file, with_id=True):
with element(xml_file, "FileFormat"):
self.context.param(self.file_format)(xml_file)
class SearchDatabase(ComponentBase):
def __init__(self, name, file_format, location=None, id=None, context=NullMap):
self.location = location
self.file_format = file_format
self.element = _element("SearchDatabase", location=location, name=name, id=id)
context["SearchDatabase"][id] = self.element.id
self.context = context
def write(self, xml_file):
with self.element.element(xml_file, with_id=True):
with element(xml_file, "FileFormat"):
self.context.param(self.file_format)(xml_file)
with element(xml_file, "DatabaseName"):
UserParam(name=self.name).write(xml_file)
class SpectraData(ComponentBase):
def __init__(self, location, file_format, spectrum_id_format, id=None, context=NullMap):
self.file_format = file_format
self.spectrum_id_format = spectrum_id_format
self.element = _element("SpectraData", id=id, location=location)
context['SpectraData'][id] = self.element.id
self.context = context
def write(self, xml_file):
with self.element.element(xml_file, with_id=True):
with element(xml_file, "FileFormat"):
self.context.param(self.file_format)(xml_file)
with element(xml_file, "SpectrumIDFormat"):
self.context.param(self.spectrum_id_format)(xml_file)
class Inputs(GenericCollection):
def __init__(self, source_files=tuple(), search_databases=tuple(), spectra_data=tuple(), context=NullMap):
items = list()
items.extend(source_files)
items.extend(search_databases)
items.extend(spectra_data)
super(Inputs, self).__init__("Inputs", items, context=context)
# --------------------------------------------------
# Identification Information
class DBSequence(ComponentBase):
def __init__(self, accession, sequence, id, search_database_id=1, context=NullMap):
self.sequence = sequence
self.search_database_ref = context['SearchDatabase'][search_database_id]
self.element = _element(
"DBSequence", accession=accession, id=id,
length=len(sequence), searchDatabase_ref=self.search_database_ref)
context["DBSequence"][id] = self.element.id
def write(self, xml_file):
protein = self.sequence
with self.element.element(xml_file, with_id=True):
with element(xml_file, "Seq"):
xml_file.write(protein)
class Peptide(ComponentBase):
def __init__(self, peptide_sequence, id, modifications=tuple(), context=NullMap):
self.peptide_sequence = peptide_sequence
self.modifications = modifications
self.element = _element("Peptide", id=id)
context["Peptide"][id] = self.element.id
def write(self, xml_file):
with self.element.element(xml_file, with_id=True):
with element(xml_file, "PeptideSequence"):
xml_file.write(self.peptide_sequence)
for mod in self.modifications:
mod.write(xml_file)
class PeptideEvidence(ComponentBase):
def __init__(self, peptide_id, db_sequence_id, id, start_position, end_position,
is_decoy=False, pre='', post='', context=NullMap):
self.peptide_id = peptide_id
self.db_sequence_id = db_sequence_id
self.element = _element(
"PeptideEvidence", isDecoy=is_decoy, start=start_position,
end=end_position, peptide_ref=context["Peptide"][peptide_id],
dBSequence_ref=context['DBSequence'][db_sequence_id],
pre=pre, post=post, id=id)
context["PeptideEvidence"][id] = self.element.id
def write(self, xml_file):
xml_file.write(self.element(with_id=True))
class SpectrumIdentificationResult(ComponentBase):
def __init__(self, spectra_data_id, spectrum_id, id=None, identifications=tuple(), context=NullMap):
self.identifications = identifications
self.element = _element(
"SpectrumIdentificationResult", spectraData_ref=context["SpectraData"][spectra_data_id],
spectrumID=spectrum_id, id=id)
def write(self, xml_file):
with self.element.element(xml_file, with_id=True):
for item in self.identifications:
item.write(xml_file)
class SpectrumIdentificationItem(ComponentBase):
def __init__(self, calculated_mass_to_charge, experimental_mass_to_charge,
charge_state, peptide_id, peptide_evidence_id, score, id, cv_params=None,
pass_threshold=True, rank=1, context=NullMap):
self.peptide_evidence_ref = context["PeptideEvidence"][peptide_evidence_id]
self.cv_params = cv_params
self.score = score
self.element = _element(
"SpectrumIdentificationItem", calculatedMassToCharge=calculated_mass_to_charge, chargeState=charge_state,
experimentalMassToCharge=experimental_mass_to_charge, id=id, passThreshold=pass_threshold,
peptide_ref=context['Peptide'][peptide_id]
)
context['SpectrumIdentificationItem'][id] = self.element.id
self.context = context
def write(self, xml_file):
with self.element.element(xml_file, with_id=True):
_element(
"PeptideEvidenceRef",
peptideEvidence_ref=self.peptide_evidence_ref).write(
xml_file)
if isinstance(self.score, CVParam):
self.score.write(xml_file)
else:
self.context.param(name="score", value=self.score)(xml_file)
for cvp in (self.cv_params or ()):
self.context.param(cvp)(xml_file)
class SpectrumIdentificationList(IDGenericCollection):
def __init__(self, identification_results, id, context=NullMap):
super(SpectrumIdentificationList, self).__init__(
"SpectrumIdentificationList", identification_results, id, context=context)
class AnalysisData(GenericCollection):
def __init__(self, identification_lists=tuple(), protein_detection_lists=tuple(), context=NullMap):
items = list()
items.extend(identification_lists)
items.extend(protein_detection_lists)
super(AnalysisData, self).__init__("AnalysisData", items, context)
# --------------------------------------------------
# Meta-collections
class DataCollection(GenericCollection):
def __init__(self, inputs, analysis_data, context=NullMap):
super(DataCollection, self).__init__("DataCollection", [inputs, analysis_data], context)
class SequenceCollection(GenericCollection):
def __init__(self, db_sequences, peptides, peptide_evidence, context=NullMap):
super(SequenceCollection, self).__init__("SequenceCollection", chain.from_iterable(
[db_sequences, peptides, peptide_evidence]))
# --------------------------------------------------
# Software Execution Protocol Information
class Enzyme(ComponentBase):
def __init__(self, name, missed_cleavages=1, id=None, semi_specific=False, site_regexp=None, context=NullMap):
self.name = name
if site_regexp is None:
term = context.term(name)
try:
regex_ref = term['has_regexp']
regex_ent = context.term(regex_ref)
regex = regex_ent['name']
site_regexp = regex
except Exception:
pass
self.site_regexp = site_regexp
self.element = _element(
"Enzyme", semiSpecific=semi_specific, missedCleavages=missed_cleavages,
id=id)
context["Enzyme"][id] = self.element.id
self.context = context
def write(self, xml_file):
with self.element.element(xml_file, with_id=True):
if self.site_regexp is not None:
regex = _element("SiteRegexp").element()
regex.text = etree.CDATA(self.site_regexp)
xml_file.write(regex)
with element(xml_file, "EnzymeName"):
self.context.param(self.name)(xml_file)
class _Tolerance(ComponentBase):
def __init__(self, low, high=None, unit="parts per million", context=NullMap):
if isinstance(low, NumberBase):
low = CVParam(
accession="MS:1001413", ref="PSI-MS", unitCvRef="UO", unitName=unit,
unitAccession=common_units[unit], value=low,
name="search tolerance minus value")
if high is None:
high = CVParam(
accession="MS:1001412", ref="PSI-MS", unitCvRef="UO", unitName=unit,
unitAccession=common_units[unit], value=low.value,
name="search tolerance plus value")
elif isinstance(high, NumberBase):
high = CVParam(
accession="MS:1001412", ref="PSI-MS", unitCvRef="UO", unitName=unit,
unitAccession=common_units[unit], value=high,
name="search tolerance plus value")
self.low = low
self.high = high
def write(self, xml_file):
with element(xml_file, self.tag_name):
self.low.write(xml_file)
self.high.write(xml_file)
class FragmentTolerance(_Tolerance):
tag_name = "FragmentTolerance"
class ParentTolerance(_Tolerance):
tag_name = "ParentTolerance"
class Threshold(ComponentBase):
no_threshold = CVParam(accession="MS:1001494", ref="PSI-MS", name="no threshold")
def __init__(self, name=None, context=NullMap):
if name is None:
name = self.no_threshold
self.name = name
self.context = context
def write(self, xml_file):
with element(xml_file, "Threshold"):
self.context.param(self.name)(xml_file)
class SpectrumIdentificationProtocol(ComponentBase):
def __init__(self, search_type, analysis_software_id=1, id=1, additional_search_params=tuple(),
modification_params=tuple(), enzymes=tuple(), fragment_tolerance=None, parent_tolerance=None,
threshold=None, context=NullMap):
if threshold is None:
threshold = Threshold(context=context)
self.parent_tolerance = parent_tolerance
self.fragment_tolerance = fragment_tolerance
self.threshold = threshold
self.enzymes = enzymes
self.modification_params = modification_params
self.additional_search_params = additional_search_params
self.search_type = search_type
self.element = _element(
"SpectrumIdentificationProtocol", id=id,
analysisSoftware_ref=context['AnalysisSoftware'][analysis_software_id])
context["SpectrumIdentificationProtocol"][id] = self.element.id
self.context = context
def write(self, xml_file):
with self.element(xml_file, with_id=True):
with element(xml_file, "SearchType"):
self.context.param(self.search_type)(xml_file)
with element(xml_file, "AdditionalSearchParams"):
for search_param in self.additional_search_params:
self.context.param(search_param)(xml_file)
with element(xml_file, "ModificationParams"):
for mod in self.modification_params:
mod.write(xml_file)
with element(xml_file, "Enzymes"):
for enzyme in self.enzymes:
enzyme.write(xml_file)
if self.fragment_tolerance is not None:
self.fragment_tolerance.write(xml_file)
if self.parent_tolerance is not None:
self.parent_tolerance.write(xml_file)
self.threshold.write(xml_file)
class ProteinDetectionProtocol(ComponentBase):
def __init__(self, id=1, analysis_software_id=1, threshold=None, context=NullMap):
if threshold is None:
threshold = Threshold(context=context)
self.threshold = threshold
self.analysis_software_id = analysis_software_id
self.element = _element(
"ProteinDetectionProtocol", id=id,
analysisSoftware_ref=context["AnalysisSoftware"][analysis_software_id])
context["ProteinDetectionProtocol"][id] = self.element.id
def write(self, xml_file):
with self.element.element(xml_file, with_id=True):
self.threshold.write(xml_file)
class AnalysisProtocolCollection(GenericCollection):
def __init__(self, spectrum_identification_protocols=tuple(),
protein_detection_protocols=tuple(), context=NullMap):
items = list()
items.extend(spectrum_identification_protocols)
items.extend(protein_detection_protocols)
super(AnalysisProtocolCollection, self).__init__("AnalysisProtocolCollection", items, context)
# --------------------------------------------------
# Analysis Collection - Data-to-Analysis
class SpectrumIdentification(ComponentBase):
def __init__(self, spectra_data_ids_used=None, search_database_ids_used=None, spectrum_identification_list_id=1,
spectrum_identification_protocol_id=1, id=1, context=NullMap):
self.spectra_data_ids_used = [context["SpectraData"][x] for x in (spectra_data_ids_used or [])]
self.search_database_ids_used = [context["SearchDatabase"][x] for x in (search_database_ids_used or [])]
self.element = _element(
"SpectrumIdentification", id=id,
spectrumIdentificationList_ref=context["SpectrumIdentificationList"][
spectrum_identification_list_id],
spectrumIdentificationProtocol_ref=context["SpectrumIdentificationProtocol"][
spectrum_identification_protocol_id])
context["SpectrumIdentification"] = self.element.id
def write(self, xml_file):
with self.element(xml_file, with_id=True):
for spectra_data_id in self.spectra_data_ids_used:
_element("InputSpectra", spectraData_ref=spectra_data_id).write(xml_file)
for search_database_id in self.search_database_ids_used:
_element("SearchDatabaseRef", searchDatabase_ref=search_database_id).write(xml_file)
# --------------------------------------------------
# Misc. Provenance Management
DEFAULT_CONTACT_ID = "PERSON_DOC_OWNER"
DEFAULT_ORGANIZATION_ID = "ORG_DOC_OWNER"
class CVList(ComponentBase):
def __init__(self, cv_list=None, context=NullMap):
if cv_list is None:
cv_list = default_cv_list
self.cv_list = cv_list
def write(self, xml_file):
with element(xml_file, 'cvList'):
for member in self.cv_list:
xml_file.write(member.element(with_id=True))
class AnalysisSoftware(ComponentBase):
def __init__(self, name, id=1, version=None, uri=None, contact=DEFAULT_CONTACT_ID, context=NullMap, **kwargs):
self.name = name
self.version = version
self.uri = uri
self.contact = contact
self.kwargs = kwargs
self.element = _element("AnalysisSoftware", id=id, name=self.name, version=self.version, uri=self.uri)
context["AnalysisSoftware"][id] = self.element.id
def write(self, xml_file):
with self.element(xml_file, with_id=True):
with element(xml_file, "ContactRole", contact_ref=self.contact):
with element(xml_file, "Role"):
xml_file.write(CVParam(accession="MS:1001267", name="software vendor", cvRef="PSI-MS").element())
class Provider(ComponentBase):
def __init__(self, id="PROVIDER", contact=DEFAULT_CONTACT_ID, context=NullMap):
self.id = id
self.contact = contact
def write(self, xml_file):
with element(xml_file, "Provider", id=self.id, xmlns="http://psidev.info/psi/pi/mzIdentML/1.1"):
with element(xml_file, "ContactRole", contact_ref=self.contact):
with element(xml_file, "Role"):
xml_file.write(CVParam(accession="MS:1001271", name="researcher", cvRef="PSI-MS").element())
class Person(ComponentBase):
def __init__(self, first_name='first_name', last_name='last_name', id=DEFAULT_CONTACT_ID,
affiliation=DEFAULT_ORGANIZATION_ID, context=NullMap):
self.first_name = first_name
self.last_name = last_name
self.id = id
self.affiliation = affiliation
self.element = _element("Person", firstName=first_name, last_name=last_name, id=id)
context["Person"][id] = self.element.id
def write(self, xml_file):
with self.element.element(xml_file, with_id=True):
element(xml_file, 'Affiliation', organization_ref=self.affiliation)
class Organization(ComponentBase):
def __init__(self, name="name", id=DEFAULT_ORGANIZATION_ID, context=NullMap):
self.name = name
self.id = id
self.element = _element("Organization", name=name, id=id)
context["Organization"][id] = self.id
def write(self, xml_file):
xml_file.write(self.element.element())
DEFAULT_PERSON = Person()
DEFAULT_ORGANIZATION = Organization()
class AuditCollection(ComponentBase):
def __init__(self, persons=None, organizations=None, context=NullMap):
if persons is None:
persons = (DEFAULT_PERSON,)
if organizations is None:
organizations = (DEFAULT_ORGANIZATION,)
self.persons = persons
self.organizations = organizations
def write(self, xml_file):
with element(xml_file, "AuditCollection", xmlns="http://psidev.info/psi/pi/mzIdentML/1.1"):
for person in self.persons:
person.write(xml_file)
for organization in self.organizations:
organization.write(xml_file)
| mobiusklein/mzidentml_writer | mzident_writer/components.py | Python | apache-2.0 | 32,651 | ["Dalton"] | eeb62b8e83cb58a49e8db97bea9a76ff6c9d026c3bf83e63e4334767496d26f0 |
# -*- coding: utf-8 -*-
"""
The :mod:`sklearn.metrics.pairwise` submodule implements utilities to evaluate
pairwise distances or affinity of sets of samples.
This module contains both distance metrics and kernels. A brief summary is
given on the two here.
Distance metrics are functions d(a, b) such that d(a, b) < d(a, c) if objects
a and b are considered "more similar" than objects a and c. Two objects exactly
alike would have a distance of zero.
One of the most popular examples is Euclidean distance.
To be a 'true' metric, it must obey the following four conditions::
1. d(a, b) >= 0, for all a and b
2. d(a, b) == 0, if and only if a = b, positive definiteness
3. d(a, b) == d(b, a), symmetry
4. d(a, c) <= d(a, b) + d(b, c), the triangle inequality
Kernels are measures of similarity, i.e. ``s(a, b) > s(a, c)``
if objects ``a`` and ``b`` are considered "more similar" than objects
``a`` and ``c``. A kernel must also be positive semi-definite.
There are a number of ways to convert between a distance metric and a
similarity measure, such as a kernel. Let D be the distance, and S be the
kernel:
1. ``S = np.exp(-D * gamma)``, where one heuristic for choosing
``gamma`` is ``1 / num_features``
2. ``S = 1. / (D / np.max(D))``
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import array2d, atleast2d_or_csr
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils import safe_asarray
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast
# Utility Functions
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples_a, n_features]
Y : {array-like, sparse matrix}, shape = [n_samples_b, n_features]
Returns
-------
safe_X : {array-like, sparse matrix}, shape = [n_samples_a, n_features]
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape = [n_samples_b, n_features]
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
if Y is X or Y is None:
X = Y = atleast2d_or_csr(X)
else:
X = atleast2d_or_csr(X)
Y = atleast2d_or_csr(Y)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
if not (X.dtype == Y.dtype == np.float32):
if Y is X:
X = Y = safe_asarray(X, dtype=np.float)
else:
X = safe_asarray(X, dtype=np.float)
Y = safe_asarray(Y, dtype=np.float)
return X, Y
# Distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vectors x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples_1, n_features]
Y : {array-like, sparse matrix}, shape = [n_samples_2, n_features]
Y_norm_squared : array-like, shape = [n_samples_2], optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape = [n_samples_1, n_samples_2]
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = array2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances)
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes, for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x] = np.where(
flags, min_indices + chunk_y.start, indices[chunk_x])
values[chunk_x] = np.where(
flags, min_values, values[chunk_x])
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
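# Editor's sketch (illustrative): the chunked computation agrees with the
# naive argmin/min over the full distance matrix, as documented above.
def _argmin_min_example():
    X = np.array([[0., 0.], [3., 3.]])
    Y = np.array([[1., 1.], [2., 2.]])
    indices, distances = pairwise_distances_argmin_min(X, Y)
    D = euclidean_distances(X, Y)
    assert (indices == D.argmin(axis=1)).all()
    assert np.allclose(distances, D.min(axis=1))
    return indices, distances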
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs={}):
"""Compute minimum distances between one point and a set of points.
This function computes, for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X, Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
size_threshold : int, default=5e8
Avoid creating temporary matrices bigger than size_threshold (in
bytes). If the problem size gets too big, the implementation then
breaks it down in smaller problems.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise l1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
if issparse(X) or issparse(Y):
raise ValueError("manhattan_distance does not support sparse"
" matrices.")
X, Y = check_pairwise_arrays(X, Y)
temporary_size = X.size * Y.shape[-1]
# Convert to bytes
temporary_size *= X.itemsize
if temporary_size > size_threshold and sum_over_features:
# Broadcasting the full thing would be too big: it's on the order
# of magnitude of the gigabyte
D = np.empty((X.shape[0], Y.shape[0]), dtype=X.dtype)
index = 0
increment = 1 + int(size_threshold / float(temporary_size) *
X.shape[0])
while index < X.shape[0]:
this_slice = slice(index, index + increment)
tmp = X[this_slice, np.newaxis, :] - Y[np.newaxis, :, :]
tmp = np.abs(tmp, tmp)
tmp = np.sum(tmp, axis=2)
D[this_slice] = tmp
index += increment
else:
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
if sum_over_features:
D = np.sum(D, axis=2)
else:
D = D.reshape((-1, X.shape[1]))
return D
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
degree : int, default=3
gamma : float, optional
If None, defaults to 1.0 / n_features.
coef0 : int, default=1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
K **= degree
return K
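# Editor's sketch (illustrative): the kernel value matches the closed form
# (gamma * <x, y> + coef0) ** degree for a single pair of rows.
def _polynomial_kernel_example():
    X = np.array([[1., 2.]])
    Y = np.array([[3., 4.]])
    K = polynomial_kernel(X, Y, degree=2, gamma=0.5, coef0=1)
    assert np.allclose(K, (0.5 * 11.0 + 1.0) ** 2)  # <x, y> = 11
    return K  # array([[ 42.25]])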
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
gamma : float, optional
If None, defaults to 1.0 / n_features.
coef0 : int, default=1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = linear_kernel(X_normalized, Y_normalized)
return K
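# Editor's sketch (illustrative): on L2-normalized rows, cosine_similarity
# coincides with linear_kernel, as noted in the docstring above.
def _cosine_similarity_example():
    X = np.array([[3., 4.], [1., 0.]])
    X_normalized = normalize(X, copy=True)
    assert np.allclose(cosine_similarity(X),
                       linear_kernel(X_normalized, X_normalized))
    return cosine_similarity(X)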
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
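# Editor's sketch (illustrative): chi2_kernel is the exponentiated form of
# additive_chi2_kernel, matching the formulas in the two docstrings above.
def _chi2_relation_example():
    X = np.array([[0.3, 0.7], [0.5, 0.5]])
    assert np.allclose(chi2_kernel(X, gamma=0.5),
                       np.exp(0.5 * additive_chi2_kernel(X)))
    return chi2_kernel(X, gamma=0.5)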
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
ret = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(func)(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Please note that support for sparse matrices is currently limited to
'euclidean', 'l2' and 'cosine'.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate distance for each element in X and Y.
# FIXME: can use n_jobs here too
# FIXME: np.zeros can be replaced by np.empty
D = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# distance assumed to be symmetric.
D[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
D[j][i] = D[i][j]
return D
else:
# Note: the distance module doesn't support sparse matrices!
if type(X) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
if Y is None:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
else:
if type(Y) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
return distance.cdist(X, Y, metric=metric, **kwds)
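# Editor's sketch (illustrative): a callable metric is evaluated on each
# pair of rows; on dense data it agrees with the built-in 'l1' metric.
def _callable_metric_example():
    X = np.array([[0., 1.], [2., 3.]])
    D = pairwise_distances(X, metric=lambda a, b: np.abs(a - b).sum())
    assert np.allclose(D, pairwise_distances(X, metric='l1'))
    return D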
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
# If updating this dictionary, update the doc in both kernel_metrics()
# and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise distance metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
        the kernel value between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
    filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate kernel for each element in X and Y.
K = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# Kernel assumed to be symmetric.
K[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
K[j][i] = K[i][j]
return K
else:
raise ValueError("Unknown kernel %r" % metric)
|
johnowhitaker/bobibabber
|
sklearn/metrics/pairwise.py
|
Python
|
mit
| 37,117
|
[
"Gaussian"
] |
dee422c9b516b4733ea08b1dabc447936bdd980c7f4a404e6b951cc03d63cc27
|
"""
This is the courseware context_processor module.
This is meant to simplify the process of sending user preferences (especially time_zone and pref-lang)
to the templates without having to modify every view.
"""
import string
from django.utils.translation import get_language
from pytz import timezone
from pytz.exceptions import UnknownTimeZoneError
from edx_django_utils.cache import TieredCache
from lms.djangoapps.courseware.models import LastSeenCoursewareTimezone
from openedx.core.djangoapps.site_configuration.helpers import get_value
from openedx.core.djangoapps.user_api.errors import UserAPIInternalError, UserNotFound
from openedx.core.djangoapps.user_api.preferences.api import get_user_preference, get_user_preferences
from openedx.core.lib.cache_utils import get_cache
RETRIEVABLE_PREFERENCES = {
'user_timezone': 'time_zone',
'user_language': 'pref-lang'
}
CACHE_NAME = "context_processor.user_timezone_preferences"
def user_timezone_locale_prefs(request):
"""
Checks if request has an authenticated user.
If so, sends set (or none if unset) time_zone and language prefs.
If site-wide language is set, that language is used over the language set
in user preferences.
    This interacts with the DateUtils to either display the preferred values or
    attempt to determine the system/browser-set time_zones and languages.
"""
cached_value = get_cache(CACHE_NAME)
if not cached_value:
user_prefs = {
'user_timezone': None,
'user_language': get_language(),
}
if hasattr(request, 'user') and request.user.is_authenticated:
try:
user_preferences = get_user_preferences(request.user)
except (UserNotFound, UserAPIInternalError):
cached_value.update(user_prefs)
else:
user_prefs = {
key: user_preferences.get(pref_name, None)
for key, pref_name in RETRIEVABLE_PREFERENCES.items()
}
site_wide_language = get_value('LANGUAGE_CODE', None)
if site_wide_language:
user_prefs['user_language'] = site_wide_language
cached_value.update(user_prefs)
return cached_value
def get_last_seen_courseware_timezone(user):
"""
The above method is for the timezone that is set on the user's account.
That timezone is often not set, so this field retrieves the browser timezone
from a recent courseware visit (updated daily)
"""
cache_key = 'browser_timezone_{}'.format(str(user.id))
    cached_value = TieredCache.get_cached_response(cache_key)
    if not cached_value.is_found:
        try:
            record = LastSeenCoursewareTimezone.objects.get(user=user)
        except LastSeenCoursewareTimezone.DoesNotExist:
            return None
        # Cache the recorded browser timezone for a day to avoid repeated
        # database lookups; the field name is assumed to mirror the model name.
        browser_timezone = record.last_seen_courseware_timezone
        TieredCache.set_all_tiers(cache_key, browser_timezone, 86400)
        return browser_timezone
    return cached_value.value
def get_user_timezone_or_last_seen_timezone_or_utc(user):
"""
Helper method for returning a reasonable timezone for a user.
This method returns the timezone in the user's account if that is set.
If that is not set, it returns a recent timezone that we have recorded from a user's visit to the courseware.
If that is not set or the timezone is unknown, it returns UTC.
"""
user_timezone = (
get_user_preference(user, 'time_zone') or
get_last_seen_courseware_timezone(user) or
'UTC'
)
# We have seen non-printable characters (i.e. \x00) showing up in the
# user_timezone (I believe via the get_last_seen_courseware_timezone method).
# This sanitizes the user_timezone before passing it in.
user_timezone = filter(lambda l: l in string.printable, user_timezone)
user_timezone = ''.join(user_timezone)
try:
return timezone(user_timezone)
    except UnknownTimeZoneError:
return timezone('UTC')
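# --- Usage sketch (illustrative addition, not part of the original module) ---
# In a Django view with an authenticated request, the helpers above combine
# roughly as follows (shown as comments because a live request is required;
# `deadline_utc` is a hypothetical timezone-aware datetime):
#     prefs = user_timezone_locale_prefs(request)
#     tz = get_user_timezone_or_last_seen_timezone_or_utc(request.user)
#     local_deadline = deadline_utc.astimezone(tz)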
|
eduNEXT/edx-platform
|
lms/djangoapps/courseware/context_processor.py
|
Python
|
agpl-3.0
| 3,856
|
[
"VisIt"
] |
4b67a9bcfbcba485c14b774374739cbad3e89a839600dbb4bdb8e872a8d4f791
|
"""
Pyinvoke tasks.py file for automating releases and admin stuff.
Author: Shyue Ping Ong
"""
import datetime
import glob
import json
import os
import re
import subprocess
import webbrowser
import requests
from invoke import task
from monty.os import cd
from pymatgen.core import __version__ as CURRENT_VER
@task
def make_doc(ctx):
"""
Generate API documentation + run Sphinx.
:param ctx:
"""
with open("CHANGES.rst") as f:
contents = f.read()
toks = re.split(r"\-{3,}", contents)
n = len(toks[0].split()[-1])
changes = [toks[0]]
changes.append("\n" + "\n".join(toks[1].strip().split("\n")[0:-1]))
changes = ("-" * n).join(changes)
with open("docs_rst/latest_changes.rst", "w") as f:
f.write(changes)
with cd("docs_rst"):
ctx.run("cp ../CHANGES.rst change_log.rst")
ctx.run("rm pymatgen.*.rst", warn=True)
ctx.run("sphinx-apidoc --implicit-namespaces --separate -d 7 -o . -f ../pymatgen")
ctx.run("rm *.tests.*rst")
for f in glob.glob("*.rst"):
if f.startswith("pymatgen") and f.endswith("rst"):
newoutput = []
suboutput = []
subpackage = False
with open(f, "r") as fid:
for line in fid:
clean = line.strip()
if clean == "Subpackages":
subpackage = True
if not subpackage and not clean.endswith("tests"):
newoutput.append(line)
else:
if not clean.endswith("tests"):
suboutput.append(line)
if clean.startswith("pymatgen") and not clean.endswith("tests"):
newoutput.extend(suboutput)
subpackage = False
suboutput = []
with open(f, "w") as fid:
fid.write("".join(newoutput))
ctx.run("make html")
ctx.run("cp _static/* ../docs/html/_static", warn=True)
with cd("docs"):
ctx.run("rm *.html", warn=True)
ctx.run("cp -r html/* .", warn=True)
ctx.run("rm -r html", warn=True)
ctx.run("rm -r doctrees", warn=True)
ctx.run("rm -r _sources", warn=True)
ctx.run("rm -r _build", warn=True)
# This makes sure pymatgen.org works to redirect to the Github page
ctx.run('echo "pymatgen.org" > CNAME')
# Avoid the use of jekyll so that _dir works as intended.
ctx.run("touch .nojekyll")
@task
def make_dash(ctx):
"""
Make customized doc version for Dash
:param ctx:
"""
ctx.run("cp docs_rst/conf-docset.py docs_rst/conf.py")
make_doc(ctx)
ctx.run("rm docs/_static/pymatgen.docset.tgz", warn=True)
ctx.run("doc2dash docs -n pymatgen -i docs/_images/pymatgen.png -u https://pymatgen.org/")
plist = "pymatgen.docset/Contents/Info.plist"
xml = []
with open(plist, "rt") as f:
for l in f:
xml.append(l.strip())
if l.strip() == "<dict>":
xml.append("<key>dashIndexFilePath</key>")
xml.append("<string>index.html</string>")
with open(plist, "wt") as f:
f.write("\n".join(xml))
ctx.run('tar --exclude=".DS_Store" -cvzf pymatgen.tgz pymatgen.docset')
# xml = []
# with open("docs/pymatgen.xml") as f:
# for l in f:
# l = l.strip()
# if l.startswith("<version>"):
# xml.append("<version>%s</version>" % version)
# else:
# xml.append(l)
# with open("docs/pymatgen.xml", "wt") as f:
# f.write("\n".join(xml))
ctx.run("rm -r pymatgen.docset")
ctx.run("cp docs_rst/conf-normal.py docs_rst/conf.py")
@task
def contribute_dash(ctx, version):
make_dash(ctx)
ctx.run("cp pymatgen.tgz ../Dash-User-Contributions/docsets/pymatgen/pymatgen.tgz")
with cd("../Dash-User-Contributions/docsets/pymatgen"):
with open("docset.json", "rt") as f:
data = json.load(f)
data["version"] = version
with open("docset.json", "wt") as f:
json.dump(data, f, indent=4)
ctx.run('git commit --no-verify -a -m "Update to v%s"' % version)
ctx.run("git push")
ctx.run("rm pymatgen.tgz")
@task
def submit_dash_pr(ctx, version):
with cd("../Dash-User-Contributions/docsets/pymatgen"):
payload = {
"title": "Update pymatgen docset to v%s" % version,
"body": "Update pymatgen docset to v%s" % version,
"head": "Dash-User-Contributions:master",
"base": "master",
}
response = requests.post(
"https://api.github.com/repos/materialsvirtuallab/Dash-User-Contributions/pulls", data=json.dumps(payload)
)
print(response.text)
@task
def update_doc(ctx):
"""
Update the web documentation.
:param ctx:
"""
ctx.run("cp docs_rst/conf-normal.py docs_rst/conf.py")
make_doc(ctx)
ctx.run("git add .")
ctx.run('git commit -a -m "Update docs"')
ctx.run("git push")
@task
def publish(ctx):
"""
Upload release to Pypi using twine.
:param ctx:
"""
ctx.run("rm dist/*.*", warn=True)
ctx.run("python setup.py sdist bdist_wheel")
ctx.run("twine upload dist/*")
@task
def set_ver(ctx, version):
with open("pymatgen/core/__init__.py", "rt") as f:
contents = f.read()
contents = re.sub(r"__version__ = .*\n", '__version__ = "%s"\n' % version, contents)
with open("pymatgen/core/__init__.py", "wt") as f:
f.write(contents)
with open("setup.py", "rt") as f:
contents = f.read()
contents = re.sub(r"version=([^,]+),", 'version="%s",' % version, contents)
with open("setup.py", "wt") as f:
f.write(contents)
@task
def release_github(ctx, version):
"""
Release to Github using Github API.
:param ctx:
"""
with open("CHANGES.rst") as f:
contents = f.read()
toks = re.split(r"\-+", contents)
desc = toks[1].strip()
toks = desc.split("\n")
desc = "\n".join(toks[:-1]).strip()
payload = {
"tag_name": "v" + version,
"target_commitish": "master",
"name": "v" + version,
"body": desc,
"draft": False,
"prerelease": False,
}
response = requests.post(
"https://api.github.com/repos/materialsproject/pymatgen/releases",
data=json.dumps(payload),
headers={"Authorization": "token " + os.environ["GITHUB_RELEASES_TOKEN"]},
)
print(response.text)
@task
def post_discourse(ctx, version):
"""
Post release announcement to http://discuss.matsci.org/c/pymatgen.
:param ctx:
"""
with open("CHANGES.rst") as f:
contents = f.read()
toks = re.split(r"\-+", contents)
desc = toks[1].strip()
toks = desc.split("\n")
desc = "\n".join(toks[:-1]).strip()
raw = "v" + version + "\n\n" + desc
payload = {
"topic_id": 36,
"raw": raw,
}
response = requests.post(
"https://discuss.matsci.org/c/pymatgen/posts.json",
data=payload,
params={"api_username": os.environ["DISCOURSE_API_USERNAME"], "api_key": os.environ["DISCOURSE_API_KEY"]},
)
print(response.text)
@task
def update_changelog(ctx, version, sim=False):
"""
Create a preliminary change log using the git logs.
    :param ctx:
    :param version: Version string for the new change log section.
    :param sim: If True, print the would-be change log instead of writing it.
"""
output = subprocess.check_output(["git", "log", "--pretty=format:%s", "v%s..HEAD" % CURRENT_VER])
lines = []
misc = []
for l in output.decode("utf-8").strip().split("\n"):
m = re.match(r"Merge pull request \#(\d+) from (.*)", l)
if m:
pr_number = m.group(1)
contrib, pr_name = m.group(2).split("/", 1)
response = requests.get(
f"https://api.github.com/repos/materialsproject/pymatgen/pulls/{pr_number}"
)
lines.append(f"* PR #{pr_number} from @{contrib} {pr_name}")
            body = response.json()["body"] or ""  # body can be None for empty PR descriptions
            for ll in body.split("\n"):
ll = ll.strip()
if ll in ["", "## Summary"]:
continue
elif ll.startswith("## Checklist") or ll.startswith("## TODO"):
break
lines.append(f" {ll}")
        else:
            misc.append(l)
with open("CHANGES.rst") as f:
contents = f.read()
l = "=========="
toks = contents.split(l)
head = "\n\nv%s\n" % version + "-" * (len(version) + 1) + "\n"
toks.insert(-1, head + "\n".join(lines))
if not sim:
with open("CHANGES.rst", "w") as f:
f.write(toks[0] + l + "".join(toks[1:]))
ctx.run("open CHANGES.rst")
else:
print(toks[0] + l + "".join(toks[1:]))
print("The following commit messages were not included...")
print("\n".join(misc))
@task
def release(ctx, version, nodoc=False):
"""
Run full sequence for releasing pymatgen.
    :param ctx:
    :param version: Version string for the release.
    :param nodoc: Whether to skip doc generation.
"""
ctx.run("rm -r dist build pymatgen.egg-info", warn=True)
set_ver(ctx, version)
if not nodoc:
make_doc(ctx)
ctx.run("git add .")
ctx.run('git commit -a -m "Update docs"')
ctx.run("git push")
release_github(ctx, version)
# post_discourse(ctx, warn=True)
@task
def open_doc(ctx):
"""
Open local documentation in web browser.
:param ctx:
"""
pth = os.path.abspath("docs/_build/html/index.html")
webbrowser.open("file://" + pth)
@task
def lint(ctx):
for cmd in ["pycodestyle", "mypy", "flake8", "pydocstyle"]:
ctx.run("%s pymatgen" % cmd)
|
gmatteo/pymatgen
|
tasks.py
|
Python
|
mit
| 9,817
|
[
"pymatgen"
] |
49ad6a3caa085f9da5722886c232e2eb754ad5bee2f0382a36959e199f6711c3
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Common functions for coordinate reading --- :mod:`MDAnalysis.coordinates.core`
==============================================================================
Important base classes are collected in :mod:`MDAnalysis.coordinates.base`.
.. autofunction:: reader
.. autofunction:: writer
Helper functions:
.. autofunction:: get_reader_for
.. autofunction:: get_writer_for
"""
from __future__ import absolute_import
import six
from ..lib import util
from ..lib.mdamath import triclinic_box, triclinic_vectors, box_volume
from ..core._get_readers import get_reader_for, get_writer_for
def reader(filename, **kwargs):
"""Provide a trajectory reader instance for *filename*.
This function guesses the file format from the extension of *filename* and
it will throw a :exc:`TypeError` if the extension is not recognized.
In most cases, no special keyword arguments are necessary.
All other keywords are passed on to the underlying Reader classes; see
their documentation for details.
Parameters
----------
filename : str or tuple
filename (or tuple of filenames) of the input coordinate file
kwargs
Keyword arguments for the selected Reader class.
Returns
-------
A Reader object
.. SeeAlso:: For trajectory formats: :class:`~DCD.DCDReader`,
:class:`~XTC.XTCReader`, :class:`~TRR.TRRReader`,
:class:`~XYZ.XYZReader`. For single frame formats:
:class:`~CRD.CRDReader`, and
:class:`~PDB.PDBReader`, :class:`~GRO.GROReader`,
.. deprecated:: 0.15.0
The "permissive" flag is not used anymore (and effectively
defaults to True); it will be completely removed in 0.16.0.
"""
if isinstance(filename, tuple):
Reader = get_reader_for(filename[0],
format=filename[1])
return Reader(filename[0], **kwargs)
else:
Reader = get_reader_for(filename)
return Reader(filename, **kwargs)
def writer(filename, n_atoms=None, **kwargs):
"""Initialize a trajectory writer instance for *filename*.
Parameters
----------
filename : str
Output filename of the trajectory; the extension determines the
format.
n_atoms : int, optional
        The number of atoms in the output trajectory; can be omitted
for single-frame writers.
multiframe : bool, optional
``True``: write a trajectory with multiple frames; ``False``
only write a single frame snapshot; ``None`` first try to get
a multiframe writer and then fall back to single frame [``None``]
kwargs : optional
Keyword arguments for the writer; all trajectory Writers accept
at least
*start*
starting time [0]
*step*
step size in frames [1]
*dt*
length of time between two frames, in ps [1.0]
        Some writers accept additional arguments, which need to be looked
        up in the documentation of the writer.
Returns
-------
A Writer object
See Also
--------
MDAnalysis.coordinates.DCD.DCDWriter : DCD trajectories
MDAnalysis.coordinates.XTC.XTCWriter : Gromacs XTC trajectories
MDAnalysis.coordinates.TRR.TRRWriter : Gromacs TRR trajectories
.. versionchanged:: 0.7.6
Added *multiframe* keyword. See also :func:`get_writer_for`.
"""
Writer = get_writer_for(filename, format=kwargs.pop('format', None),
multiframe=kwargs.pop('multiframe', None))
return Writer(filename, n_atoms=n_atoms, **kwargs)
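# --- Usage sketch (illustrative addition, not part of the original module) ---
# Round-trips a trajectory through the two factory functions above. The file
# names are hypothetical, and Reader/Writer attributes can vary between
# MDAnalysis versions, so treat this as a sketch rather than canonical usage.
if __name__ == "__main__":
    trj = reader("input.xtc")                      # format guessed from extension
    out = writer("output.dcd", n_atoms=trj.n_atoms)
    for ts in trj:                                 # iterate over Timestep objects
        out.write(ts)
    out.close()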
|
kain88-de/mdanalysis
|
package/MDAnalysis/coordinates/core.py
|
Python
|
gpl-2.0
| 4,610
|
[
"Gromacs",
"MDAnalysis"
] |
48842b2af38246a8a02e71d3d1e8e785fa11eb1aa5bff062a51650b9b077a3e6
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Licence: BSD 3 clause
from __future__ import division
import numbers
import numpy as np
from abc import ABCMeta, abstractmethod
from warnings import warn
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import array2d, check_random_state
from ..utils.fixes import unique
from ..utils.validation import check_arrays
from ._tree import Criterion, Splitter, Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE}
SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
max_features,
random_state):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.max_features = max_features
self.random_state = random_state
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_mask=None, X_argsorted=None, check_input=True,
sample_weight=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples. Use ``dtype=np.float32`` for maximum
efficiency.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (integers that correspond to classes in
classification, real numbers in regression).
Use ``dtype=np.float64`` and ``order='C'`` for maximum
efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
            Allow to bypass several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
# Deprecations
if sample_mask is not None:
warn("The sample_mask parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
if X_argsorted is not None:
warn("The X_argsorted parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
# Convert data
if check_input:
X, = check_arrays(X, dtype=DTYPE, sparse_format="dense",
check_ccontiguous=True)
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
if y.ndim == 1:
            # reshape is necessary to preserve data contiguity;
            # y[:, np.newaxis] would not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
for k in xrange(self.n_outputs_):
classes_k, y[:, k] = unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = (2 ** 31) - 1 if self.max_depth is None else self.max_depth
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
max_features = int(self.max_features * self.n_features_)
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_,
self.n_outputs_, splitter, max_depth,
min_samples_split, self.min_samples_leaf,
random_state)
self.tree_.build(X, y, sample_weight=sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def predict(self, X):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = array2d(X, dtype=DTYPE)
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = self.tree_.predict(X)
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in xrange(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
`tree_` : Tree object
The underlying Tree object.
`max_features_` : int,
        The inferred value of max_features.
`classes_` : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
`n_classes_` : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
`feature_importances_` : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
max_features=None,
random_state=None,
min_density=None,
compute_importances=None):
super(DecisionTreeClassifier, self).__init__(criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
max_features,
random_state)
if min_density is not None:
warn("The min_density parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
if compute_importances is not None:
warn("Setting compute_importances is no longer required as "
"version 0.14. Variable importances are now computed on the "
"fly when accessing the feature_importances_ attribute. "
"This parameter will be removed in 0.16.",
DeprecationWarning)
def predict_proba(self, X):
"""Predict class probabilities of the input samples X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by arithmetical order.
"""
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = array2d(X, dtype=DTYPE)
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first.")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in xrange(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. Classes are
ordered by arithmetical order.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in xrange(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A tree regressor.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
`tree_` : Tree object
The underlying Tree object.
`max_features_` : int,
        The inferred value of max_features.
`feature_importances_` : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
max_features=None,
random_state=None,
min_density=None,
compute_importances=None):
super(DecisionTreeRegressor, self).__init__(criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
max_features,
random_state)
if min_density is not None:
warn("The min_density parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
if compute_importances is not None:
warn("Setting compute_importances is no longer required as "
"version 0.14. Variable importances are now computed on the "
"fly when accessing the feature_importances_ attribute. "
"This parameter will be removed in 0.16.",
DeprecationWarning)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
max_features="auto",
random_state=None,
min_density=None,
compute_importances=None):
super(ExtraTreeClassifier, self).__init__(criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
max_features,
random_state)
if min_density is not None:
warn("The min_density parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
if compute_importances is not None:
warn("Setting compute_importances is no longer required as "
"version 0.14. Variable importances are now computed on the "
"fly when accessing the feature_importances_ attribute. "
"This parameter will be removed in 0.16.",
DeprecationWarning)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
max_features="auto",
random_state=None,
min_density=None,
compute_importances=None):
super(ExtraTreeRegressor, self).__init__(criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
max_features,
random_state)
if min_density is not None:
warn("The min_density parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
if compute_importances is not None:
warn("Setting compute_importances is no longer required as "
"version 0.14. Variable importances are now computed on the "
"fly when accessing the feature_importances_ attribute. "
"This parameter will be removed in 0.16.",
DeprecationWarning)
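# --- Usage sketch (illustrative addition, not part of the original module) ---
# Fits the classifier defined above on a toy dataset and reads back the
# normalized Gini importances documented under `feature_importances_`.
# Assumes a scikit-learn checkout of this vintage so sklearn.datasets imports.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    iris = load_iris()
    clf = DecisionTreeClassifier(max_depth=3, random_state=0)
    clf.fit(iris.data, iris.target)
    print(clf.predict(iris.data[:5]))   # predicted class labels
    print(clf.feature_importances_)     # normalized; sums to 1.0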
|
Eric89GXL/scikit-learn
|
sklearn/tree/tree.py
|
Python
|
bsd-3-clause
| 29,327
|
[
"Brian"
] |
b2cad71d982cbd929382a45eddc4dca3ba435ae2fe53ca039cf9f6d406a741d3
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 16 11:02:59 2013
Author: Josef Perktold
"""
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.nonparametric.api as npar
from statsmodels.sandbox.nonparametric import kernels
from statsmodels.distributions.mixture_rvs import mixture_rvs
# example from test_kde.py mixture of two normal distributions
np.random.seed(12345)
x = mixture_rvs([.25,.75], size=200, dist=[stats.norm, stats.norm],
kwargs = (dict(loc=-1, scale=.5),dict(loc=1, scale=.5)))
x.sort() # not needed
kde = npar.KDEUnivariate(x)
kde.fit('gau')
ci = kde.kernel.density_confint(kde.density, len(x))
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.hist(x, bins=15, density=True, alpha=0.25)
ax.plot(kde.support, kde.density, lw=2, color='red')
ax.fill_between(kde.support, ci[:,0], ci[:,1],
                color='grey', alpha=0.7)
ax.set_title('Kernel Density Gaussian (bw = %4.2f)' % kde.bw)
# use all kernels directly
x_grid = np.linspace(np.min(x), np.max(x), 51)
x_grid = np.linspace(-3, 3, 51)
kernel_names = ['Biweight', 'Cosine', 'Epanechnikov', 'Gaussian',
'Triangular', 'Triweight', #'Uniform',
]
fig = plt.figure()
for ii, kn in enumerate(kernel_names):
ax = fig.add_subplot(2, 3, ii+1) # without uniform
ax.hist(x, bins=10, density=True, alpha=0.25)
    # reduce bandwidth for Gaussian and Uniform, which are too large in this example
if kn in ['Gaussian', 'Uniform']:
args = (0.5,)
else:
args = ()
kernel = getattr(kernels, kn)(*args)
kde_grid = [kernel.density(x, xi) for xi in x_grid]
confint_grid = kernel.density_confint(kde_grid, len(x))
ax.plot(x_grid, kde_grid, lw=2, color='red', label=kn)
ax.fill_between(x_grid, confint_grid[:,0], confint_grid[:,1],
                    color='grey', alpha=0.7)
ax.legend(loc='upper left')
plt.show()
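# --- Usage note (illustrative addition, not part of the original script) ---
# To keep the kernel-comparison figure, save it before (or instead of)
# calling plt.show():
#     fig.savefig("kde_kernels_confint.png", dpi=150)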
|
bashtage/statsmodels
|
statsmodels/examples/ex_kde_confint.py
|
Python
|
bsd-3-clause
| 1,941
|
[
"Gaussian"
] |
77de82f590cbb90c07de86de3f088d5149abb9d6d5cc49f368520f96abf82a36
|
# electronics.py ---
#
# Filename: electronics.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Wed Feb 22 00:53:38 2012 (+0530)
# Version:
# Last-Updated: Tue Jul 10 10:28:40 2012 (+0530)
# By: subha
# Update #: 221
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
# 2012-02-22 23:22:30 (+0530) Subha - the circuitry put in a class.
#
# Code:
import numpy
import moose
class ClampCircuit(moose.Neutral):
"""Container for a Voltage-Clamp/Current clamp circuit."""
defaults = {
'level1': 25.0,
'width1': 50.0,
'delay1': 2.0,
'delay2': 1e6,
'trigMode': 0,
'delay3': 1e9
}
def __init__(self, path, compartment):
moose.Neutral.__init__(self, path)
self.pulsegen = moose.PulseGen(path+"/pulse") # holding voltage/current generator
self.pulsegen.count = 2
self.pulsegen.firstLevel = 25.0
self.pulsegen.firstWidth = 50.0
self.pulsegen.firstDelay = 2.0
self.pulsegen.secondDelay = 0.0
self.pulsegen.trigMode = 2
self.gate = moose.PulseGen(path+"/gate") # holding voltage/current generator
self.gate.level[0] = 1.0
self.gate.delay[0] = 0.0
self.gate.width[0] = 1e9
moose.connect(self.gate, 'output', self.pulsegen, 'input')
self.lowpass = moose.RC(path+"/lowpass") # lowpass filter
self.lowpass.R = 1.0
self.lowpass.C = 0.03
self.vclamp = moose.DiffAmp(path+"/vclamp")
self.vclamp.gain = 0.0
self.vclamp.saturation = 1e10
self.iclamp = moose.DiffAmp(path+"/iclamp")
self.iclamp.gain = 0.0
self.iclamp.saturation = 1e10
self.pid = moose.PIDController(path+"/pid")
self.pid.gain = 0.5
self.pid.tauI = 0.02
self.pid.tauD = 0.005
self.pid.saturation = 1e10
# Connect current clamp circuitry
moose.connect(self.pulsegen, "output", self.iclamp, "plusIn")
moose.connect(self.iclamp, "output", compartment, "injectMsg")
# Connect voltage clamp circuitry
moose.connect(self.pulsegen, "output", self.lowpass, "injectIn")
moose.connect(self.lowpass, "output", self.vclamp, "plusIn")
moose.connect(self.vclamp, "output", self.pid, "commandIn")
moose.connect(compartment, "VmOut", self.pid, "sensedIn")
moose.connect(self.pid, "output", compartment, "injectMsg")
current_table = moose.Table("/data/Im")
moose.connect(current_table, "requestOut", compartment, "getIm")
def configure_pulses(self, baseLevel=0.0, firstLevel=0.1, firstDelay=5.0, firstWidth=40.0, secondLevel=0.0, secondDelay=1e6, secondWidth=0.0, singlePulse=True):
"""Set up the pulse generator."""
self.pulsegen.baseLevel = baseLevel
self.pulsegen.firstLevel = firstLevel
self.pulsegen.firstWidth = firstWidth
self.pulsegen.firstDelay = firstDelay
self.pulsegen.secondLevel = secondLevel
self.pulsegen.secondDelay = secondDelay
self.pulsegen.secondWidth = secondWidth
if singlePulse:
self.pulsegen.trigMode = 1
else:
self.pulsegen.trigMode = 0
def do_voltage_clamp(self):
"""Switch to voltage clamp circuitry. After this the simdt may
need to be changed for correct performance."""
self.vclamp.gain = 1.0
self.iclamp.gain = 0.0
self.pid.gain = 0.5
self.pid.tauD = 0.005
self.pid.tauI = 0.02
def do_current_clamp(self):
"""Switch to current clamp circuitry. After this the simdt may
need to be changed for correct performance."""
self.iclamp.gain = 1.0
self.vclamp.gain = 0.0
self.pid.gain = 0.0
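# --- Usage sketch (illustrative addition, not part of the original file) ---
# Wires the circuit to a compartment; shown as comments because it needs a
# running MOOSE model, and the '/model/...' paths are hypothetical:
#     soma = moose.Compartment('/model/soma')
#     clamp = ClampCircuit('/model/electronics', soma)
#     clamp.configure_pulses(firstLevel=0.1, firstDelay=5.0, firstWidth=40.0)
#     clamp.do_current_clamp()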
#
# electronics.py ends here
|
BhallaLab/moose
|
moose-examples/squid/electronics.py
|
Python
|
gpl-3.0
| 3,885
|
[
"MOOSE"
] |
51af2b4cec81cf71251f59205b0c1ef005bf5e6ae6f3af8c44053f5de05026a3
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for Student's Profile Page.
"""
from contextlib import contextmanager
from datetime import datetime
from bok_choy.web_app_test import WebAppTest
from nose.plugins.attrib import attr
from ...pages.common.logout import LogoutPage
from ...pages.lms.account_settings import AccountSettingsPage
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.learner_profile import LearnerProfilePage
from ...pages.lms.dashboard import DashboardPage
from ..helpers import EventsTestMixin
class LearnerProfileTestMixin(EventsTestMixin):
"""
Mixin with helper methods for testing learner profile pages.
"""
PRIVACY_PUBLIC = u'all_users'
PRIVACY_PRIVATE = u'private'
PUBLIC_PROFILE_FIELDS = ['username', 'country', 'language_proficiencies', 'bio']
PRIVATE_PROFILE_FIELDS = ['username']
PUBLIC_PROFILE_EDITABLE_FIELDS = ['country', 'language_proficiencies', 'bio']
USER_SETTINGS_CHANGED_EVENT_NAME = u"edx.user.settings.changed"
def log_in_as_unique_user(self):
"""
Create a unique user and return the account's username and id.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
auto_auth_page = AutoAuthPage(self.browser, username=username).visit()
user_id = auto_auth_page.get_user_id()
return username, user_id
def set_public_profile_fields_data(self, profile_page):
"""
Fill in the public profile fields of a user.
"""
profile_page.value_for_dropdown_field('language_proficiencies', 'English')
profile_page.value_for_dropdown_field('country', 'United Arab Emirates')
profile_page.set_value_for_textarea_field('bio', 'Nothing Special')
def visit_profile_page(self, username, privacy=None):
"""
Visit a user's profile page and if a privacy is specified and
is different from the displayed value, then set the privacy to that value.
"""
profile_page = LearnerProfilePage(self.browser, username)
# Change the privacy if requested by loading the page and
# changing the drop down
if privacy is not None:
profile_page.visit()
# Change the privacy setting if it is not the desired one already
profile_page.privacy = privacy
# Verify the current setting is as expected
if privacy == self.PRIVACY_PUBLIC:
self.assertEqual(profile_page.privacy, 'all_users')
else:
self.assertEqual(profile_page.privacy, 'private')
if privacy == self.PRIVACY_PUBLIC:
self.set_public_profile_fields_data(profile_page)
# Reset event tracking so that the tests only see events from
# loading the profile page.
self.start_time = datetime.now() # pylint: disable=attribute-defined-outside-init
# Load the page
profile_page.visit()
return profile_page
def set_birth_year(self, birth_year):
"""
Set birth year for the current user to the specified value.
"""
account_settings_page = AccountSettingsPage(self.browser)
account_settings_page.visit()
account_settings_page.wait_for_page()
self.assertEqual(
account_settings_page.value_for_dropdown_field('year_of_birth', str(birth_year)),
str(birth_year)
)
def verify_profile_page_is_public(self, profile_page, is_editable=True):
"""
Verify that the profile page is currently public.
"""
self.assertEqual(profile_page.visible_fields, self.PUBLIC_PROFILE_FIELDS)
if is_editable:
self.assertTrue(profile_page.privacy_field_visible)
self.assertEqual(profile_page.editable_fields, self.PUBLIC_PROFILE_EDITABLE_FIELDS)
else:
self.assertEqual(profile_page.editable_fields, [])
def verify_profile_page_is_private(self, profile_page, is_editable=True):
"""
Verify that the profile page is currently private.
"""
if is_editable:
self.assertTrue(profile_page.privacy_field_visible)
self.assertEqual(profile_page.visible_fields, self.PRIVATE_PROFILE_FIELDS)
def verify_profile_page_view_event(self, requesting_username, profile_user_id, visibility=None):
"""
Verifies that the correct view event was captured for the profile page.
"""
actual_events = self.wait_for_events(
start_time=self.start_time,
event_filter={'event_type': 'edx.user.settings.viewed', 'username': requesting_username},
number_of_matches=1)
self.assert_events_match(
[
{
'username': requesting_username,
'event': {
'user_id': int(profile_user_id),
'page': 'profile',
'visibility': unicode(visibility)
}
}
],
actual_events
)
@contextmanager
def verify_pref_change_event_during(self, username, user_id, setting, **kwargs):
"""Assert that a single setting changed event is emitted for the user_api_userpreference table."""
expected_event = {
'username': username,
'event': {
'setting': setting,
'user_id': int(user_id),
'table': 'user_api_userpreference',
'truncated': []
}
}
expected_event['event'].update(kwargs)
event_filter = {
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'username': username,
}
with self.assert_events_match_during(event_filter=event_filter, expected_events=[expected_event]):
yield
def initialize_different_user(self, privacy=None, birth_year=None):
"""
Initialize the profile page for a different test user
"""
username, user_id = self.log_in_as_unique_user()
# Set the privacy for the new user
if privacy is None:
privacy = self.PRIVACY_PUBLIC
self.visit_profile_page(username, privacy=privacy)
# Set the user's year of birth
if birth_year:
self.set_birth_year(birth_year)
# Log the user out
LogoutPage(self.browser).visit()
return username, user_id
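# --- Usage sketch (illustrative addition, not part of the original tests) ---
# Concrete test classes such as the one below mix this class into WebAppTest
# and call the helpers roughly as:
#     username, user_id = self.log_in_as_unique_user()
#     page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
#     self.verify_profile_page_is_public(page)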
@attr('shard_4')
class OwnLearnerProfilePageTest(LearnerProfileTestMixin, WebAppTest):
"""
Tests that verify a student's own profile page.
"""
def verify_profile_forced_private_message(self, username, birth_year, message=None):
"""
Verify age limit messages for a user.
"""
if birth_year is None:
birth_year = ""
self.set_birth_year(birth_year=birth_year)
profile_page = self.visit_profile_page(username)
self.assertTrue(profile_page.privacy_field_visible)
        if message:
            self.assertTrue(profile_page.age_limit_message_present)
            self.assertIn(message, profile_page.profile_forced_private_message)
        else:
            self.assertFalse(profile_page.age_limit_message_present)
def test_profile_defaults_to_public(self):
"""
Scenario: Verify that a new user's profile defaults to public.
Given that I am a new user.
When I go to my profile page.
Then I see that the profile visibility is set to public.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username)
self.verify_profile_page_is_public(profile_page)
def assert_default_image_has_public_access(self, profile_page):
"""
Assert that profile image has public access.
"""
self.assertTrue(profile_page.profile_has_default_image)
self.assertTrue(profile_page.profile_has_image_with_public_access())
def test_make_profile_public(self):
"""
Scenario: Verify that the user can change their privacy.
Given that I am a registered user
And I visit my private profile page
And I set the profile visibility to public
Then a user preference changed event should be recorded
When I reload the page
Then the profile visibility should be shown as public
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
with self.verify_pref_change_event_during(
username, user_id, 'account_privacy', old=self.PRIVACY_PRIVATE, new=self.PRIVACY_PUBLIC
):
profile_page.privacy = self.PRIVACY_PUBLIC
# Reload the page and verify that the profile is now public
self.browser.refresh()
profile_page.wait_for_page()
self.verify_profile_page_is_public(profile_page)
def test_make_profile_private(self):
"""
Scenario: Verify that the user can change their privacy.
Given that I am a registered user
And I visit my public profile page
And I set the profile visibility to private
Then a user preference changed event should be recorded
When I reload the page
Then the profile visibility should be shown as private
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
with self.verify_pref_change_event_during(
username, user_id, 'account_privacy', old=None, new=self.PRIVACY_PRIVATE
):
profile_page.privacy = self.PRIVACY_PRIVATE
# Reload the page and verify that the profile is now private
self.browser.refresh()
profile_page.wait_for_page()
self.verify_profile_page_is_private(profile_page)
def test_dashboard_learner_profile_link(self):
"""
Scenario: Verify that my profile link is present on dashboard page and we can navigate to correct page.
Given that I am a registered user.
When I go to Dashboard page.
And I click on username dropdown.
Then I see Profile link in the dropdown menu.
When I click on Profile link.
Then I will be navigated to Profile page.
"""
username, user_id = self.log_in_as_unique_user()
dashboard_page = DashboardPage(self.browser)
dashboard_page.visit()
dashboard_page.click_username_dropdown()
self.assertIn('Profile', dashboard_page.username_dropdown_link_text)
dashboard_page.click_my_profile_link()
my_profile_page = LearnerProfilePage(self.browser, username)
my_profile_page.wait_for_page()
def test_fields_on_my_private_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at her own private profile.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to private.
And I reload the page.
Then I should see the profile visibility selector dropdown.
Then I see some of the profile fields are shown.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
self.verify_profile_page_is_private(profile_page)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_fields_on_my_public_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at her own public profile.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public.
And I reload the page.
Then I should see the profile visibility selector dropdown.
Then I see all the profile fields are shown.
And `location`, `language` and `about me` fields are editable.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.verify_profile_page_is_public(profile_page)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PUBLIC)
def _test_dropdown_field(self, profile_page, field_id, new_value, displayed_value, mode):
"""
Test behaviour of a dropdown field.
"""
        profile_page.value_for_dropdown_field(field_id, new_value)
        self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
        self.assertEqual(profile_page.mode_for_field(field_id), mode)
        self.browser.refresh()
        profile_page.wait_for_page()
        self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
        self.assertEqual(profile_page.mode_for_field(field_id), mode)
def _test_textarea_field(self, profile_page, field_id, new_value, displayed_value, mode):
"""
Test behaviour of a textarea field.
"""
        profile_page.set_value_for_textarea_field(field_id, new_value)
        self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
        self.assertEqual(profile_page.mode_for_field(field_id), mode)
        self.browser.refresh()
        profile_page.wait_for_page()
        self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
        self.assertEqual(profile_page.mode_for_field(field_id), mode)
def test_country_field(self):
"""
Test behaviour of `Country` field.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public and set default values for public fields.
Then I set country value to `Pakistan`.
Then displayed country should be `Pakistan` and country field mode should be `display`
And I reload the page.
Then displayed country should be `Pakistan` and country field mode should be `display`
And I make `country` field editable
Then `country` field mode should be `edit`
And `country` field icon should be visible.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self._test_dropdown_field(profile_page, 'country', 'Pakistan', 'Pakistan', 'display')
profile_page.make_field_editable('country')
self.assertEqual(profile_page.mode_for_field('country'), 'edit')
self.assertTrue(profile_page.field_icon_present('country'))
def test_language_field(self):
"""
Test behaviour of `Language` field.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public and set default values for public fields.
Then I set language value to `Urdu`.
Then displayed language should be `Urdu` and language field mode should be `display`
And I reload the page.
Then displayed language should be `Urdu` and language field mode should be `display`
Then I set empty value for language.
Then displayed language should be `Add language` and language field mode should be `placeholder`
And I reload the page.
Then displayed language should be `Add language` and language field mode should be `placeholder`
And I make `language` field editable
Then `language` field mode should be `edit`
And `language` field icon should be visible.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self._test_dropdown_field(profile_page, 'language_proficiencies', 'Urdu', 'Urdu', 'display')
self._test_dropdown_field(profile_page, 'language_proficiencies', '', 'Add language', 'placeholder')
profile_page.make_field_editable('language_proficiencies')
        self.assertEqual(profile_page.mode_for_field('language_proficiencies'), 'edit')
self.assertTrue(profile_page.field_icon_present('language_proficiencies'))
def test_about_me_field(self):
"""
Test behaviour of `About Me` field.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public and set default values for public fields.
Then I set about me value to `ThisIsIt`.
Then displayed about me should be `ThisIsIt` and about me field mode should be `display`
And I reload the page.
Then displayed about me should be `ThisIsIt` and about me field mode should be `display`
Then I set empty value for about me.
        Then displayed about me should be `Tell other learners a little about yourself: where you live,
        what your interests are, why you're taking courses, or what you hope to learn.` and about me
        field mode should be `placeholder`
        And I reload the page.
        Then displayed about me should be `Tell other learners a little about yourself: where you live,
        what your interests are, why you're taking courses, or what you hope to learn.` and about me
        field mode should be `placeholder`
And I make `about me` field editable
Then `about me` field mode should be `edit`
"""
placeholder_value = (
"Tell other learners a little about yourself: where you live, what your interests are, "
"why you're taking courses, or what you hope to learn."
)
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self._test_textarea_field(profile_page, 'bio', 'ThisIsIt', 'ThisIsIt', 'display')
self._test_textarea_field(profile_page, 'bio', '', placeholder_value, 'placeholder')
profile_page.make_field_editable('bio')
        self.assertEqual(profile_page.mode_for_field('bio'), 'edit')
def test_birth_year_not_set(self):
"""
Verify message if birth year is not set.
Given that I am a registered user.
And birth year is not set for the user.
And I visit my profile page.
Then I should see a message that the profile is private until the year of birth is set.
"""
username, user_id = self.log_in_as_unique_user()
message = "You must specify your birth year before you can share your full profile."
self.verify_profile_forced_private_message(username, birth_year=None, message=message)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_user_is_under_age(self):
"""
Verify message if user is under age.
Given that I am a registered user.
And birth year is set so that age is less than 13.
And I visit my profile page.
Then I should see a message that the profile is private as I am under thirteen.
"""
username, user_id = self.log_in_as_unique_user()
under_age_birth_year = datetime.now().year - 10
self.verify_profile_forced_private_message(
username,
birth_year=under_age_birth_year,
message='You must be over 13 to share a full profile.'
)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_user_can_only_see_default_image_for_private_profile(self):
"""
Scenario: Default profile image behaves correctly for under age user.
Given that I am on my profile page with private access
And I can see default image
When I move my cursor to the image
        Then I cannot see the upload/remove image text
        And I cannot upload/remove the image.
"""
year_of_birth = datetime.now().year - 5
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
self.verify_profile_forced_private_message(
username,
year_of_birth,
message='You must be over 13 to share a full profile.'
)
self.assertTrue(profile_page.profile_has_default_image)
self.assertFalse(profile_page.profile_has_image_with_private_access())
def test_user_can_see_default_image_for_public_profile(self):
"""
Scenario: Default profile image behaves correctly for public profile.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
        Then I can see the upload/remove image text
        And I am able to upload a new image
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
def test_user_can_upload_the_profile_image_with_success(self):
"""
Scenario: Upload profile image works correctly.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
        Then I can see the upload/remove image text
        When I upload a new image via the file uploader
        Then I can see the changed image
        And I can also see the latest image after a reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
profile_page.visit()
self.assertTrue(profile_page.image_upload_success)
def test_user_can_see_error_for_exceeding_max_file_size_limit(self):
"""
Scenario: Upload profile image does not work for > 1MB image file.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
        Then I can see the upload/remove image text
        When I upload a new > 1MB image via the file uploader
        Then I can see the error message for the file size limit
        And I can still see the default image after a page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='larger_image.jpg')
self.assertEqual(profile_page.profile_image_message, "The file must be smaller than 1 MB in size.")
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_see_error_for_file_size_below_the_min_limit(self):
"""
Scenario: Upload profile image does not work for < 100 Bytes image file.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
        Then I can see the upload/remove image text
        When I upload a new < 100 Bytes image via the file uploader
        Then I can see the error message for the minimum file size limit
        And I can still see the default image after a page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='list-icon-visited.png')
self.assertEqual(profile_page.profile_image_message, "The file must be at least 100 bytes in size.")
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_see_error_for_wrong_file_type(self):
"""
Scenario: Upload profile image does not work for wrong file types.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
        Then I can see the upload/remove image text
        When I upload a new csv file via the file uploader
        Then I can see the error message for a wrong/unsupported file type
        And I can still see the default image after a page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='cohort_users_only_username.csv')
self.assertEqual(
profile_page.profile_image_message,
"The file must be one of the following types: .gif, .png, .jpeg, .jpg."
)
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_remove_profile_image(self):
"""
Scenario: Remove profile image works correctly.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
        Then I can see the upload/remove image text
        When I click on the remove image link
        Then I can see the default image
        And I can still see the default image after a page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
self.assertTrue(profile_page.remove_profile_image())
self.assertTrue(profile_page.profile_has_default_image)
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
def test_user_cannot_remove_default_image(self):
"""
        Scenario: Remove profile image does not work for default images.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
        Then I can see only the upload image text
        And I cannot see the remove image text
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
self.assertFalse(profile_page.remove_link_present)
def test_eventing_after_multiple_uploads(self):
"""
Scenario: An event is fired when a user with a profile image uploads another image
Given that I am on my profile page with public access
And I upload a new image via file uploader
When I upload another image via the file uploader
Then two upload events have been emitted
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg', wait_for_upload_button=False)
@attr('shard_4')
class DifferentUserLearnerProfilePageTest(LearnerProfileTestMixin, WebAppTest):
"""
Tests that verify viewing the profile page of a different user.
"""
def test_different_user_private_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at a different user's private profile.
Given that I am a registered user.
And I visit a different user's private profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then I see some of the profile fields are shown.
"""
different_username, different_user_id = self.initialize_different_user(privacy=self.PRIVACY_PRIVATE)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
self.verify_profile_page_is_private(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE)
def test_different_user_under_age(self):
"""
Scenario: Verify that an under age user's profile is private to others.
Given that I am a registered user.
And I visit an under age user's profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then I see that only the private fields are shown.
"""
under_age_birth_year = datetime.now().year - 10
different_username, different_user_id = self.initialize_different_user(
privacy=self.PRIVACY_PUBLIC,
birth_year=under_age_birth_year
)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
self.verify_profile_page_is_private(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE)
def test_different_user_public_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at a different user's public profile.
Given that I am a registered user.
And I visit a different user's public profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then all the profile fields are shown.
Also `location`, `language` and `about me` fields are not editable.
"""
different_username, different_user_id = self.initialize_different_user(privacy=self.PRIVACY_PUBLIC)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
profile_page.wait_for_public_fields()
self.verify_profile_page_is_public(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PUBLIC)
@attr('a11y')
class LearnerProfileA11yTest(LearnerProfileTestMixin, WebAppTest):
"""
Class to test learner profile accessibility.
"""
def test_editable_learner_profile_a11y(self):
"""
Test the accessibility of the editable version of the profile page
(user viewing her own public profile).
"""
username, _ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username)
profile_page.a11y_audit.config.set_rules({
"ignore": [
'color-contrast', # TODO: AC-232
'skip-link', # TODO: AC-179
'link-href', # TODO: AC-231
],
})
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.make_field_editable('language_proficiencies')
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.make_field_editable('bio')
profile_page.a11y_audit.check_for_accessibility_errors()
def test_read_only_learner_profile_a11y(self):
"""
Test the accessibility of the read-only version of a public profile page
(user viewing someone else's profile page).
"""
# initialize_different_user should cause country, language, and bio to be filled out (since
# privacy is public). It doesn't appear that this is happening, although the method
# works in regular bokchoy tests. Perhaps a problem with phantomjs? So this test is currently
# only looking at a read-only profile page with a username.
different_username, _ = self.initialize_different_user(privacy=self.PRIVACY_PUBLIC)
self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
profile_page.a11y_audit.config.set_rules({
"ignore": [
'skip-link', # TODO: AC-179
'link-href', # TODO: AC-231
],
})
profile_page.a11y_audit.check_for_accessibility_errors()
|
ahmadiga/min_edx
|
common/test/acceptance/tests/lms/test_learner_profile.py
|
Python
|
agpl-3.0
| 34,284
|
[
"VisIt"
] |
54df58916cae3e33fc93e4408383771300729b7d7863bd4429dfda0e852ed743
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ..script_interface import PScriptInterface
class Mpiio:
"""MPI-IO object.
Used to output particle data using MPI-IO to binary files.
.. note::
See the :meth:`write` and :meth:`read` methods for documentation.
"""
def __init__(self):
self._instance = PScriptInterface(
"ScriptInterface::MPIIO::MPIIOScript")
def write(self, prefix=None, positions=False, velocities=False,
types=False, bonds=False):
"""MPI-IO write.
Outputs binary data using MPI-IO to several files starting with prefix.
Suffixes are:
- head: Information about fields that are dumped,
- pref: Information about processes: 1 int per process,
- id: Particle ids: 1 int per particle,
- pos: Position information (if dumped): 3 doubles per particle,
- vel: Velocity information (if dumped): 3 doubles per particle,
- typ: Type information (if dumped): 1 int per particle,
- bond: Bond information (if dumped): variable amount of data,
- boff: Bond offset information (if bonds are dumped): 1 int per particle.
.. note::
Do not read the files on a machine with a different architecture!
Parameters
----------
prefix : :obj:`str`
Common prefix for the filenames.
positions : :obj:`bool`, optional
Indicates if positions should be dumped.
velocities : :obj:`bool`, optional
Indicates if velocities should be dumped.
types : :obj:`bool`, optional
Indicates if types should be dumped.
bonds : :obj:`bool`, optional
Indicates if bonds should be dumped.
Raises
------
ValueError
If no prefix was given or none of the output fields are chosen.
"""
if prefix is None:
raise ValueError(
"Need to supply output prefix via 'prefix' kwarg.")
if not positions and not velocities and not types and not bonds:
raise ValueError("No output fields chosen.")
self._instance.call_method(
"write", prefix=prefix, pos=positions, vel=velocities, typ=types, bond=bonds)
def read(self, prefix=None, positions=False, velocities=False,
types=False, bonds=False):
"""MPI-IO read.
        This function reads data dumped by :meth:`write`. See the :meth:`write`
        documentation for details.
.. note::
The files must be read on the same number of processes that wrote
the data. The data must be read on a machine with the same
architecture (otherwise, this might silently fail).
"""
if prefix is None:
raise ValueError(
"Need to supply output prefix via 'prefix' kwarg.")
if not positions and not velocities and not types and not bonds:
raise ValueError("No output fields chosen.")
self._instance.call_method(
"read", prefix=prefix, pos=positions, vel=velocities, typ=types, bond=bonds)
mpiio = Mpiio()
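# A minimal usage sketch (hypothetical file prefix "dump"; assumes a running
# espressomd.System with particles, and that reading happens on the same
# number of MPI ranks that wrote the data):
#
#   from espressomd.io.mpiio import mpiio
#   mpiio.write("dump", positions=True, velocities=True)
#   # ... later, e.g. in a restart script ...
#   mpiio.read("dump", positions=True, velocities=True)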
|
psci2195/espresso-ffans
|
src/python/espressomd/io/mpiio.py
|
Python
|
gpl-3.0
| 3,867
|
[
"ESPResSo"
] |
33f6711c19f36a3ecf50fe73e6380208b1bdc5e05f0242db3a4a24aeeb1c3612
|
"""
A number of function that enhance IDLE on MacOSX when it used as a normal
GUI application (as opposed to an X11 application).
"""
import sys
import Tkinter
from os import path
_appbundle = None
def runningAsOSXApp():
"""
Returns True if Python is running from within an app on OSX.
If so, assume that Python was built with Aqua Tcl/Tk rather than
X11 Tcl/Tk.
"""
global _appbundle
if _appbundle is None:
_appbundle = (sys.platform == 'darwin' and '.app' in sys.executable)
return _appbundle
_carbonaquatk = None
def isCarbonAquaTk(root):
"""
Returns True if IDLE is using a Carbon Aqua Tk (instead of the
newer Cocoa Aqua Tk).
"""
global _carbonaquatk
if _carbonaquatk is None:
_carbonaquatk = (runningAsOSXApp() and
'aqua' in root.tk.call('tk', 'windowingsystem') and
'AppKit' not in root.tk.call('winfo', 'server', '.'))
return _carbonaquatk
def tkVersionWarning(root):
"""
    Returns a string warning message if the Tk version in use appears to
    be one known to cause problems with IDLE, such as the Apple Cocoa-based
    Tk 8.5 that was shipped with Mac OS X 10.6.
"""
if (runningAsOSXApp() and
('AppKit' in root.tk.call('winfo', 'server', '.')) and
(root.tk.call('info', 'patchlevel') == '8.5.7') ):
return (r"WARNING: The version of Tcl/Tk (8.5.7) in use may"
r" be unstable.\n"
r"Visit http://www.python.org/download/mac/tcltk/"
r" for current information.")
else:
return False
def addOpenEventSupport(root, flist):
"""
This ensures that the application will respond to open AppleEvents, which
    makes it feasible to use IDLE as the default application for Python files.
"""
def doOpenFile(*args):
for fn in args:
flist.open(fn)
# The command below is a hook in aquatk that is called whenever the app
# receives a file open event. The callback can have multiple arguments,
# one for every file that should be opened.
root.createcommand("::tk::mac::OpenDocument", doOpenFile)
def hideTkConsole(root):
try:
root.tk.call('console', 'hide')
except Tkinter.TclError:
# Some versions of the Tk framework don't have a console object
pass
def overrideRootMenu(root, flist):
"""
Replace the Tk root menu by something that's more appropriate for
IDLE.
"""
# The menu that is attached to the Tk root (".") is also used by AquaTk for
# all windows that don't specify a menu of their own. The default menubar
    # contains a number of menus, none of which are appropriate for IDLE. The
    # most annoying of those is an 'About Tcl/Tk...' menu in the application
    # menu.
#
# This function replaces the default menubar by a mostly empty one, it
# should only contain the correct application menu and the window menu.
#
# Due to a (mis-)feature of TkAqua the user will also see an empty Help
# menu.
    from Tkinter import Menu
from idlelib.EditorWindow import prepstr, get_accelerator
from idlelib import Bindings
from idlelib import WindowList
from idlelib.MultiCall import MultiCallCreator
menubar = Menu(root)
root.configure(menu=menubar)
menudict = {}
menudict['windows'] = menu = Menu(menubar, name='windows')
menubar.add_cascade(label='Window', menu=menu, underline=0)
def postwindowsmenu(menu=menu):
end = menu.index('end')
if end is None:
end = -1
if end > 0:
menu.delete(0, end)
WindowList.add_windows_to_menu(menu)
WindowList.register_callback(postwindowsmenu)
def about_dialog(event=None):
from idlelib import aboutDialog
aboutDialog.AboutDialog(root, 'About IDLE')
def config_dialog(event=None):
from idlelib import configDialog
root.instance_dict = flist.inversedict
configDialog.ConfigDialog(root, 'Settings')
def help_dialog(event=None):
from idlelib import textView
fn = path.join(path.abspath(path.dirname(__file__)), 'help.txt')
textView.view_file(root, 'Help', fn)
root.bind('<<about-idle>>', about_dialog)
root.bind('<<open-config-dialog>>', config_dialog)
root.createcommand('::tk::mac::ShowPreferences', config_dialog)
if flist:
root.bind('<<close-all-windows>>', flist.close_all_callback)
# The binding above doesn't reliably work on all versions of Tk
# on MacOSX. Adding command definition below does seem to do the
# right thing for now.
root.createcommand('exit', flist.close_all_callback)
if isCarbonAquaTk(root):
# for Carbon AquaTk, replace the default Tk apple menu
menudict['application'] = menu = Menu(menubar, name='apple')
menubar.add_cascade(label='IDLE', menu=menu)
Bindings.menudefs.insert(0,
('application', [
('About IDLE', '<<about-idle>>'),
None,
]))
tkversion = root.tk.eval('info patchlevel')
if tuple(map(int, tkversion.split('.'))) < (8, 4, 14):
# for earlier AquaTk versions, supply a Preferences menu item
Bindings.menudefs[0][1].append(
('_Preferences....', '<<open-config-dialog>>'),
)
else:
# assume Cocoa AquaTk
# replace default About dialog with About IDLE one
root.createcommand('tkAboutDialog', about_dialog)
# replace default "Help" item in Help menu
root.createcommand('::tk::mac::ShowHelp', help_dialog)
# remove redundant "IDLE Help" from menu
del Bindings.menudefs[-1][1][0]
def setupApp(root, flist):
"""
Perform setup for the OSX application bundle.
"""
if not runningAsOSXApp(): return
hideTkConsole(root)
overrideRootMenu(root, flist)
addOpenEventSupport(root, flist)
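# A rough usage sketch (hypothetical caller; in IDLE itself this wiring is
# done during application start-up, with `flist` being the idlelib file list
# bound to the Tk root):
#
#   root = Tkinter.Tk()
#   flist = ...  # idlelib FileList(root) or equivalent
#   setupApp(root, flist)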
|
bussiere/pypyjs
|
website/demo/home/rfk/repos/pypy/lib-python/2.7/idlelib/macosxSupport.py
|
Python
|
mit
| 6,028
|
[
"VisIt"
] |
d7f97d85bfe48bad1fa4baa9e6c817922f09288efb5b229bd3da7e86cb482640
|
#Ignore this sed code echo "126,55,55,71INTER,Cameron" | sed 's/\([0-9].*,[0-9].*,[0-9].*\)/\1/'
#\1 is the reference to the first stored pattern.
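# Illustrative example (not from the source): echo "abc" | sed 's/\(ab\)c/\1X/'
# prints "abX" -- \(...\) stores "ab" and \1 replays it in the replacement.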
import csv
import os
import pickle
d={}#declare a dictionary
dl=[]#A list to store the dictionary in.
# Region and cell-type membership lists (one name per line).
neocortex = [line.strip() for line in open('neocortex_region.txt', 'r')]
hippocampus = [line.strip() for line in open('hippocampus_region.txt', 'r')]
basalganglia = [line.strip() for line in open('basalganglia_region.txt', 'r')]
basalforebrain = [line.strip() for line in open('basalforebrainregion.txt', 'r')]
interneurons = [line.strip() for line in open('list_interneurons.txt', 'r')]
aspiny = [line.strip() for line in open('list_aspiny_cell.txt', 'r')]
pyramid = [line.strip() for line in open('pyramidalcell.txt', 'r')]
# First just read the values (the grid data lives in cngrid2.csv).
allrows = list(csv.reader(open('cngrid2.csv')))
os.chdir('main')
os.system('ls *.swc > swc_names.txt')
os.system('sed -i "s/.CNG.swc/ /" swc_names.txt')
f = open('swc_names.txt')
names = [line.strip() for line in open('swc_names.txt', 'r')]
f.close()
for x in range(3,len(names)-1): names[x]=names[x+1]
#now that I have names. Find them in the dictionary.
os.chdir('../')
import re # ie regexp
cnt=0
for i in range(0,len(allrows)):
a=allrows[i]
allrows2=[]
for j in range(0,len(interneurons)-1):
#if(s['name']==names[j]):
if(interneurons[j]==a[3]):
allrows[i].append("interneuron") #type 3
allrows[i].append(3)
#print allrows[i]
for k in range(0,len(aspiny)-1):
#if(s['name']==names[j]):
if(aspiny[k]==a[3]):
allrows[i].append("aspiny") #type 4
allrows[i].append(4)
#print allrows[i]
for l in range(0,len(pyramid)-1):
#if(s['name']==names[j]):
if(pyramid[l]==a[3]):
allrows[i].append("pyramid") # type 5
allrows[i].append(5)
#print allrows[i]
for m in range(0,len(neocortex)-1):
#if(s['name']==names[j]):
if(neocortex[m]==a[3]):
allrows[i].append("neocortex") # type 5
#allrows[i].append(5)
#print allrows[i]
for n in range(0,len(hippocampus)-1):
#if(s['name']==names[j]):
if(hippocampus[n]==a[3]):
allrows[i].append("hippocampus") # type 5
#allrows[i].append(5)
#print allrows[i]
for o in range(0,len(basalforebrain)-1):
#if(s['name']==names[j]):
if(basalforebrain[o]==a[3]):
allrows[i].append("basalforebrain") # type 5
#allrows[i].append(5)
#print allrows[i]
for p in range(0,len(basalganglia)-1):
#if(s['name']==names[j]):
if(basalganglia[p]==a[3]):
allrows[i].append("basalganglia") # type 5
#allrows[i].append(5)
#print allrows[i]
#basalforebrain
for i in range(0,len(allrows)):
a=allrows[i]
allrows2=[]
for j in range(0,len(names)-1):
#if(s['name']==names[j]):
if(names[j]==a[3]): #if the cell name is found.
#print j
#print a[3], names[j] , a[4]==names[j], i, j
a[3]=names[j]+".CNG.swc"
#print a[3]
allrows[i].append(a[3])
allrows[i].append(1)
#print allrows[i]
#allrows2.append(allrows[i])
cnt+=1
# Drop rows that never matched a .swc name: they still have only their
# original five columns.
allrows = [row for row in allrows if len(row) != 5]
#print allrows[3759+1]
pickle.dump(allrows, open("allrows.p", "wb"))
"""
import pandas as pd
csvalues = pd.read_csv('cngrid2.csv')
#columns=dict([(x[0:],x[1:]) for x in zip(*allrows)])
#atlasreader = csv.reader(csvfile,delimiter=',')#, fieldname='x'
for row in csv.DictReader(csvfile,delimiter=','):
for key in row:
#print 'key=%s, value=%s' % (key, row[key])
#d = dict(filter(None, csv.reader('cngrid.csv')))
#d = {rows[0]:rows[1] for rows in atlasreader}
#d={rows[:4] for rows in atlasreader if rows}
#atlasreader = csv.DictReader(csvfile)#,delimiter=',')#, fieldname='x'
#for row in list(csv.reader(open("copy-john.csv", "rb")))[1:]:
cnt=0
# The problem is to do with getting rows instead of columns.
# I need to go back to my general task of getting the trivial networks to plot stuff.
#
#
for row in atlasreader:
#(key,val)=xposition,x
if(row==1):
d['xp']=row[0]#x
d['yp']=row[1]#y
d['zp']=row[2]#z
d['name']=row[3]#name
d['repo']=row[4]#repo
d['number']=cnt
d['returned']=0#assign zero. Reassign 1 if NEURON can import morphology.
d['filename']=0
#print d, row
dl.append(d)
cnt+=1
#csv[csv.repo=='Markram']
class Nposition(object):
def __init__(self):
with open('cngrid.csv', 'rb') as csvfile:#First just read the values.
atlasreader = csv.reader(csvfile,delimiter=',')#, fieldname='x'
#atlasreader = csv.DictReader(csvfile)#,delimiter=',')#, fieldname='x'
#for row in list(csv.reader(open("copy-john.csv", "rb")))[1:]:
for row in atlasreader:
self.x=row[0]
self.y=row[1]
self.z=row[2]
self.name=row[3]
self.repo=row[4]
def get(self):
return (self.repo, self.name, self.x, self.y, self.z)
# #print row
# for key in row:
# #print 'key=%s, value=%s' % (key, row[key])
#for row in atlasreader:
##print row
"""
|
russelljjarvis/neurogateway
|
atlas_reader3.py
|
Python
|
gpl-3.0
| 5,865
|
[
"NEURON"
] |
77d50e34539fd0102d70a260e68fbaee666d5e057fbb3c8343ef8ad59871d7aa
|
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import espressomd
import numpy as np
import unittest as ut
import unittest_decorators as utx
import tests_common
class InteractionsNonBondedTest(ut.TestCase):
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
box_l = 10.
start_pos = np.random.rand(3) * box_l
axis = np.random.rand(3)
axis /= np.linalg.norm(axis)
step = axis * 0.01
step_width = np.linalg.norm(step)
def setUp(self):
self.system.box_l = [self.box_l] * 3
self.system.cell_system.skin = 0.
self.system.time_step = .1
self.system.part.add(id=0, pos=self.start_pos, type=0)
self.system.part.add(id=1, pos=self.start_pos, type=0)
def tearDown(self):
self.system.non_bonded_inter.reset()
self.system.part.clear()
# Required, since assertAlmostEqual does NOT check significant places
def assertFractionAlmostEqual(self, a, b, **args):
if abs(b) < 1E-8:
self.assertAlmostEqual(a, b, **args)
else:
self.assertAlmostEqual(a / b, 1., **args)
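    # Illustration (not from the original test): for a=2.0000001, b=2.0 the
    # helper compares a/b against 1., so the tolerance scales with magnitude;
    # for |b| < 1E-8 it falls back to the absolute comparison above.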
def assertItemsFractionAlmostEqual(self, a, b):
for i, ai in enumerate(a):
self.assertFractionAlmostEqual(ai, b[i])
#
# Tests
#
# Test Generic Lennard-Jones Potential
@utx.skipIfMissingFeatures("LENNARD_JONES_GENERIC")
def test_lj_generic(self):
lj_eps = 2.12
lj_sig = 1.37
lj_cut = 2.122
lj_off = 0.185
lj_b1 = 4.22
lj_b2 = 3.63
lj_e1 = 10.32
lj_e2 = 5.81
lj_shift = -0.13
self.system.non_bonded_inter[0, 0].generic_lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, offset=lj_off,
b1=lj_b1, b2=lj_b2, e1=lj_e1, e2=lj_e2, shift=lj_shift)
E_ref = tests_common.lj_generic_potential(
r=np.arange(1, 232) * self.step_width, eps=lj_eps, sig=lj_sig,
cutoff=lj_cut, offset=lj_off, b1=lj_b1, b2=lj_b2, e1=lj_e1,
e2=lj_e2, shift=lj_shift)
for i in range(231):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.lj_generic_force(
espressomd, r=(i + 1) * self.step_width, eps=lj_eps, sig=lj_sig,
cutoff=lj_cut, offset=lj_off, b1=lj_b1, b2=lj_b2, e1=lj_e1,
e2=lj_e2)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref[i])
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].generic_lennard_jones.set_params(
epsilon=0.)
# Test WCA Potential
@utx.skipIfMissingFeatures("WCA")
def test_wca(self):
wca_eps = 2.12
wca_sig = 1.37
wca_cutoff = wca_sig * 2.**(1. / 6.)
wca_shift = -((wca_sig / wca_cutoff)**12 - (wca_sig / wca_cutoff)**6)
self.system.non_bonded_inter[0, 0].wca.set_params(epsilon=wca_eps,
sigma=wca_sig)
E_ref = tests_common.lj_generic_potential(
r=np.arange(1, 232) * self.step_width, eps=wca_eps, sig=wca_sig,
cutoff=wca_cutoff, shift=4. * wca_shift)
for i in range(231):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.lj_generic_force(
espressomd, r=(i + 1) * self.step_width, eps=wca_eps,
sig=wca_sig, cutoff=wca_cutoff)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref[i])
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].wca.set_params(epsilon=0., sigma=1.)
# Test Generic Lennard-Jones Softcore Potential
@utx.skipIfMissingFeatures("LJGEN_SOFTCORE")
def test_lj_generic_softcore(self):
lj_eps = 2.12
lj_sig = 1.37
lj_cut = 2.125
lj_off = 0.182
lj_b1 = 6.22
lj_b2 = 3.63
lj_e1 = 13.32
lj_e2 = 3.74
lj_shift = 0.13
lj_delta = 0.1
lj_lam = 0.34
self.system.non_bonded_inter[0, 0].generic_lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, offset=lj_off,
b1=lj_b1, b2=lj_b2, e1=lj_e1, e2=lj_e2, shift=lj_shift,
delta=lj_delta, lam=lj_lam)
for i in range(231):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.lj_generic_potential(
r=(i + 1) * self.step_width, eps=lj_eps, sig=lj_sig,
cutoff=lj_cut, offset=lj_off, b1=lj_b1, b2=lj_b2, e1=lj_e1,
e2=lj_e2, shift=lj_shift, delta=lj_delta, lam=lj_lam)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.lj_generic_force(
espressomd, r=(i + 1) * self.step_width, eps=lj_eps, sig=lj_sig,
cutoff=lj_cut, offset=lj_off, b1=lj_b1, b2=lj_b2, e1=lj_e1,
e2=lj_e2, delta=lj_delta, lam=lj_lam)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].generic_lennard_jones.set_params(
epsilon=0.)
# Test Lennard-Jones Potential
@utx.skipIfMissingFeatures("LENNARD_JONES")
def test_lj(self):
lj_eps = 1.92
lj_sig = 1.03
lj_cut = 1.123
lj_shift = 0.92
self.system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift=lj_shift)
for i in range(113):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.lj_potential(
(i + 1) * self.step_width, lj_eps, lj_sig, lj_cut,
shift=lj_shift)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * \
tests_common.lj_force(espressomd, r=(i + 1) * self.step_width,
eps=lj_eps, sig=lj_sig, cutoff=lj_cut)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].lennard_jones.set_params(epsilon=0.)
# Test Lennard-Jones Cosine Potential
@utx.skipIfMissingFeatures("LJCOS")
def test_lj_cos(self):
ljcos_eps = 3.32
ljcos_sig = 0.73
ljcos_cut = 1.523
ljcos_offset = 0.223
self.system.non_bonded_inter[0, 0].lennard_jones_cos.set_params(
epsilon=ljcos_eps, sigma=ljcos_sig, cutoff=ljcos_cut,
offset=ljcos_offset)
for i in range(175):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.lj_cos_potential(
(i + 1) * self.step_width, eps=ljcos_eps, sig=ljcos_sig,
cutoff=ljcos_cut, offset=ljcos_offset)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.lj_cos_force(
espressomd, (i + 1) * self.step_width, eps=ljcos_eps,
sig=ljcos_sig, cutoff=ljcos_cut, offset=ljcos_offset)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[
0, 0].lennard_jones_cos.set_params(epsilon=0.)
# Test Lennard-Jones Cosine^2 Potential
@utx.skipIfMissingFeatures("LJCOS2")
def test_lj_cos2(self):
ljcos2_eps = 0.31
ljcos2_sig = 0.73
ljcos2_width = 1.523
ljcos2_offset = 0.321
self.system.non_bonded_inter[0, 0].lennard_jones_cos2.set_params(
epsilon=ljcos2_eps, sigma=ljcos2_sig, offset=ljcos2_offset,
width=ljcos2_width)
for i in range(267):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.lj_cos2_potential(
(i + 1) * self.step_width, eps=ljcos2_eps, sig=ljcos2_sig,
offset=ljcos2_offset, width=ljcos2_width)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.lj_cos2_force(
espressomd, r=(i + 1) * self.step_width, eps=ljcos2_eps,
sig=ljcos2_sig, offset=ljcos2_offset, width=ljcos2_width)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[
0, 0].lennard_jones_cos2.set_params(epsilon=0.)
# Test Smooth-step Potential
@utx.skipIfMissingFeatures("SMOOTH_STEP")
def test_smooth_step(self):
sst_eps = 4.92
sst_sig = 3.03
sst_cut = 1.253
sst_d = 2.52
sst_n = 11
sst_k0 = 2.13
self.system.non_bonded_inter[0, 0].smooth_step.set_params(
eps=sst_eps, sig=sst_sig, cutoff=sst_cut, d=sst_d, n=sst_n,
k0=sst_k0)
for i in range(126):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.smooth_step_potential(
r=(i + 1) * self.step_width, eps=sst_eps, sig=sst_sig,
cutoff=sst_cut, d=sst_d, n=sst_n, k0=sst_k0)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.smooth_step_force(
r=(i + 1) * self.step_width, eps=sst_eps, sig=sst_sig,
cutoff=sst_cut, d=sst_d, n=sst_n, k0=sst_k0)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].smooth_step.set_params(d=0., eps=0.)
# Test BMHTF Potential
@utx.skipIfMissingFeatures("BMHTF_NACL")
def test_bmhtf(self):
bmhtf_a = 3.92
bmhtf_b = 2.43
bmhtf_c = 1.23
bmhtf_d = 3.33
bmhtf_sig = 0.123
bmhtf_cut = 1.253
self.system.non_bonded_inter[0, 0].bmhtf.set_params(
a=bmhtf_a, b=bmhtf_b, c=bmhtf_c, d=bmhtf_d, sig=bmhtf_sig,
cutoff=bmhtf_cut)
for i in range(126):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.bmhtf_potential(
r=(i + 1) * self.step_width, a=bmhtf_a, b=bmhtf_b, c=bmhtf_c,
d=bmhtf_d, sig=bmhtf_sig, cutoff=bmhtf_cut)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.bmhtf_force(
r=(i + 1) * self.step_width, a=bmhtf_a, b=bmhtf_b, c=bmhtf_c,
d=bmhtf_d, sig=bmhtf_sig, cutoff=bmhtf_cut)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].bmhtf.set_params(a=0., c=0., d=0.)
# Test Morse Potential
@utx.skipIfMissingFeatures("MORSE")
def test_morse(self):
m_eps = 1.92
m_alpha = 3.03
m_cut = 1.253
m_rmin = 0.123
self.system.non_bonded_inter[0, 0].morse.set_params(
eps=m_eps, alpha=m_alpha, cutoff=m_cut, rmin=m_rmin)
for i in range(126):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.morse_potential(
r=(i + 1) * self.step_width, eps=m_eps, alpha=m_alpha,
cutoff=m_cut, rmin=m_rmin)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.morse_force(
r=(i + 1) * self.step_width, eps=m_eps, alpha=m_alpha,
cutoff=m_cut, rmin=m_rmin)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].morse.set_params(eps=0.)
# Test Buckingham Potential
@utx.skipIfMissingFeatures("BUCKINGHAM")
def test_buckingham(self):
b_a = 3.71
b_b = 2.92
b_c = 5.32
b_d = 4.11
b_disc = 1.03
b_cut = 2.253
b_shift = 0.133
b_f1 = 0.123
b_f2 = 0.123
self.system.non_bonded_inter[0, 0].buckingham.set_params(
a=b_a, b=b_b, c=b_c, d=b_d, discont=b_disc, cutoff=b_cut,
shift=b_shift)
for i in range(226):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.buckingham_potential(
r=(i + 1) * self.step_width, a=b_a, b=b_b, c=b_c, d=b_d,
discont=b_disc, cutoff=b_cut, shift=b_shift)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.buckingham_force(
r=(i + 1) * self.step_width, a=b_a, b=b_b, c=b_c, d=b_d,
discont=b_disc, cutoff=b_cut, shift=b_shift)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[
0, 0].buckingham.set_params(a=0., c=0., d=0., shift=0.)
# Test Soft-sphere Potential
@utx.skipIfMissingFeatures("SOFT_SPHERE")
def test_soft_sphere(self):
ss_a = 1.92
ss_n = 3.03
ss_cut = 1.123
ss_off = 0.123
self.system.non_bonded_inter[0, 0].soft_sphere.set_params(
a=ss_a, n=ss_n, cutoff=ss_cut, offset=ss_off)
for i in range(12):
self.system.part[1].pos = self.system.part[1].pos + self.step
for i in range(113):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.soft_sphere_potential(
r=(i + 13) * self.step_width, a=ss_a, n=ss_n, cutoff=ss_cut,
offset=ss_off)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.soft_sphere_force(
r=(i + 13) * self.step_width, a=ss_a, n=ss_n, cutoff=ss_cut,
offset=ss_off)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].soft_sphere.set_params(a=0.)
# Test Hertzian Potential
@utx.skipIfMissingFeatures("HERTZIAN")
def test_hertzian(self):
h_eps = 6.92
h_sig = 2.432
self.system.non_bonded_inter[0, 0].hertzian.set_params(
eps=h_eps, sig=h_sig)
for i in range(244):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.hertzian_potential(
r=(i + 1) * self.step_width, eps=h_eps, sig=h_sig)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.hertzian_force(
r=(i + 1) * self.step_width, eps=h_eps, sig=h_sig)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].hertzian.set_params(eps=0.)
# Test Gaussian Potential
@utx.skipIfMissingFeatures("GAUSSIAN")
def test_gaussian(self):
g_eps = 6.92
g_sig = 4.03
g_cut = 1.243
self.system.non_bonded_inter[0, 0].gaussian.set_params(
eps=g_eps, sig=g_sig, cutoff=g_cut)
for i in range(125):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.gaussian_potential(
r=(i + 1) * self.step_width, eps=g_eps, sig=g_sig, cutoff=g_cut)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.gaussian_force(
r=(i + 1) * self.step_width, eps=g_eps, sig=g_sig, cutoff=g_cut)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].gaussian.set_params(eps=0.)
# Test the Gay-Berne potential and the resulting force and torque
@utx.skipIfMissingFeatures("GAY_BERNE")
def test_gb(self):
# helper function definitions
def gradient(func, x0, dx=1.0e-7):
"""
Approximate the gradient of a function at a point x0
using the two-point central difference formula with spacing 2dx.
Parameters
----------
func: :obj:`function`
function for which the gradient is calculated
x0: (3,) array_like of :obj:`float`
Point in N-dimensional space where the derivatives are calculated
dx: :obj:`float`, optional
Spacing
Returns
-------
            (3,) array_like of :obj:`float`
the approximated gradient of func at x0
"""
partial_x = lambda x: (func(x0 + x) - func(x0 - x)) / (
2.0 * np.linalg.norm(x))
delta = np.array([dx, 0.0, 0.0])
return np.array([partial_x(np.roll(delta, i)) for i in range(3)])
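        # In formula form (illustrative): for each component i,
        #   df/dx_i (x0) ~= (f(x0 + dx*e_i) - f(x0 - dx*e_i)) / (2*dx),
        # i.e. the two-point central difference, second-order accurate in dx.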
def setup_system(gb_params):
k_1, k_2, mu, nu, sigma_0, epsilon_0, cut = gb_params
self.system.part.clear()
self.system.part.add(
id=0, pos=(1, 2, 3), rotation=(1, 1, 1), type=0)
self.system.part.add(
id=1, pos=(2.2, 2.1, 2.9), rotation=(1, 1, 1), type=0)
self.system.non_bonded_inter[0, 0].gay_berne.set_params(
sig=sigma_0, cut=cut, eps=epsilon_0, k1=k_1, k2=k_2, mu=mu,
nu=nu)
def advance_and_rotate_part(particle):
particle.pos = particle.pos + self.step
particle.rotate(axis=(1, 2, 3), angle=0.3)
particle.rotate(axis=(1, -2, -4), angle=1.2)
def get_simulation_energy():
return self.system.analysis.energy()["non_bonded"]
def get_reference_energy(gb_params, r, director1, director2):
k_1, k_2, mu, nu, sigma_0, epsilon_0, cut = gb_params
r_cut = r * cut / np.linalg.norm(r)
E_ref = tests_common.gay_berne_potential(
r, director1, director2, epsilon_0, sigma_0, mu, nu, k_1, k_2)
E_ref -= tests_common.gay_berne_potential(
r_cut, director1, director2, epsilon_0, sigma_0, mu, nu,
k_1, k_2)
return E_ref
def get_reference_force(gb_params, r, dir1, dir2):
return -gradient(
lambda x: get_reference_energy(gb_params, x, dir1, dir2),
x0=r, dx=1.0e-7)
def get_reference_torque(gb_params, r, dir1, dir2):
force_in_dir1 = gradient(
lambda x: get_reference_energy(gb_params, r, x, dir2),
x0=dir1, dx=1.0e-7)
return np.cross(-dir1, force_in_dir1)
# actual tests of the gb potential
k_1 = 1.2
k_2 = 2.4
mu = 2.
nu = 5.
sigma_0 = 1.2
epsilon_0 = 0.8
cut = 3.3
gb_params = (k_1, k_2, mu, nu, sigma_0, epsilon_0, cut)
setup_system(gb_params)
p1 = self.system.part[0]
p2 = self.system.part[1]
delta = 1.0e-6
for _ in range(100):
advance_and_rotate_part(p2)
self.system.integrator.run(recalc_forces=True, steps=0)
r = self.system.distance_vec(p1, p2)
director1 = p1.director
director2 = p2.director
# Calc energies
E_sim = get_simulation_energy()
E_ref = get_reference_energy(gb_params, r, director1, director2)
# Test energies
self.assertAlmostEqual(E_sim, E_ref, delta=delta)
# Calc forces
f1_sim = p1.f
f2_sim = p2.f
f2_ref = get_reference_force(gb_params, r, director1, director2)
# Test forces
# force equals minus the counter-force
self.assertTrue((f1_sim == -f2_sim).all())
# compare force to reference force
for i in range(3):
self.assertAlmostEqual(f2_sim[i], f2_ref[i], delta=delta)
# Calc torques
torque1_sim = p1.torque_lab
torque2_sim = p2.torque_lab
torque1_ref = get_reference_torque(
gb_params, r, director1, director2)
torque2_ref = get_reference_torque(
gb_params, r, director2, director1)
# Test torques
for i in range(3):
self.assertAlmostEqual(
torque1_sim[i],
torque1_ref[i],
delta=delta)
self.assertAlmostEqual(
torque2_sim[i],
torque2_ref[i],
delta=delta)
# Test zero energy
self.system.non_bonded_inter[0, 0].gay_berne.set_params(
sig=sigma_0, cut=0, eps=0, k1=k_1, k2=k_2, mu=mu, nu=nu)
self.system.integrator.run(0)
self.assertEqual(self.system.analysis.energy()["non_bonded"], 0.0)
if __name__ == '__main__':
ut.main()
|
mkuron/espresso
|
testsuite/python/interactions_non-bonded.py
|
Python
|
gpl-3.0
| 27,497
|
[
"ESPResSo",
"Gaussian"
] |
d9709b4fad87e242fb638cf7444962d8d802f38b4f9f526651e70358ec52fce2
|
import sys, os, re, types, HTMLParser, urllib2
import parser, db, data, constants
def _rune_cost(generator, filter_data, record, *args):
cost = 0
for rune_type in xrange(0, 4):
for i in xrange(0, getattr(record, 'rune_cost_%d' % (rune_type + 1))):
cost |= 1 << (rune_type * 2 + i)
return args[0] % cost
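# Illustrative example (not from the source): a record with rune_cost_1 = 2
# and all other rune costs 0 sets bits 0 and 1, so cost = 0b11 = 3, and
# args[0] % cost formats that integer into the caller-supplied string.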
class DataGenerator(object):
_class_names = [ None, 'Warrior', 'Paladin', 'Hunter', 'Rogue', 'Priest', 'Death Knight', 'Shaman', 'Mage', 'Warlock', 'Monk', 'Druid' ]
_class_masks = [ None, 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80, 0x100, 0x200, 0x400 ]
_race_names = [ None, 'Human', 'Orc', 'Dwarf', 'Night Elf', 'Undead', 'Tauren', 'Gnome', 'Troll', 'Goblin', 'Blood Elf', 'Draenei' ] + [ None ] * 10 + [ 'Worgen', None, 'Pandaren', 'Pandaren', 'Pandaren', None ]
_race_masks = [ None, 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80, 0x100, 0x200, 0x400 ] + [ None ] * 10 + [ 0x200000, None, 0x800000, 0x1000000, 0x2000000, None ]
_pet_names = [ None, 'Ferocity', 'Tenacity', None, 'Cunning' ]
_pet_masks = [ None, 0x1, 0x2, None, 0x4 ]
def debug(self, msg):
if self._options.debug == True:
sys.stderr.write("%s: %s\n" % ( self.__class__.__name__, msg ))
def dbc_version(self, wow, build):
if self._options.wowversion == 0:
return self._options.build >= build
else:
return self._options.wowversion >= wow and self._options.build >= build
def __init__(self, options):
self._options = options
self._class_map = { }
# Build some maps to help us output things
for i in xrange(0, len(DataGenerator._class_names)):
if not DataGenerator._class_names[i]:
continue
self._class_map[DataGenerator._class_names[i]] = i
self._class_map[1 << (i - 1)] = i
#self._class_map[DataGenerator._class_masks[i]] = i
self._race_map = { }
for i in xrange(0, len(DataGenerator._race_names)):
if not DataGenerator._race_names[i]:
continue
self._race_map[DataGenerator._race_names[i]] = i
self._race_map[1 << (i - 1)] = i
#print self._class_map, self._race_map
def format_str(self, string):
return '%s%s%s' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
string,
self._options.suffix and ('_%s' % self._options.suffix) or '' )
def initialize(self):
for i in self._dbc:
dbcname = i.replace('-', '_').lower()
setattr(self, '_%s' % dbcname,
parser.DBCParser(self._options, os.path.abspath(os.path.join(self._options.path, i))))
dbc = getattr(self, '_%s' % dbcname)
if not dbc.open_dbc():
return False
if '_%s_db' % dbc.name() not in dir(self):
setattr(self, '_%s_db' % dbc.name(), db.DBCDB(dbc._class))
dbase = getattr(self, '_%s_db' % dbc.name())
record = dbc.next_record()
while record != None:
dbase[record.id] = record
record = dbc.next_record()
if not self._options.cache_dir or not os.access(os.path.abspath(self._options.cache_dir), os.R_OK):
return True
cache_files = []
files = os.listdir(self._options.cache_dir)
for f in files:
fn = f[:f.find('.')]
if fn in self._dbc:
cache_files.append((fn, os.path.abspath(os.path.join(self._options.cache_dir, f))))
cache_parsers = { }
for cache_file in cache_files:
if cache_file[0] not in cache_parsers:
cache_parsers[cache_file[0]] = { 'parsers': [], 'ids': [ ] }
p = parser.DBCParser(self._options, cache_file[1])
if not p.open_dbc():
continue
cache_parsers[cache_file[0]]['parsers'].append(p)
for dbc, data in cache_parsers.iteritems():
if len(data['parsers']) == 0:
continue
data['parsers'].sort(cmp = lambda x, y: y._timestamp - x._timestamp)
dbase = getattr(self, '_%s_db' % data['parsers'][0].name())
for cache_parser in data['parsers']:
record = cache_parser.next_record()
while record != None:
if record.id not in data['ids']:
if dbase.get(record.id):
self.debug('Overwrote id %d using cache %s' % (record.id, cache_parser._fname))
else:
self.debug('Added id %d using cache %s' % (record.id, cache_parser._fname))
dbase[record.id] = record
data['ids'].append(record.id)
record = cache_parser.next_record()
return True
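# Note on the merge order above: the cmp sorts cache parsers descending by
# _timestamp (newest first), and data['ids'] records which ids have already
# been written, so the newest cache file wins over both older caches and
# the base DBC contents loaded before it.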
def filter(self):
return None
def generate(self, ids = None):
return ''
class RealPPMModifierGenerator(DataGenerator):
def __init__(self, options):
DataGenerator.__init__(self, options)
self._dbc = [ 'ChrSpecialization', 'SpellProcsPerMinute', 'SpellProcsPerMinuteMod', 'SpellAuraOptions' ]
self._specmap = { 0: 'SPEC_NONE' }
def initialize(self):
DataGenerator.initialize(self)
for i, data in self._chrspecialization_db.iteritems():
if data.class_id > 0:
self._specmap[i] = '%s_%s' % (
DataGenerator._class_names[data.class_id].upper().replace(" ", "_"),
data.name.upper().replace(" ", "_"),
)
return True
def generate(self, ids = None):
output_data = []
for i, data in self._spellprocsperminutemod_db.iteritems():
if data.id_chr_spec not in self._specmap.keys() or data.id_chr_spec == 0:
continue
spell_id = 0
for aopts_id, aopts_data in self._spellauraoptions_db.iteritems():
if aopts_data.id_ppm != data.id_ppm:
continue
spell_id = aopts_data.id_spell
break
if spell_id == 0:
continue
output_data.append((data.id_chr_spec, data.coefficient, spell_id))
output_data.sort(cmp = lambda l, r: l[2] - r[2])
s = '#include "specialization.hpp"\n'
s += '#include "data_definitions.hh"\n\n'
s += '#define %sRPPMMOD%s_SIZE (%d)\n\n' % (
(self._options.prefix and ('%s_' % self._options.prefix) or '').upper(),
(self._options.suffix and ('_%s' % self._options.suffix) or '').upper(),
len(output_data)
)
s += '// %d RPPM Modifiers, wow build level %d\n' % ( len(output_data), self._options.build )
s += 'static struct rppm_modifier_t __%srppmmodifier%s_data[] = {\n' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or ''
)
for data in output_data + [(0, 0, 0)]:
s += ' { %6u, %-20s, %.4f },\n' % (data[2], self._specmap[data[0]], data[1])
s += '};\n'
return s
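# For reference, each emitted row looks like (hypothetical values, shown
# only to illustrate the '%6u, %-20s, %.4f' formatting above):
#   { 123456, MAGE_FROST          , 1.5000 },
# one rppm_modifier_t per (spec, spell) coefficient, zero-terminated.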
class SpecializationEnumGenerator(DataGenerator):
def __init__(self, options):
self._dbc = [ 'ChrSpecialization' ]
DataGenerator.__init__(self, options)
def generate(self, ids = None):
enum_ids = [
[ None, None, None, None ], # pets come here
[ None, None, None, None ],
[ None, None, None, None ],
[ None, None, None, None ],
[ None, None, None, None ],
[ None, None, None, None ],
[ None, None, None, None ],
[ None, None, None, None ],
[ None, None, None, None ],
[ None, None, None, None ],
[ None, None, None, None ],
[ None, None, None, None ],
]
spec_translations = [
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
]
spec_to_idx_map = [ ]
max_specialization = 0
for spec_id, spec_data in self._chrspecialization_db.iteritems():
if spec_data.class_id > 0:
spec_name = '%s_%s' % (
DataGenerator._class_names[spec_data.class_id].upper().replace(" ", "_"),
spec_data.name.upper().replace(" ", "_"),
)
if spec_data.spec_id > max_specialization:
max_specialization = spec_data.spec_id
if len(spec_to_idx_map) < spec_id + 1:
spec_to_idx_map += [ -1 ] * ( ( spec_id - len(spec_to_idx_map) ) + 1 )
spec_to_idx_map[ spec_id ] = spec_data.spec_id
else:
spec_name = 'PET_%s' % (
spec_data.name.upper().replace(" ", "_")
)
for i in xrange(0, (max_specialization + 1) - len(enum_ids[ spec_data.class_id ] ) ):
enum_ids[ spec_data.class_id ].append( None )
enum_ids[ spec_data.class_id ][ spec_data.spec_id ] = { 'id': spec_id, 'name': spec_name }
spec_arr = []
s = 'enum specialization_e {\n'
s += ' SPEC_NONE = 0,\n'
s += ' SPEC_PET = 1,\n'
for cls in xrange(0, len(enum_ids)):
if enum_ids[cls][0] == None:
continue
for spec in xrange(0, len(enum_ids[cls])):
if enum_ids[cls][spec] == None:
continue
enum_str = ' %s%s= %u,\n' % (
enum_ids[cls][spec]['name'],
( 21 - len(enum_ids[cls][spec]['name']) ) * ' ',
enum_ids[cls][spec]['id'] )
s += enum_str
spec_arr.append('%s' % enum_ids[cls][spec]['name'])
s += '};\n\n'
spec_idx_str = ''
for i in xrange(0, len(spec_to_idx_map)):
if i % 25 == 0:
spec_idx_str += '\n '
spec_idx_str += '%2d' % spec_to_idx_map[i]
if i < len(spec_to_idx_map) - 1:
spec_idx_str += ','
if (i + 1) % 25 != 0:
spec_idx_str += ' '
# Ugliness abounds, but this is the easiest way to iterate over all specs ...
s += 'namespace specdata {\n'
s += 'static const unsigned n_specs = %u;\n' % len(spec_arr)
s += 'static const specialization_e __specs[%u] = {\n %s\n};\n\n' % (len(spec_arr), ', \n '.join(spec_arr))
s += 'static const int __idx_specs[%u] = {%s\n};\n\n' % (len(spec_to_idx_map), spec_idx_str)
s += 'inline unsigned spec_count()\n{ return n_specs; }\n\n'
s += 'inline specialization_e spec_id( unsigned idx )\n{ assert( idx < n_specs ); return __specs[ idx ]; }\n\n'
s += 'inline int spec_idx( specialization_e spec )\n{ assert( spec < %d ); return __idx_specs[ spec ]; }\n\n' % len(spec_to_idx_map)
s += '}\n'
return s
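# The generated header looks roughly like this (entries are illustrative):
#   enum specialization_e {
#     SPEC_NONE = 0,
#     SPEC_PET = 1,
#     MAGE_ARCANE          = 62,
#     ...
#   };
# followed by the specdata namespace, whose __specs / __idx_specs tables
# map between dense indices and specialization_e values in both directions.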
class SpecializationListGenerator(DataGenerator):
def __init__(self, options):
self._dbc = [ 'ChrSpecialization' ]
DataGenerator.__init__(self, options)
def generate(self, ids = None):
enum_ids = [
[ None, None, None, None ], # pets come here
[ None, None, None, None ],
[ None, None, None, None ],
[ None, None, None, None ],
[ None, None, None, None ],
[ None, None, None, None ],
[ None, None, None, None ],
[ None, None, None, None ],
[ None, None, None, None ],
[ None, None, None, None ],
[ None, None, None, None ],
[ None, None, None, None ],
]
spec_translations = [
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
]
max_specialization = 0
for spec_id, spec_data in self._chrspecialization_db.iteritems():
if spec_data.class_id > 0:
spec_name = '%s_%s' % (
DataGenerator._class_names[spec_data.class_id].upper().replace(" ", "_"),
spec_data.name.upper().replace(" ", "_"),
)
else:
spec_name = 'PET_%s' % (
spec_data.name.upper().replace(" ", "_")
)
if spec_data.spec_id > max_specialization:
max_specialization = spec_data.spec_id
for i in xrange(0, (max_specialization + 1) - len(enum_ids[ spec_data.class_id ] ) ):
enum_ids[ spec_data.class_id ].append( None )
enum_ids[ spec_data.class_id ][ spec_data.spec_id ] = { 'id': spec_id, 'name': spec_name }
s = '#define MAX_SPECS_PER_CLASS (%u)\n' % (max_specialization + 1)
s += '#define MAX_SPEC_CLASS (%u)\n\n' % len(enum_ids)
s += 'static specialization_e __class_spec_id[MAX_SPEC_CLASS][MAX_SPECS_PER_CLASS] = \n{\n'
for cls in xrange(0, len(enum_ids)):
if enum_ids[cls][0] == None:
s += ' {\n'
s += ' SPEC_NONE,\n'
s += ' },\n'
continue
s += ' {\n'
for spec in xrange(0, len(enum_ids[cls])):
if enum_ids[cls][spec] == None:
s += ' SPEC_NONE,\n'
continue
s += ' %s,\n' % enum_ids[cls][spec]['name']
s += ' },\n'
s += '};\n\n'
return s
class BaseScalingDataGenerator(DataGenerator):
def __init__(self, options, scaling_data):
if isinstance(scaling_data, str):
self._dbc = [ scaling_data ]
else:
self._dbc = scaling_data
DataGenerator.__init__(self, options)
def generate(self, ids = None):
s = ''
for i in self._dbc:
s += '// Base scaling data for classes, wow build %d\n' % self._options.build
s += 'static double __%s%s%s[] = {\n' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
re.sub(r'([A-Z]+)', r'_\1', i).lower(),
self._options.suffix and ('_%s' % self._options.suffix) or '' )
s += '%20.15f, ' % 0
for k in xrange(0, len(self._class_names) - 1):
val = getattr(self, '_%s_db' % i.lower())[k]
s += '%20.15f, ' % val.gt_value
if k > 0 and (k + 2) % 5 == 0:
s += '\n'
s += '\n};\n\n'
return s
class LevelScalingDataGenerator(DataGenerator):
def __init__(self, options, scaling_data):
if isinstance(scaling_data, str):
self._dbc = [ scaling_data ]
else:
self._dbc = scaling_data
DataGenerator.__init__(self, options)
def generate(self, ids = None):
s = ''
for i in self._dbc:
s += '// Level scaling data, wow build %d\n' % self._options.build
s += 'static double __%s%s%s[%u] = {\n' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
re.sub(r'([A-Z]+)', r'_\1', i).lower(),
self._options.suffix and ('_%s' % self._options.suffix) or '',
self._options.level)
for k in xrange(0, self._options.level):
val = getattr(self, '_%s_db' % i.lower())[k]
s += '%20.15f, ' % val.gt_value
if k > 0 and (k + 1) % 5 == 0:
s += '\n'
s += '\n};\n\n'
return s
class MonsterLevelScalingDataGenerator(DataGenerator):
def __init__(self, options, scaling_data):
if isinstance(scaling_data, str):
self._dbc = [ scaling_data ]
else:
self._dbc = scaling_data
DataGenerator.__init__(self, options)
def generate(self, ids = None):
s = ''
for i in self._dbc:
s += '// Monster(?) Level scaling data, wow build %d\n' % self._options.build
s += 'static double __%s%s%s[%u] = {\n' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
re.sub(r'([A-Z]+)', r'_\1', i).lower(),
self._options.suffix and ('_%s' % self._options.suffix) or '',
self._options.level + 3)
for k in xrange(0, self._options.level + 3):
val = getattr(self, '_%s_db' % i.lower())[k]
s += '%20.10f' % val.gt_value
if k < self._options.level + 3 - 1:
s += ', '
if k > 0 and (k + 1) % 5 == 0:
s += '\n'
s += '\n};\n\n'
return s
class IlevelScalingDataGenerator(DataGenerator):
def __init__(self, options, scaling_data):
if isinstance(scaling_data, str):
self._dbc = [ scaling_data ]
else:
self._dbc = scaling_data
DataGenerator.__init__(self, options)
def generate(self, ids = None):
s = ''
for i in self._dbc:
s += '// Item Level scaling data, wow build %d\n' % self._options.build
s += 'static double __%s%s%s[%u] = {\n' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
re.sub(r'([A-Z]+)', r'_\1', i).lower(),
self._options.suffix and ('_%s' % self._options.suffix) or '',
self._options.scale_ilevel)
for k in xrange(0, self._options.scale_ilevel):
val = getattr(self, '_%s_db' % i.lower())[k]
s += '%20.15f, ' % val.gt_value
if k > 0 and (k + 1) % 5 == 0:
s += '\n'
s += '\n};\n\n'
return s
class CombatRatingsDataGenerator(DataGenerator):
# From UIParent.lua; seems to match gtCombatRatings for level 80 data as well
_combat_ratings = [ 'Dodge', 'Parry', 'Block', 'Melee hit', 'Ranged hit',
'Spell hit', 'Melee crit', 'Ranged crit', 'Spell crit', 'Multistrike',
'Readiness', 'PvP Resilience', 'Leech', 'Melee haste', 'Ranged haste',
'Spell haste', 'Expertise', 'Mastery', 'PvP Power', 'Damage Versatility',
'Healing Versatility', 'Mitigation Versatility', 'Speed', 'Avoidance' ]
_combat_rating_ids = [ 2, 3, 4, 5, 6,
7, 8, 9, 10, 11,
12, 15, 16, 17, 18,
19, 23, 25, 26, 28,
29, 30, 13, 20 ]
def __init__(self, options):
# Hardcode these, as we need two different kinds of databases for output, using the same combat rating ids
self._dbc = [ 'gtCombatRatings' ]
DataGenerator.__init__(self, options)
def generate(self, ids = None):
s = ''
db = self._gtcombatratings_db
s += '// Combat ratings for levels 1 - %d, wow build %d \n' % (
self._options.level, self._options.build )
s += 'static double __%s%s%s[][%d] = {\n' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
re.sub(r'([A-Z]+)', r'_\1', self._dbc[0]).lower(),
self._options.suffix and ('_%s' % self._options.suffix) or '',
self._options.level )
for j in xrange(0, len(CombatRatingsDataGenerator._combat_rating_ids)):
s += ' // %s rating multipliers\n' % CombatRatingsDataGenerator._combat_ratings[j]
s += ' {\n'
m = CombatRatingsDataGenerator._combat_rating_ids[j]
for k in xrange(m * 123, m * 123 + self._options.level, 5):
s += ' %20.15f, %20.15f, %20.15f, %20.15f, %20.15f,\n' % (
db[k].gt_value, db[k + 1].gt_value, db[k + 2].gt_value,
db[k + 3].gt_value, db[k + 4].gt_value )
s += ' },\n'
s += '};\n\n'
# db = self._gtoctclasscombatratingscalar_db
# s += '// Combat Rating scalar multipliers for classes, wow build %d\n' % self._options.build
# s += 'static double __%s%s%s[][%d] = {\n' % (
# self._options.prefix and ('%s_' % self._options.prefix) or '',
# re.sub(r'([A-Z]+)', r'_\1', self._dbc[1]).lower(),
# self._options.suffix and ('_%s' % self._options.suffix) or '',
# len(self._class_names))
# for i in xrange(0, len(CombatRatingsDataGenerator._combat_rating_ids)):
# id = CombatRatingsDataGenerator._combat_rating_ids[i]
# s += ' // %s rating class scalar multipliers\n' % CombatRatingsDataGenerator._combat_ratings[i]
# s += ' { \n'
# s += ' %20.15f, %20.15f, %20.15f, %20.15f, %20.15f,\n' % (
# 0.0, db[id * 10 + 1].gt_value, db[id * 10 + 2].gt_value, db[id * 10 + 3].gt_value,
# db[id * 10 + 4].gt_value)
#
# s += ' %20.15f, %20.15f, %20.15f, %20.15f, %20.15f,\n' % (
# db[id * 10 + 5].gt_value, db[id * 10 + 6].gt_value, db[id * 10 + 7].gt_value,
# db[id * 10 + 8].gt_value, db[id * 10 + 9].gt_value )
#
# s += ' %20.15f, %20.15f\n' % ( db[i * 10 + 10].gt_value, db[i * 10 + 11].gt_value )
#
# s += ' },\n'
#
# s += '};\n\n'
return s
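# Note: the 'm * 123' stride above appears to assume that gtCombatRatings
# stores 123 consecutive per-level rows for each combat rating id; if the
# client changes that table layout, the stride (and _combat_rating_ids)
# must be updated to match.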
class ClassScalingDataGenerator(DataGenerator):
def __init__(self, options, scaling_data):
if isinstance(scaling_data, str):
self._dbc = [ scaling_data ]
else:
self._dbc = scaling_data
DataGenerator.__init__(self, options)
def generate(self, ids = None):
s = ''
for i in self._dbc:
db = getattr(self, '_%s_db' % i.lower())
s += '// Class based scaling multipliers for levels 1 - %d, wow build %d\n' % (
self._options.level, self._options.build )
s += 'static double __%s%s%s[][%d] = {\n' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
re.sub(r'([A-Z]+)', r'_\1', i).lower(),
self._options.suffix and ('_%s' % self._options.suffix) or '',
self._options.level )
for j in xrange(0, len(self._class_names)):
# Last entry is the fixed data
if j < len(self._class_names) and self._class_names[j] != None:
s += ' // %s\n' % self._class_names[j]
s += ' {\n'
for k in xrange((j - 1) * 123, (j - 1) * 123 + self._options.level, 5):
s += ' %20.15f, %20.15f, %20.15f, %20.15f, %20.15f,\n' % (
db[k].gt_value, db[k + 1].gt_value, db[k + 2].gt_value,
db[k + 3].gt_value, db[k + 4].gt_value
)
s += ' },\n'
s += '};\n\n'
return s
class SpellScalingDataGenerator(DataGenerator):
def __init__(self, options):
self._dbc = [ 'gtSpellScaling' ]
DataGenerator.__init__(self, options)
def generate(self, ids = None):
db = self._gtspellscaling_db
s = ''
s += '// Spell scaling multipliers for levels 1 - %d, wow build %d\n' % (
self._options.level, self._options.build )
s += 'static double __%s%s%s[][%d] = {\n' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
re.sub(r'([A-Z]+)', r'_\1', self._dbc[0]).lower(),
self._options.suffix and ('_%s' % self._options.suffix) or '',
self._options.level )
for j in xrange(0, len(self._class_names) + 5):
# Entries past the class list hold the constant (fixed) scaling data
if j < len(self._class_names) and self._class_names[j] != None:
s += ' // %s\n' % self._class_names[j]
else:
s += ' // Constant scaling\n'
s += ' {\n'
for k in xrange((j - 1) * 123, (j - 1) * 123 + self._options.level, 5):
s += ' %20.15f, %20.15f, %20.15f, %20.15f, %20.15f,\n' % (
db[k].gt_value, db[k + 1].gt_value, db[k + 2].gt_value,
db[k + 3].gt_value, db[k + 4].gt_value
)
s += ' },\n'
s += '};\n\n'
return s
class TalentDataGenerator(DataGenerator):
def __init__(self, options):
DataGenerator.__init__(self, options)
self._dbc = [ 'Spell', 'Talent' ]
self._dbc.append( 'ChrSpecialization' )
def filter(self):
ids = [ ]
for talent_id, talent_data in self._talent_db.iteritems():
# Make sure at least one spell id is defined
if talent_data.id_spell == 0:
continue
# Make sure the "base spell" exists
if not self._spell_db.get(talent_data.id_spell):
continue
ids.append(talent_id)
return ids
def generate(self, ids = None):
# Sort keys
ids.sort()
s = '#define %sTALENT%s_SIZE (%d)\n\n' % (
(self._options.prefix and ('%s_' % self._options.prefix) or '').upper(),
(self._options.suffix and ('_%s' % self._options.suffix) or '').upper(),
len(ids)
)
s += '// %d talents, wow build %d\n' % ( len(ids), self._options.build )
s += 'static struct talent_data_t __%stalent%s_data[] = {\n' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or '' )
index = 0
for id in ids + [ 0 ]:
talent = self._talent_db[id]
spell = self._spell_db[talent.id_spell]
if not spell.id and talent.id_spell > 0:
continue
if( index % 20 == 0 ):
s += '//{ Name , Id, Flgs, Class, Spc, Col, Row, SpellID, ReplaceID, S1 },\n'
fields = spell.field('name')
fields += talent.field('id')
fields += [ '%#.2x' % 0 ]
fields += [ '%#.04x' % (DataGenerator._class_masks[talent.class_id] or 0) ]
fields += talent.field('spec_id')
fields += talent.field('col','row', 'id_spell', 'id_replace' )
# Pad struct with empty pointers for direct rank based spell data access
fields += [ ' 0' ]
s += ' { %s },\n' % (', '.join(fields))
index += 1
s += '};'
return s
class RulesetItemUpgradeGenerator(DataGenerator):
def __init__(self, options):
self._dbc = [ 'RulesetItemUpgrade' ]
DataGenerator.__init__(self, options)
def generate(self, ids = None):
s = 'static item_upgrade_rule_t __%s_data[] = {\n' % (
self.format_str( 'item_upgrade_rule' ),
)
for id_ in sorted(self._rulesetitemupgrade_db.keys()) + [ 0 ]:
rule = self._rulesetitemupgrade_db[id_]
s += ' { %s },\n' % (', '.join(rule.field('id', 'upgrade_level', 'id_upgrade_base', 'id_item')))
s += '};\n\n'
return s
class ItemUpgradeDataGenerator(DataGenerator):
def __init__(self, options):
self._dbc = [ 'ItemUpgrade' ]
DataGenerator.__init__(self, options)
def generate(self, ids = None):
s = 'static item_upgrade_t __%s_data[] = {\n' % (
self.format_str( 'item_upgrade' ),
)
for id_ in sorted(self._itemupgrade_db.keys()) + [ 0 ]:
upgrade = self._itemupgrade_db[id_]
s += ' { %s },\n' % (', '.join(upgrade.field('id', 'upgrade_ilevel')))
s += '};\n\n'
return s
class ItemDataGenerator(DataGenerator):
_item_blacklist = [
17, 138, 11671, 11672, # Various non-existing items
27863, 27864, 37301, 38498,
40948, 41605, 41606, 43336,
43337, 43362, 43384, 55156,
55159, 65015, 65104, 51951,
51953, 52313, 52315,
68711, 68713, 68710, 68709, # Tol Barad trinkets have three versions, only need two, blacklist the "non faction" one
62343, 62345, 62346, 62333, # Therazane enchants that have faction requirements
50338, 50337, 50336, 50335, # Sons of Hodir enchants that have faction requirements
50370, 50368, 50367, 50369, 50373, 50372, # Wotlk head enchants that have faction requirements
62367, 68719, 62368, 68763, 68718, 62366, # Obsolete Cataclysm head enchants
68721, 62369, 62422, 68722, #
43097, #
]
_item_name_blacklist = [
"^(Lesser |)Arcanum of",
"^Scroll of Enchant",
"^Enchant ",
"Deprecated",
"DEPRECATED",
"QA",
"zzOLD",
"NYI",
]
_type_flags = {
"Raid Finder" : 0x01,
"Heroic" : 0x02,
"Flexible" : 0x04,
"Elite" : 0x10, # Meta type
"Timeless" : 0x10,
"Thunderforged" : 0x10,
"Warforged" : 0x10,
# Combinations
"Heroic Thunderforged" : 0x12,
"Heroic Warforged" : 0x12,
}
def __init__(self, options):
self._dbc = [ 'Item-sparse', 'Item', 'ItemEffect', 'SpellEffect', 'Spell', 'JournalEncounterItem', 'ItemNameDescription' ]
DataGenerator.__init__(self, options)
def initialize(self):
DataGenerator.initialize(self)
# Map Spell effects to Spell IDs so we can do filtering based on them
for spell_effect_id, spell_effect_data in self._spelleffect_db.iteritems():
if not spell_effect_data.id_spell:
continue
spell = self._spell_db[spell_effect_data.id_spell]
if not spell.id:
continue
spell.add_effect(spell_effect_data)
# Map JournalEncounterItem.dbc data to items.
for id, journal_item_data in self._journalencounteritem_db.iteritems():
if self._item_sparse_db[journal_item_data.id_item]:
self._item_sparse_db[journal_item_data.id_item].journal = journal_item_data
# For WoD, map ItemEffect to Item-sparse
for is_id,data in self._itemeffect_db.iteritems():
item = self._item_sparse_db[data.id_item]
if not item.id:
continue
item.spells.append(data)
return True
def filter(self):
ids = []
for item_id, data in self._item_sparse_db.iteritems():
blacklist_item = False
classdata = self._item_db[item_id]
if item_id in self._item_blacklist:
continue
for pat in self._item_name_blacklist:
if data.name and re.search(pat, data.name):
blacklist_item = True
if blacklist_item:
continue
filter_ilevel = True
# Item no longer in game
if data.flags & 0x10:
continue
# On-use armors/weapons
if classdata.classs in [ 2, 4 ]:
# All shirts
if data.inv_type == 4:
filter_ilevel = False
else:
# On-use item, with a valid spell (and cooldown)
for item_effect in data.spells:
if item_effect.trigger_type == 0 and item_effect.id_spell > 0 and (item_effect.cooldown_group_duration > 0 or item_effect.cooldown_category_duration > 0):
filter_ilevel = False
break
# Gems
elif classdata.classs == 3:
if data.gem_props == 0:
continue
else:
filter_ilevel = False
# Consumables
elif classdata.classs == 0:
# Potions, Elixirs, Flasks. Simple spells only.
if classdata.has_value('subclass', [1, 2, 3]):
for item_effect in data.spells:
spell = self._spell_db[item_effect.id_spell]
if not spell.has_effect('type', 6):
continue
# Grants armor, stats, or rating
if not spell.has_effect('sub_type', [13, 22, 29, 99, 189, 465]):
continue
filter_ilevel = False
# Food
elif classdata.has_value('subclass', 5):
for item_effect in data.spells:
spell = self._spell_db[item_effect.id_spell]
for effect in spell._effects:
if not effect:
continue
if effect.sub_type == 23:
filter_ilevel = False
elif classdata.subclass == 3:
filter_ilevel = False
# Permanent Item Enchants (not strictly needed for simc, but
# paperdoll will like them)
elif classdata.subclass == 6:
filter_ilevel = False
else:
continue
# Hunter scopes and whatnot
elif classdata.classs == 7:
if classdata.has_value('subclass', 3):
for item_effect in data.spells:
spell = self._spell_db[item_effect.id_spell]
for effect in spell._effects:
if not effect:
continue
if effect.type == 53:
filter_ilevel = False
# Only very select quest-item permanent item enchantments
elif classdata.classs == 12:
valid = False
for spell in data.spells:
spell_id = spell.id_spell
if spell_id == 0:
continue
spell = self._spell_db[spell_id]
for effect in spell._effects:
if not effect or effect.type != 53:
continue
valid = True
break
if valid:
filter_ilevel = False
else:
continue
# All glyphs
elif classdata.classs == 16:
filter_ilevel = False
# All tabards
elif data.inv_type == 19:
filter_ilevel = False
# Item-level based non-equippable items
if filter_ilevel and data.inv_type == 0:
continue
# All else is filtered based on item level
elif filter_ilevel and (data.ilevel < self._options.min_ilevel or data.ilevel > self._options.max_ilevel):
continue
ids.append(item_id)
return ids
def generate(self, ids = None):
ids.sort()
s = '#include "data_definitions.hh"\n\n'
s += '#define %sITEM%s_SIZE (%d)\n\n' % (
(self._options.prefix and ('%s_' % self._options.prefix) or '').upper(),
(self._options.suffix and ('_%s' % self._options.suffix) or '').upper(),
len(ids)
)
s += '// %d items, ilevel %d-%d, wow build level %d\n' % ( len(ids),
self._options.min_ilevel, self._options.max_ilevel, self._options.build )
s += 'static struct item_data_t __%sitem%s_data[] = {\n' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or ''
)
index = 0
for id in ids + [ 0 ]:
item = self._item_sparse_db[id]
item2 = self._item_db[id]
if not item.id and id > 0:
sys.stderr.write('Item id %d not found\n' % id)
continue
# And, hack classs 12 (quest item) to class 0, subclass 6,
# so that item enchants end up clumped in the same category.
if item2.classs == 12:
item2.classs = 0
item2.subclass = 6
if(index % 20 == 0):
s += '//{ Id, Name , Flags1, Flags2, Type,Level,ReqL,ReqSk, RSkL,Qua,Inv,Cla,SCl,Bnd, Delay, DmgRange, Modifier, ClassMask, RaceMask, { ST1, ST2, ST3, ST4, ST5, ST6, ST7, ST8, ST9, ST10}, { SV1, SV2, SV3, SV4, SV5, SV6, SV7, SV8, SV9, SV10 }, { SId1, SId2, SId3, SId4, SId5 }, {Soc1,Soc2,Soc3 }, GemP,IdSBon,IdSet,IdSuf },\n'
fields = item.field('id', 'name')
fields += item.field('flags', 'flags_2')
flag_types = 0x00
if hasattr(item, 'journal'):
if item.journal.flags_1 == 0x10:
flag_types |= self._type_flags['Raid Finder']
elif item.journal.flags_1 == 0xC:
flag_types |= self._type_flags['Heroic']
desc = self._itemnamedescription_db[item.id_name_desc]
flag_types |= self._type_flags.get(desc.desc, 0)
fields += [ '%#.2x' % flag_types ]
fields += item.field('ilevel', 'req_level', 'req_skill', 'req_skill_rank', 'quality', 'inv_type')
fields += item2.field('classs', 'subclass')
fields += item.field( 'bonding', 'delay', 'weapon_damage_range', 'item_damage_modifier', 'race_mask', 'class_mask')
fields += [ '{ %s }' % ', '.join(item.field('stat_type_1', 'stat_type_2', 'stat_type_3', 'stat_type_4', 'stat_type_5', 'stat_type_6', 'stat_type_7', 'stat_type_8', 'stat_type_9', 'stat_type_10')) ]
fields += [ '{ %s }' % ', '.join(item.field('stat_val_1', 'stat_val_2', 'stat_val_3', 'stat_val_4', 'stat_val_5', 'stat_val_6', 'stat_val_7', 'stat_val_8', 'stat_val_9', 'stat_val_10')) ]
fields += [ '{ %s }' % ', '.join(item.field('stat_alloc_1', 'stat_alloc_2', 'stat_alloc_3', 'stat_alloc_4', 'stat_alloc_5', 'stat_alloc_6', 'stat_alloc_7', 'stat_alloc_8', 'stat_alloc_9', 'stat_alloc_10')) ]
fields += [ '{ %s }' % ', '.join(item.field('stat_socket_mul_1', 'stat_socket_mul_2', 'stat_socket_mul_3', 'stat_socket_mul_4', 'stat_socket_mul_5', 'stat_socket_mul_6', 'stat_socket_mul_7', 'stat_socket_mul_8', 'stat_socket_mul_9', 'stat_socket_mul_10')) ]
spells = self._itemeffect_db[0].field('id_spell') * 5
trigger_types = self._itemeffect_db[0].field('trigger_type') * 5
cooldown_category = self._itemeffect_db[0].field('cooldown_category') * 5
cooldown_value = self._itemeffect_db[0].field('cooldown_category_duration') * 5
cooldown_group = self._itemeffect_db[0].field('cooldown_group') * 5
cooldown_shared = self._itemeffect_db[0].field('cooldown_group_duration') * 5
for spell in item.spells:
spells[ spell.index ] = spell.field('id_spell')[ 0 ]
trigger_types[ spell.index ] = spell.field('trigger_type')[ 0 ]
cooldown_category[ spell.index ] = spell.field('cooldown_category')[ 0 ]
cooldown_value[ spell.index ] = spell.field('cooldown_category_duration')[ 0 ]
cooldown_group[ spell.index ] = spell.field('cooldown_group')[ 0 ]
cooldown_shared[ spell.index ] = spell.field('cooldown_group_duration')[ 0 ]
fields += [ '{ %s }' % ', '.join(trigger_types) ]
fields += [ '{ %s }' % ', '.join(spells) ]
fields += [ '{ %s }' % ', '.join(cooldown_category) ]
fields += [ '{ %s }' % ', '.join(cooldown_value) ]
fields += [ '{ %s }' % ', '.join(cooldown_group) ]
fields += [ '{ %s }' % ', '.join(cooldown_shared) ]
fields += [ '{ %s }' % ', '.join(item.field('socket_color_1', 'socket_color_2', 'socket_color_3')) ]
fields += item.field('gem_props', 'socket_bonus', 'item_set', 'rand_suffix' )
s += ' { %s },\n' % (', '.join(fields))
index += 1
s += '};\n\n'
return s
class RandomPropertyHTMLParser(HTMLParser.HTMLParser):
def __init__(self, suffix_map):
#self.__indent = 0
self.__getSuffix = False
self.__suffixName = None
self.__suffix_map = suffix_map
HTMLParser.HTMLParser.__init__(self)
# Really stupid way to parse things, but it should work
def handle_starttag(self, tag, attrs):
#print '%s%s: %s' % ( ' ' * self.__indent, tag, attrs )
#self.__indent += 2
if tag == 'td' and not self.__suffixName:
for attr in attrs:
if attr[0] == 'class' and 'color-q' in attr[1]:
self.__getSuffix = True
break
def handle_data(self, data):
if self.__getSuffix:
self.__suffixName = data.translate(None, '.')
self.__suffix_map[self.__suffixName] = None
elif self.__suffixName:
self.__suffix_map[self.__suffixName] = data.translate(None, '\r\n\t')
def handle_endtag(self, tag):
#self.__indent -= 2
#print '%s%s' % ( ' ' * self.__indent, tag )
if tag == 'td' and self.__getSuffix:
self.__getSuffix = False
elif tag == 'td' and self.__suffixName:
self.__suffixName = None
class RandomSuffixGroupGenerator(ItemDataGenerator):
_stat_map = {
'agility' : 3,
'strength' : 4,
'intellect' : 5,
'spirit' : 6,
'stamina' : 7,
'dodge rating' : 13,
'parry rating' : 14,
'hit rating' : 31,
'crit rating' : 32,
'haste rating' : 36,
'expertise rating' : 37,
'holy resistance' : 53,
'mastery rating' : 49,
'frost resistance' : 52,
'shadow resistance' : 54,
'nature resistance' : 55,
'arcane resistance' : 56,
}
_quality_str = [ '', '', 'uncm', 'rare', 'epic' ]
def random_suffix_type(self, id):
item = self._item_sparse_db[id]
item2 = self._item_db[id]
f = -1
if item2.classs == 2:
if item2.subclass == 1 or item2.subclass == 5 or item2.subclass == 6 or item2.subclass == 8 or item2.subclass == 10:
f = 0
elif item2.subclass == 2 or item2.subclass == 3 or item2.subclass == 18 or item2.subclass == 16 or item2.subclass == 19:
f = 4
else:
f = 3
else:
if item.inv_type == 1 or item.inv_type == 5 or item.inv_type == 7:
f = 0
elif item.inv_type == 3 or item.inv_type == 6 or item.inv_type == 8 or item.inv_type == 10 or item.inv_type == 12:
f = 1
elif item.inv_type == 2 or item.inv_type == 9 or item.inv_type == 11 or item.inv_type == 14 or item.inv_type == 23 or item.inv_type == 16:
f = 2
return f
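# Reading the branches above: the return value selects a RandPropPoints
# column group (the '%s_points_%d' lookup with f + 1 in generate() below) --
# roughly 0 for head/chest/legs and two-handed weapons, 1 for shoulder/
# waist/feet/hands/trinket, 2 for neck/wrist/finger/back/shield/off-hand,
# 3 for one-hand weapons and 4 for ranged weapons and wands; -1 means the
# item receives no suffix points.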
def __init__(self, options):
ItemDataGenerator.__init__(self, options)
def initialize(self):
self._dbc += [ 'SpellItemEnchantment', 'ItemRandomSuffix', 'RandPropPoints' ]
return ItemDataGenerator.initialize(self)
def filter(self):
item_ids = ItemDataGenerator.filter(self)
ids = []
# Generate an ID list of random suffix ids, to which we need to figure
# out the random suffix grouping, based on web crawling of battle.net
for id in item_ids:
if self._item_sparse_db[id].rand_suffix > 0:
ids.append(id)
return ids
def generate(self, ids = None):
rsuffix_groups = { }
parsed_rsuffix_groups = { }
for id in ids:
item = self._item_sparse_db[id]
if item.rand_suffix not in rsuffix_groups.keys():
rsuffix_groups[item.rand_suffix] = [ ]
rsuffix_groups[item.rand_suffix].append(id)
for rsuffix_group, rsuffix_items in rsuffix_groups.iteritems():
# Take the first item of the group; we could fall back to other items
# here if the URL returns a 404 or similar
item = self._item_sparse_db[rsuffix_items[0]]
smap = { }
sys.stderr.write('.. Fetching group %d with item id %d (%s)\n' % (rsuffix_group, item.id, item.name))
try:
url = urllib2.urlopen(r'http://us.battle.net/wow/en/item/%d/randomProperties' % item.id)
except urllib2.HTTPError as err:
sys.stderr.write('.. HTTP Error %d: %s\n' % (err.code, err.msg))
continue
html = RandomPropertyHTMLParser(smap)
html.feed(url.read())
html.close()
for suffix, stats in smap.iteritems():
html_stats = [ ]
splits = stats.split(',')
# Parse html stats
for stat_str in splits:
stat_re = re.match(r'^[\+\-]([0-9]+) ([a-z ]+)', stat_str.lower().strip())
if not stat_re:
continue
stat_val = int(stat_re.group(1))
stat_id = self._stat_map.get(stat_re.group(2))
if stat_id == None:
#sys.stderr.write('Unknown stat %s\n' % stat_str.lower().strip())
continue
html_stats.append((stat_id, stat_val))
for suffix_id, suffix_data in self._itemrandomsuffix_db.iteritems():
if suffix_data.name_sfx != suffix:
continue
# Name matches, we need to check the stats
rsuffix_stats = [ ]
# Then, scan through the suffix properties,
for sp_id in xrange(1, 6):
item_ench_id = getattr(suffix_data, 'id_property_%d' % sp_id)
item_ench_alloc = getattr(suffix_data, 'property_pct_%d' % sp_id)
if item_ench_id == 0:
continue
rprop = self._randproppoints_db[item.ilevel]
f = self.random_suffix_type(item.id)
points = getattr(rprop, '%s_points_%d' % (self._quality_str[item.quality], f + 1))
amount = points * item_ench_alloc / 10000.0
item_ench = self._spellitemenchantment_db[item_ench_id]
for ie_id in xrange(1, 4):
ie_stat_type = getattr(item_ench, 'type_%d' % ie_id)
ie_stat_prop = getattr(item_ench, 'id_property_%d' % ie_id)
if ie_stat_type != 5:
continue
rsuffix_stats.append((ie_stat_prop, int(amount)))
# Compare lists, need at least as many matches as html stats
match = 0
for i in xrange(0, len(html_stats)):
if html_stats[i] in rsuffix_stats:
match += 1
if match == len(html_stats):
if not rsuffix_group in parsed_rsuffix_groups.keys():
parsed_rsuffix_groups[rsuffix_group] = [ ]
parsed_rsuffix_groups[rsuffix_group].append(suffix_data.id)
break
s = '#define %sRAND_SUFFIX_GROUP%s_SIZE (%d)\n\n' % (
(self._options.prefix and ('%s_' % self._options.prefix) or '').upper(),
(self._options.suffix and ('_%s' % self._options.suffix) or '').upper(),
len(parsed_rsuffix_groups.keys())
)
s += '// Random suffix groups\n'
s += 'static struct random_suffix_group_t __%srand_suffix_group%s_data[] = {\n' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or '' )
for group_id in sorted(parsed_rsuffix_groups.keys()):
data = parsed_rsuffix_groups[group_id]
s += ' { %4u, { %s, 0 } },\n' % (group_id, ', '.join(['%3u' % d for d in sorted(data)]))
s += ' { 0, { 0 } }\n'
s += '};\n\n'
return s
class SpellDataGenerator(DataGenerator):
_spell_ref_rx = r'\$(?:\?[A-z]|@spell(?:name|desc|icon|tooltip))?([0-9]+)(?:\[|[A-z][0-9]?|)'
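# Illustrative strings the reference pattern above captures (spell ids are
# hypothetical):
#   '$1234'             -> '1234'   (plain reference)
#   '$?s12345[a][b]'    -> '12345'  (conditional reference)
#   '$@spellname67890'  -> '67890'  (named reference)
#   '$54321d'           -> '54321'  (attribute reference, e.g. duration)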
# Pattern based whitelist, these will always be added
_spell_name_whitelist = [
#re.compile(r'^Item\s+-\s+(.+)\s+T([0-9]+)\s+([A-z\s]*)\s*([0-9]+)P')
]
# Explicitly included spells per class that cannot be found from talents
# or from a SkillLine category.
# Each class entry is a tuple of ( spell_id, category[, activated ] ) tuples,
# where category indexes the class-specific _class_categories tuples:
# general = 0, specialization0..3 = 1..4, and pets as a whole are 5. The
# optional activated parameter forces the spell to appear (if set to True)
# or disappear (if set to False) in the class activated spell list,
# regardless of the automated activated check.
# Manually entered general spells ("tree" 0) do not appear in class
# activated lists even if they pass the "activated" check, unless the
# optional activated parameter is present and set to True.
# The first tuple in the list holds generic, non-class spells that are
# whitelisted without a category.
_spell_id_list = [
(
109871, 109869, # No'Kaled the Elements of Death - LFR
107785, 107789, # No'Kaled the Elements of Death - Normal
109872, 109870, # No'Kaled the Elements of Death - Heroic
52586, 68043, 68044, # Gurthalak, Voice of the Deeps - LFR, N, H
109959, 109955, 109939, # Rogue Legendary buffs for P1, 2, 3
84745, 84746, # Shallow Insight, Moderate Insight
138537, # Death Knight Tier15 2PC melee pet special attack
137597, # Legendary meta gem Lightning Strike
137323, 137288, 137247, # Healer legendary meta
137331, 137326,
146137, # Cleave
146071, # Multistrike
120032, 142530, # Dancing Steel
104993, 142535, # Jade Spirit
116631, # Colossus
105617, # Alchemist's Flask
137596, # Capacitance
104510, 104423, # Windsong Mastery / Haste buffs
156060, 156055, 173287, # Hunter Scopes
177172, 177175, 177176, # WoD Legendary ring, phase 1(?)
177161, 177159, 177160, # WoD Legendary ring, phase 1(?)
143924, # Leech
),
# Warrior:
(
( 118340, 0 ), # Impending Victory Heal
( 58385, 0 ), # Glyph of Hamstring
( 118779, 0, False ), # Victory Rush heal is not directly activatable
( 144442, 0 ), # T16 Melee 4 pc buff
( 144500, 0 ), # T16 tank 4pc proc
( 156321, 0 ), # Shield Charge for Gladiator Stance
( 163558, 0 ), # Execute off-hand
( 165336, 0 ), # Arms T17 2 piece
( 165345, 0 ), # Arms T17 4 piece
( 165337, 0 ), # Fury T17 2 piece
( 165349, 0 ), # Fury T17 4 piece
( 165338, 0 ), # Prot T17 2 piece
( 165351, 0 ), # Prot T17 4 piece
),
# Paladin:
(
( 86700, 5 ), # Ancient Power
( 144581, 0 ), # Blessing of the Guardians (prot T16 2-piece bonus)
( 144595, 0 ), # Divine Crusader (ret T16 4-piece bonus)
( 122287, 0, True ), # Symbiosis Wrath
( 96172, 0 ), # Hand of Light damage spell
( 42463, 0, False ), # Seal of Truth damage id not directly activatable
( 114852, 0, False ), # Holy Prism false positives for activated
( 114871, 0, False ),
( 65148, 0, False ), # Sacred Shield absorb tick
( 136494, 0, False ), # World of Glory false positive for activated
( 113075, 0, False ), # Barkskin (from Symbiosis)
( 144569, 0, False ), # Bastion of Power (prot T16 4-piece bonus)
( 130552, 0, True ), # Harsh Word
),
# Hunter:
( ( 83381, 0 ), # Kill Command base dmg
( 82928, 0 ), # Aimed Shot Master Marksman
( 131900, 0 ), # Murder of Crows damage spell
( 138374, 5 ), # T15 2pc nuke
( 168811, 0 ), # Sniper Training
( 171454, 0 ), # Chimaera Shot - Frost
( 171457, 0 ), # Chimaera Shot - Nature
( 90967, 0 ), # Kill Command cooldown
( 157708, 2 ), # Marks Kill Shot
( 178875, 0 ), # BM T17 4P
),
# Rogue:
(
( 121474, 0 ), # Shadow Blades off hand
( 57841, 0 ), # Killing Spree assault
( 57842, 0 ), # Killing Spree Off-Hand assault
( 22482, 0 ), # Blade Flurry damage spell
( 113780, 0, False ), # Deadly Poison damage is not directly activatable
( 89775, 0, False ), # Hemorrhage damage is not directly activatable
( 86392, 0, False ), # Main Gauche false positive for activatable
( 145211, 0 ), # Subtlety Tier16 4PC proc
( 168908, 0 ), # Sinister Calling: Hemorrhage
( 168952, 0 ), # Sinister Calling: Crimson Tempest
( 168971, 0 ), # Sinister Calling: Garrote
( 168963, 0 ), # Sinister Calling: Rupture
( 115189, 0 ), # Anticipation buff
( 157562, 0 ), # Crimson Poison (Enhanced Crimson Tempest perk)
),
# Priest:
( ( 63619, 5 ), # Shadowfiend "Shadowcrawl"
( 94472, 0 ), # Atonement Crit
( 65081, 0 ), # Body and Soul
( 114908, 0, False ), # Spirit Shell absorb
( 124464, 0, False ), ( 124465, 0, False ), ( 124467, 0, False ), ( 124468, 0, False ), ( 124469, 0, False ), # Shadow Mastery "duplicate" ticks
( 127627, 3 ), ( 127628, 3 ), # Shadow Cascade
( 127626, 0, False ), # Devouring Plague heal (deactive)
( 129197, 3 ), # Mind Flay (Insanity)
( 165628, 0 ), # Item - Priest T17 Shadow 2P Bonus
( 165623, 0 ), # Item - Priest T17 Shadow 2P Bonus - dot spell
( 165621, 0 ), # Item - Priest T17 Holy 2P Bonus
( 165614, 0 ), # Item - Priest T17 Discipline 2P Bonus
( 167694, 0 ), # Item - Priest T17 Discipline 4P Bonus
( 167684, 0 ), # Item - Priest T17 Holy 4P Bonus
( 165629, 0 ), # Item - Priest T17 Shadow 4P Bonus
),
# Death Knight:
( ( 70890, 0 ), # weird Scourge Strike secondary effect
( 51963, 5 ), # gargoyle strike
( 66198, 0 ), # Obliterate off-hand
( 66196, 0 ), # Frost Strike off-hand
( 66216, 0 ), # Plague Strike off-hand
( 66188, 0 ), # Death Strike off-hand
( 113516, 0, True ), # Symbiosis Wild Mushroom: Plague
( 52212, 0, False ), # Death and Decay false positive for activatable
( 81277, 5 ), ( 81280, 5 ), ( 50453, 5 ), # Bloodworms heal / burst
( 45470, 0 ), # Death Strike heal
( 77535, 0 ), # Blood Shield
( 116783, 0 ), # Death Siphon heal
( 96171, 0 ), # Will of the Necropolis Rune Tap buff
( 144948, 0 ), # T16 tank 4PC Bone Shield charge proc
( 144953, 0 ), # T16 tank 2PC Death Strike proc
( 144909, 0 ), # T16 dps 4PC frost driver spell
( 57330, 0, True ), # Horn of Winter needs to be explicitly put in the general tree, as our (over)zealous filtering thinks it's not an active ability
( 47568, 0, True ), # Same goes for Empower Rune Weapon
( 170202, 0 ), # Frost T17 4pc driver
( 170205, 0 ), # Frost T17 4pc driver continued ...
),
# Shaman:
( ( 77451, 0 ), ( 45284, 0 ), ( 45297, 0 ), # Overloads
( 114093, 0 ), # Ascendance: Stormblast, offhand melee swing,
( 114074, 0 ), ( 114738, 0 ), # Ascendance: Lava Beam, Lava Beam overload
( 120687, 0 ), ( 120588, 0 ), # Stormlash, Elemental Blast overload
( 121617, 0 ), # Ancestral Swiftness 5% haste passive
( 25504, 0, False ), ( 33750, 0, False ), # Windfury passives are not directly activatable
( 8034, 0, False ), # Frostbrand false positive for activatable
( 145002, 0, False ), # Lightning Elemental nuke
( 157348, 5 ), ( 157331, 5 ), # Storm elemental spells
( 159101, 0 ), ( 159105, 0 ), ( 159103, 0 ), # Echo of the Elements spec buffs
( 157766, 0 ), # Improved Chain Lightning visible buff
( 173184, 0 ), ( 173185, 0 ), ( 173186, 0 ), # Elemental Blast buffs
( 173183, 0 ), # Elemental Blast buffs
( 170512, 0 ), ( 170523, 0 ) # Feral Spirit windfury (t17 enhance 4pc set bonus)
),
# Mage:
(
( 48107, 0, False ), ( 48108, 0, False ), # Heating Up and Pyroblast! buffs
( 79058, 0 ), # Mage: Mana Gem, Frostfire Orb x2, Arcane Brilliance
( 88084, 5 ), ( 59637, 5 ), ( 88082, 5 ), ( 59638, 5 ), # Mirror Image spells.
( 80354, 0 ), # Temporal Displacement
( 131079, 0 ), ( 131080, 0 ), ( 131081, 0 ), # Mini-bolt spells
( 131581, 0 ), # Waterbolt
( 7268, 0, False ), # Arcane missiles trigger
( 115757, 0, False ), # Frost Nova false positive for activatable
( 145264, 0 ), # T16 Frigid Blast
( 148022, 0 ), # Icicle
( 155152, 5 ), # Prismatic Crystal nuke
( 157978, 0 ), ( 157979, 0 ), ( 157977, 0 ), # Unstable magic aoe
),
# Warlock:
( ( 85692, 5 ), # doomguard doom bolt
( 115746, 5 ), # fel imp felbolt
( 166864, 5 ), # inner demon soulfire
( 115778, 5 ), # observer tongue lash
( 115748, 5 ), # shivarra bladedance
( 115770, 5 ), # shivarra fellash
( 115625, 5 ), # wrathguard mortal cleave
( 115831, 5 ), # wrathguard wrathstorm
( 112092, 0 ), # glyphed shadow bolt
( 112866, 0, True ), # fel imp summon
( 112867, 0, True ), # voidlord summon
( 112868, 0, True ), # shivarra summon
( 112869, 0, True ), # observer summon
( 112870, 2, True ), # wrathguard summon
( 112921, 0, True ), # abyssal summon
( 112927, 0, True ), # terrorguard summon
( 115422, 2, True ), # void ray
( 104025, 2, True ), # immolation aura
( 104027, 2, True ), # meta soul fire
( 124916, 2, True ), # meta chaos wave
( 103964, 2, True ), # meta touch of chaos
( 104232, 3, True ), # destruction rain of fire
( 114790, 1 ), ( 87385, 1 ), # soulburn seed of corruption
( 131737, 0, False ), ( 131740, 0, False ), ( 132566, 0, False ), ( 131736, 0, False ), # Duplicated Warlock dots
( 111859, 0, True ), ( 111895, 0, True ), ( 111897, 0, True ), ( 111896, 0, True ), ( 111898, 2, True ), # Grimoire of Service summons
( 103988, 0, False ), # Demo melee
( 145075, 0 ), # T16 2pc destro
( 145085, 0 ), # T16 2pc demo
( 145159, 0 ), # T16 4pc affli soul shard gain
( 145164, 0 ), # T16 4pc destro ember fire gain
( 114654, 0 ), # Fire and Brimstone nukes
( 108686, 0 ),
( 108685, 0 ),
( 109468, 0 ),
( 104225, 0 ),
( 89653, 0 ), # Drain Life heal
( 157698, 1 ), # Soulburn: Haunt buff
( 166864, 0 ), # T17 4 piece Demo spell.
( 129476, 0 ), # Immolation Aura
),
# Monk:
( ( 118864, 3 ), # Combo Breaker: Tiger Palm
( 116768, 3 ), # Combo Breaker: Blackout Kick
( 159407, 3 ), # Combo Breaker: Chi Explosion
( 108557, 1, False ), # Brewmaster Jab
( 126890, 0, False ), # Eminence false positives for activatable abilities
( 117895, 0, False ),
( 140737, 0 ), # Way of the Monk 2-Hander Weapon Speed modifier
),
# Druid:
( ( 93402, 1, True ), # Sunfire
( 106996, 1, True ), # Astral Storm
( 112071, 1, True ), # Celestial Alignment
( 110621, 0, True ), # Symbiosis spells
( 122114, 1, True ), # Chosen of Elune
( 122283, 0, True ),
( 110807, 0, True ),
( 112997, 0, True ),
( 113769, 5 ), # Wrath for Treants
( 110691, 5 ), # Wrath for Mirror Images
( 108291, 0 ), ( 108292, 0 ), ( 108293, 0 ), ( 108294, 0 ), # Heart of the Wild
( 144770, 1, False ), ( 144772, 1, False ), # Balance Tier 16 2pc spells
( 150017, 5 ), # Rake for Treants
( 146874, 0 ), # Feral Rage (T16 4pc feral bonus)
( 124991, 0 ), ( 124988, 0 ), # Nature's Vigil
( 155580, 0 ), # Lunar Inspiration
( 155627, 0 ), # Lunar Inspiration
( 155625, 0 ), # Lunar Inspiration Moonfire
( 145152, 0 ), # Bloodtalons buff
( 135597, 0 ), # Tooth and Claw absorb buff
( 155784, 0 ), # Primal Tenacity buff
( 165431, 0 ), # tier17_2pc_melee
( 165432, 0 ), # tier17_4pc_melee
( 166638, 0 ), # tier17_4pc_melee debuff
( 166639, 0 ), # tier17_4pc_melee proc driver
( 177969, 0 ), # tier17_4pc_tank buff
( 137542, 0 ), # Displacer Beast buff
),
]
# Class specific item sets, T13, T14, T15
_item_set_list = [
(),
# Tier13, Tier14, Tier15 Tier16
( ( 1073, 1074, ), ( 1144, 1145, ), ( 1172, 1173 ), ( 1179, 1180 ), ), # Warrior
( ( 1063, 1065, 1064, ), ( 1134, 1135, 1136, ), ( 1162, 1163, 1164 ), ( 1188, 1189, 1190 ), ), # Paladin
( ( 1061, ), ( 1129, ), ( 1157, ), ( 1195, ), ), # Hunter
( ( 1068, ), ( 1139, ), ( 1167, ), ( 1185, ), ), # Rogue
( ( 1066, 1067, ), ( 1137, 1138, ), ( 1165, 1166 ), ( 1186, 1187 ), ), # Priest
( ( 1056, 1057, ), ( 1123, 1124, ), ( 1151, 1152 ), ( 1200, 1201 ), ), # Death Knight
( ( 1070, 1071, 1069, ), ( 1140, 1141, 1142, ), ( 1168, 1169, 1170 ), ( 1182, 1183, 1184 ), ), # Shaman
( ( 1062, ), ( 1130, ), ( 1158, ), ( 1194, ) ), # Mage
( ( 1072, ), ( 1143, ), ( 1171, ), ( 1181, ) ), # Warlock
( ( ), ( 1131, 1132, 1133, ), ( 1159, 1160, 1161 ), ( 1191, 1192, 1193 ), ), # Monk
( ( 1059, 1058, 1060 ), ( 1125, 1126, 1127, 1128, ), ( 1153, 1154, 1155, 1156 ), ( 1196, 1197, 1198, 1199 ), ), # Druid
]
_profession_enchant_categories = [
165, # Leatherworking
171, # Alchemy
197, # Tailoring
202, # Engineering
333, # Enchanting
773, # Inscription
]
# General
_skill_categories = [
0,
840, # Warrior
800, # Paladin
795, # Hunter
921, # Rogue
804, # Priest
796, # Death Knight
924, # Shaman
904, # Mage
849, # Warlock
829, # Monk
798, # Druid
]
_pet_skill_categories = [
( ),
( ), # Warrior
( ), # Paladin
( 203, 208, 209, 210, 211, 212, 213, 214, 215, 217, 218, 236, 251, 270, 653, 654, 655, 656, 763, 764, 765, 766, 767, 768, 775, 780, 781, 783, 784, 785, 786, 787, 788, 808, 811 ), # Hunter
( ), # Rogue
( ), # Priest
( 782, ), # Death Knight
( 962, 963 ), # Shaman
( 805, ), # Mage
( 188, 189, 204, 205, 206, 207, 761 ), # Warlock
( ), # Monk
( ), # Druid
]
# Specialization categories, Spec0 | Spec1 | Spec2
# Note, these are reset for MoP
_spec_skill_categories = [
(),
( 71, 72, 73, 0 ), # Warrior
( 65, 66, 70, 0 ), # Paladin
( 254, 255, 256, 0 ), # Hunter
( 259, 260, 261, 0 ), # Rogue
( 256, 257, 258, 0 ), # Priest
( 250, 251, 252, 0 ), # Death Knight
( 262, 263, 264, 0 ), # Shaman
( 62, 63, 64, 0 ), # Mage
( 265, 266, 267, 0 ), # Warlock
( 268, 270, 269, 0 ), # Monk
( 102, 103, 104, 105 ), # Druid
]
_race_categories = [
(),
( 754, ), # Human 0x0001
( 125, ), # Orc 0x0002
( 101, ), # Dwarf 0x0004
( 126, ), # Night-elf 0x0008
( 220, ), # Undead 0x0010
( 124, ), # Tauren 0x0020
( 753, ), # Gnome 0x0040
( 733, ), # Troll 0x0080
( 790, ), # Goblin 0x0100? not defined yet
( 756, ), # Blood elf 0x0200
( 760, ), # Draenei 0x0400
(), # Fel Orc
(), # Naga
(), # Broken
(), # Skeleton
(), # Vrykul
(), # Tuskarr
(), # Forest Troll
(), # Taunka
(), # Northrend Skeleton
(), # Ice Troll
( 789, ), # Worgen 0x200000
(), # Gilnean
]
_skill_category_blacklist = [
148, # Horse Riding
762, # Riding
183, # Generic (DND)
]
# Any spell with this effect type, will be automatically
# blacklisted
# http://github.com/mangos/mangos/blob/400/src/game/SharedDefines.h
_effect_type_blacklist = [
5, # SPELL_EFFECT_TELEPORT_UNITS
#10, # SPELL_EFFECT_HEAL
18, # SPELL_EFFECT_RESURRECT
25, # SPELL_EFFECT_WEAPONS
39, # SPELL_EFFECT_LANGUAGE
47, # SPELL_EFFECT_TRADESKILL
50, # SPELL_EFFECT_TRANS_DOOR
60, # SPELL_EFFECT_PROFICIENCY
71, # SPELL_EFFECT_PICKPOCKET
94, # SPELL_EFFECT_SELF_RESURRECT
97, # SPELL_EFFECT_SUMMON_ALL_TOTEMS
109, # SPELL_EFFECT_SUMMON_DEAD_PET
110, # SPELL_EFFECT_DESTROY_ALL_TOTEMS
118, # SPELL_EFFECT_SKILL
126, # SPELL_STEAL_BENEFICIAL_BUFF
]
# http://github.com/mangos/mangos/blob/400/src/game/SpellAuraDefines.h
_aura_type_blacklist = [
1, # SPELL_AURA_BIND_SIGHT
2, # SPELL_AURA_MOD_POSSESS
5, # SPELL_AURA_MOD_CONFUSE
6, # SPELL_AURA_MOD_CHARM
7, # SPELL_AURA_MOD_FEAR
#8, # SPELL_AURA_PERIODIC_HEAL
17, # SPELL_AURA_MOD_STEALTH_DETECT
25, # SPELL_AURA_MOD_PACIFY
30, # SPELL_AURA_MOD_SKILL (various skills?)
#31, # SPELL_AURA_MOD_INCREASE_SPEED
44, # SPELL_AURA_TRACK_CREATURES
45, # SPELL_AURA_TRACK_RESOURCES
56, # SPELL_AURA_TRANSFORM
58, # SPELL_AURA_MOD_INCREASE_SWIM_SPEED
75, # SPELL_AURA_MOD_LANGUAGE
78, # SPELL_AURA_MOUNTED
82, # SPELL_AURA_WATER_BREATHING
91, # SPELL_AURA_MOD_DETECT_RANGE
98, # SPELL_AURA_MOD_SKILL (trade skills?)
104, # SPELL_AURA_WATER_WALK,
105, # SPELL_AURA_FEATHER_FALL
151, # SPELL_AURA_TRACK_STEALTHED
154, # SPELL_AURA_MOD_STEALTH_LEVEL
156, # SPELL_AURA_MOD_REPUTATION_GAIN
206, # SPELL_AURA_MOD_FLIGHT_SPEED_xx begin
207,
208,
209,
210,
211,
212 # SPELL_AURA_MOD_FLIGHT_SPEED_xx ends
]
_mechanic_blacklist = [
21, # MECHANIC_MOUNT
]
_spell_blacklist = [
3561, # Teleports --
3562,
3563,
3565,
3566,
3567, # -- Teleports
20585, # Wisp spirit (night elf racial)
42955, # Conjure Refreshment
43987, # Ritual of Refreshment
48018, # Demonic Circle: Summon
48020, # Demonic Circle: Teleport
69044, # Best deals anywhere (goblin racial)
69046, # Pack hobgoblin (woe hogger)
68978, # Flayer (worgen racial)
68996, # Two forms (worgen racial)
]
_spell_name_blacklist = [
"^Languages",
"^Teleport:",
"^Weapon Skills",
"^Armor Skills",
"^Tamed Pet Passive",
]
_spell_families = {
'mage': 3,
'warrior': 4,
'warlock': 5,
'priest': 6,
'druid': 7,
'rogue': 8,
'hunter': 9,
'paladin': 10,
'shaman': 11,
'deathknight': 15,
'monk': 53
}
def __init__(self, options):
DataGenerator.__init__(self, options)
self._dbc = [
'Spell', 'SpellEffect', 'SpellScaling', 'SpellCooldowns', 'SpellRange', 'SpellClassOptions',
'SpellDuration', 'SpellPower', 'SpellLevels', 'SpellCategories', 'SpellCategory', 'Talent',
'SkillLineAbility', 'SpellAuraOptions', 'SpellRuneCost', 'SpellRadius', 'GlyphProperties',
'SpellCastTimes', 'ItemSet', 'SpellDescriptionVariables', 'SpellItemEnchantment', 'Item-sparse',
'Item', 'SpellEquippedItems', 'SpellIcon', 'SpecializationSpells', 'ChrSpecialization', 'SpellEffectScaling',
'SpellMisc', 'SpellProcsPerMinute', 'ItemSetSpell', 'ItemEffect', 'MinorTalent' ]
def initialize(self):
DataGenerator.initialize(self)
# Map Spell effects to Spell IDs so we can do filtering based on them
for spell_effect_id, spell_effect_data in self._spelleffect_db.iteritems():
if not spell_effect_data.id_spell:
continue
spell = self._spell_db[spell_effect_data.id_spell]
if not spell.id:
continue
spell.add_effect(spell_effect_data)
# Map Spell effect scaling to Spell Effects
for ses_id,ses_data in self._spelleffectscaling_db.iteritems():
if not ses_data.id_effect:
continue
effect = self._spelleffect_db[ses_data.id_effect]
if not effect.id:
continue
effect.scaling = ses_data
# Map Spell powers to spell ids
for spell_power_id, spell_power_data in self._spellpower_db.iteritems():
if not spell_power_data.id_spell:
continue
spell = self._spell_db[spell_power_data.id_spell]
if not spell.id:
continue
spell.add_power(spell_power_data)
# For builds 15589+, map SpellMisc.dbc to spell ids
#for spell_misc_id, spell_misc_data in self._spellmisc_db.iteritems():
# if not spell_misc_data.id_spell:
# continue
#
# spell = self._spell_db[spell_misc_data.id_spell]
# if not spell.id:
# continue
#
# spell.add_misc(spell_misc_data)
# For WoD, map ItemSetSpell.dbc to ItemSet.dbc
for isb_id, data in self._itemsetspell_db.iteritems():
item_set = self._itemset_db[data.id_item_set]
if not item_set.id:
continue
item_set.bonus.append(data)
# For WoD, map ItemEffect to Item-sparse
for is_id,data in self._itemeffect_db.iteritems():
item = self._item_sparse_db[data.id_item]
if not item.id:
continue
item.spells.append(data)
return True
def class_mask_by_skill(self, skill):
for i in xrange(0, len(self._skill_categories)):
if self._skill_categories[i] == skill:
return DataGenerator._class_masks[i]
return 0
def class_mask_by_spec_skill(self, spec_skill):
for i in xrange(0, len(self._spec_skill_categories)):
if spec_skill in self._spec_skill_categories[i]:
return DataGenerator._class_masks[i]
return 0
def class_mask_by_pet_skill(self, pet_skill):
for i in xrange(0, len(self._pet_skill_categories)):
if pet_skill in self._pet_skill_categories[i]:
return DataGenerator._class_masks[i]
return 0
def race_mask_by_skill(self, skill):
for i in xrange(0, len(self._race_categories)):
if skill in self._race_categories[i]:
return DataGenerator._race_masks[i]
return 0
def process_spell(self, spell_id, result_dict, mask_class = 0, mask_race = 0, state = True):
filter_list = { }
lst = self.generate_spell_filter_list(spell_id, mask_class, mask_race, filter_list, state)
if not lst:
return
for k, v in lst.iteritems():
if result_dict.get(k):
result_dict[k]['mask_class'] |= v['mask_class']
result_dict[k]['mask_race'] |= v['mask_race']
else:
result_dict[k] = { 'mask_class': v['mask_class'], 'mask_race' : v['mask_race'], 'effect_list': v['effect_list'] }
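# result_dict maps spell id -> { 'mask_class', 'mask_race', 'effect_list' },
# where effect_list is a per-effect-index list of booleans; repeated calls
# OR the class and race masks together, so a spell shared by several
# classes or races accumulates all of their bits.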
def spell_state(self, spell, enabled_effects = None):
# Check for blacklisted spells
if spell.id in SpellDataGenerator._spell_blacklist:
self.debug("Spell id %u (%s) is blacklisted" % ( spell.id, spell.name ) )
return False
# Check for spell name blacklist
for p in SpellDataGenerator._spell_name_blacklist:
if spell.name and re.search(p, spell.name):
self.debug("Spell id %u (%s) matches name blacklist pattern %s" % ( spell.id, spell.name, p ) )
return False
# Check for blacklisted spell category mechanism
if spell.id_categories > 0:
c = self._spellcategories_db[spell.id_categories]
if c.mechanic in SpellDataGenerator._mechanic_blacklist:
self.debug("Spell id %u (%s) matches mechanic blacklist %u" % ( spell.id, spell.name, c.mechanic ))
return False
# Make sure we can filter based on effects even if there's no map of relevant effects
if enabled_effects == None:
enabled_effects = [ True ] * ( spell.max_effect_index + 1 )
# Effect blacklist processing
for effect_index in xrange(0, len(spell._effects)):
if not spell._effects[effect_index]:
enabled_effects[effect_index] = False
continue
effect = spell._effects[effect_index]
# Blacklist by effect type
if effect.type in SpellDataGenerator._effect_type_blacklist:
enabled_effects[effect.index] = False
# Blacklist by apply aura (party, raid)
if effect.type in [ 6, 35, 65 ] and effect.sub_type in SpellDataGenerator._aura_type_blacklist:
enabled_effects[effect.index] = False
# If we do not find a true value in enabled effects, this spell is completely
# blacklisted, as it has no effects enabled that interest us
if True not in enabled_effects:
self.debug("Spell id %u (%s) has no enabled effects" % ( spell.id, spell.name ) )
return False
return True
def generate_spell_filter_list(self, spell_id, mask_class, mask_race, filter_list = { }, state = True):
spell = self._spell_db[spell_id]
enabled_effects = [ True ] * ( spell.max_effect_index + 1 )
if not spell.id:
return None
if state and not self.spell_state(spell, enabled_effects):
return None
filter_list[spell.id] = { 'mask_class': mask_class, 'mask_race': mask_race, 'effect_list': enabled_effects }
# Add spell triggers to the filter list recursively
for effect in spell._effects:
if not effect or spell.id == effect.trigger_spell:
continue
# Regardless of trigger_spell or not, if the effect is not enabled,
# we do not process it
if not enabled_effects[effect.index]:
continue
if effect.trigger_spell > 0:
if effect.trigger_spell in filter_list.keys():
continue
lst = self.generate_spell_filter_list(effect.trigger_spell, mask_class, mask_race, filter_list)
if not lst:
continue
for k, v in lst.iteritems():
if filter_list.get(k):
filter_list[k]['mask_class'] |= v['mask_class']
filter_list[k]['mask_race'] |= v['mask_race']
else:
filter_list[k] = { 'mask_class': v['mask_class'], 'mask_race' : v['mask_race'], 'effect_list': v['effect_list'] }
spell_refs = re.findall(SpellDataGenerator._spell_ref_rx, spell.desc or '')
spell_refs += re.findall(SpellDataGenerator._spell_ref_rx, spell.tt or '')
if spell.id_desc_var and self._spelldescriptionvariables_db.get(spell.id_desc_var):
spell_refs += re.findall(SpellDataGenerator._spell_ref_rx, self._spelldescriptionvariables_db.get(spell.id_desc_var).var)
spell_refs = list(set(spell_refs))
for ref_spell_id in spell_refs:
rsid = int(ref_spell_id)
if rsid == spell.id:
continue
            if rsid in filter_list:
continue
lst = self.generate_spell_filter_list(rsid, mask_class, mask_race, filter_list)
if not lst:
continue
for k, v in lst.iteritems():
if filter_list.get(k):
filter_list[k]['mask_class'] |= v['mask_class']
filter_list[k]['mask_race'] |= v['mask_race']
else:
filter_list[k] = { 'mask_class': v['mask_class'], 'mask_race' : v['mask_race'], 'effect_list': v['effect_list'] }
return filter_list
def filter(self):
ids = { }
# First, get spells from talents. Pet and character class alike
for talent_id, talent_data in self._talent_db.iteritems():
mask_class = 0
# These may now be pet talents
if talent_data.class_id > 0:
mask_class = DataGenerator._class_masks[talent_data.class_id]
self.process_spell(getattr(talent_data, 'id_spell'), ids, mask_class, 0, False)
# Get all perks
for perk_id, perk_data in self._minortalent_db.iteritems():
if perk_data.id_spell == 0:
continue
spec_data = self._chrspecialization_db[perk_data.id_spec]
if spec_data.id == 0:
continue
self.process_spell(perk_data.id_spell, ids, DataGenerator._class_masks[spec_data.class_id], 0, False)
# Get base skills from SkillLineAbility
for ability_id, ability_data in self._skilllineability_db.iteritems():
mask_class_category = 0
mask_race_category = 0
if ability_data.id_skill in SpellDataGenerator._skill_category_blacklist:
continue
# Guess class based on skill category identifier
mask_class_category = self.class_mask_by_skill(ability_data.id_skill)
if mask_class_category == 0:
mask_class_category = self.class_mask_by_spec_skill(ability_data.id_skill)
if mask_class_category == 0:
mask_class_category = self.class_mask_by_pet_skill(ability_data.id_skill)
# Guess race based on skill category identifier
mask_race_category = self.race_mask_by_skill(ability_data.id_skill)
# Make sure there's a class or a race for an ability we are using
if not ability_data.mask_class and not ability_data.mask_race and not mask_class_category and not mask_race_category:
continue
spell = self._spell_db[ability_data.id_spell]
if not spell.id:
continue
self.process_spell(spell.id, ids, ability_data.mask_class or mask_class_category, ability_data.mask_race or mask_race_category)
# Get specialization skills from SpecializationSpells and masteries from ChrSpecializations
for spec_id, spec_spell_data in self._specializationspells_db.iteritems():
# Guess class based on specialization category identifier
spec_data = self._chrspecialization_db[spec_spell_data.spec_id]
if spec_data.id == 0:
continue
spell = self._spell_db[spec_spell_data.spell_id]
if not spell.id:
continue
mask_class = 0
if spec_data.class_id > 0:
mask_class = DataGenerator._class_masks[spec_data.class_id]
# Hunter pet classes have a class id of 0, tag them as "hunter spells" like before
else:
mask_class = DataGenerator._class_masks[3]
self.process_spell(spell.id, ids, mask_class, 0, False)
            if spell.id in ids:
                ids[spell.id]['replace_spell_id'] = spec_spell_data.replace_spell_id
for spec_id, spec_data in self._chrspecialization_db.iteritems():
s = self._spell_db[spec_data.id_mastery]
if s.id == 0:
continue
if self._spellmisc_db[s.id_misc].flags_12694 & 0x20000000:
self.process_spell(s.id, ids, DataGenerator._class_masks[spec_data.class_id], 0, False)
# Get spells relating to item enchants, so we can populate a (nice?) list
for enchant_id, enchant_data in self._spellitemenchantment_db.iteritems():
for i in xrange(1, 4):
type_field_str = 'type_%d' % i
id_field_str = 'id_property_%d' % i
# "combat spell", "equip spell", "use spell"
if getattr(enchant_data, type_field_str) not in [ 1, 3, 7 ]:
continue
spell_id = getattr(enchant_data, id_field_str)
if not spell_id:
continue
self.process_spell(spell_id, ids, 0, 0)
# Get spells that create item enchants
for ability_id, ability_data in self._skilllineability_db.iteritems():
if ability_data.id_skill not in self._profession_enchant_categories:
                continue
spell = self._spell_db[ability_data.id_spell]
if not spell.id:
continue
enchant_spell_id = 0
for effect in spell._effects:
# Grab Enchant Items and Create Items (create item will be filtered further)
if not effect or (effect.type != 53 and effect.type != 24):
continue
# Create Item, see if the created item has a spell that enchants an item, if so
# add the enchant spell. Also grab all gem spells
if effect.type == 24:
item = self._item_sparse_db[effect.item_type]
if not item.id or item.gem_props == 0:
continue
for spell in item.spells:
id_spell = spell.id_spell
enchant_spell = self._spell_db[id_spell]
for enchant_effect in enchant_spell._effects:
if not enchant_effect or (enchant_effect.type != 53 and enchant_effect.type != 6):
continue
enchant_spell_id = id_spell
break
if enchant_spell_id > 0:
break
elif effect.type == 53:
spell_item_ench = self._spellitemenchantment_db[effect.misc_value]
#if (spell_item_ench.req_skill == 0 and self._spelllevels_db[spell.id_levels].base_level < 60) or \
# (spell_item_ench.req_skill > 0 and spell_item_ench.req_skill_value <= 375):
# continue
enchant_spell_id = spell.id
if enchant_spell_id > 0:
break
# Valid enchant, process it
if enchant_spell_id > 0:
self.process_spell(enchant_spell_id, ids, 0, 0)
# Rest of the Item enchants relevant to us, such as Shoulder / Head enchants
for item_id, data in self._item_sparse_db.iteritems():
blacklist_item = False
classdata = self._item_db[item_id]
if item_id in ItemDataGenerator._item_blacklist:
continue
for pat in ItemDataGenerator._item_name_blacklist:
if data.name and re.search(pat, data.name):
blacklist_item = True
if blacklist_item:
continue
# Consumables, Flasks, Elixirs, Potions, Food & Drink, Permanent Enchants
if classdata.classs != 12 and \
(classdata.classs != 7 or classdata.subclass not in [3]) and \
(classdata.classs != 0 or classdata.subclass not in [1, 2, 3, 5, 6]):
continue
# Grab relevant spells from quest items, this in essence only
# includes certain permanent enchants
if classdata.classs == 12:
for spell in data.spells:
spell_id = spell.id_spell
if spell_id == 0:
continue
spell = self._spell_db[spell_id]
for effect in spell._effects:
if not effect or effect.type != 53:
continue
self.process_spell(spell_id, ids, 0, 0)
# Grab relevant spells from consumables as well
elif classdata.classs == 0:
for item_effect in data.spells:
spell = self._spell_db[item_effect.id_spell]
if not spell.id:
continue
# Potions and Elixirs need to apply attributes, rating or
# armor
if classdata.has_value('subclass', [1, 2, 3]) and spell.has_effect('sub_type', [13, 22, 29, 99, 189, 465]):
self.process_spell(spell.id, ids, 0, 0)
# Food needs to have a periodically triggering effect
# (presumed to be always a stat giving effect)
elif classdata.has_value('subclass', 5) and spell.has_effect('sub_type', 23):
self.process_spell(spell.id, ids, 0, 0)
# Permanent enchants
elif classdata.has_value('subclass', 6):
self.process_spell(spell.id, ids, 0, 0)
# Hunter scopes and whatnot
elif classdata.classs == 7:
if classdata.has_value('subclass', 3):
for item_effect in data.spells:
spell = self._spell_db[item_effect.id_spell]
for effect in spell._effects:
if not effect:
continue
if effect.type == 53:
self.process_spell(spell.id, ids, 0, 0)
# Relevant set bonuses
for id, set_spell_data in self._itemsetspell_db.iteritems():
if not SetBonusListGenerator.is_extract_set_bonus(set_spell_data.id_item_set)[0]:
continue
self.process_spell(set_spell_data.id_spell, ids, 0, 0)
# Glyph effects, need to do trickery here to get actual effect from spellbook data
for ability_id, ability_data in self._skilllineability_db.iteritems():
if ability_data.id_skill != 810 or not ability_data.mask_class:
continue
use_glyph_spell = self._spell_db[ability_data.id_spell]
if not use_glyph_spell.id:
continue
# Find the on-use for glyph then, misc value will contain the correct GlyphProperties.dbc id
for effect in use_glyph_spell._effects:
if not effect or effect.type != 74: # Use glyph
continue
# Filter some erroneous glyph data out
glyph_data = self._glyphproperties_db[effect.misc_value]
if not glyph_data.id or not glyph_data.id_spell:
continue
self.process_spell(glyph_data.id_spell, ids, ability_data.mask_class, 0)
# Item enchantments that use a spell
for eid, data in self._spellitemenchantment_db.iteritems():
for attr_id in xrange(1, 4):
attr_type = getattr(data, 'type_%d' % attr_id)
if attr_type == 1 or attr_type == 3 or attr_type == 7:
sid = getattr(data, 'id_property_%d' % attr_id)
self.process_spell(sid, ids, 0, 0)
# Items with a spell identifier as "stats"
for iid, data in self._item_sparse_db.iteritems():
# Allow trinkets, weapons, 2hweapons to bypass ilevel checking
if data.inv_type not in [ 12, 13, 15, 17, 21, 22, 26 ] and \
(data.ilevel < self._options.min_ilevel or data.ilevel > self._options.max_ilevel):
continue
for spell in data.spells:
if spell.id_spell == 0:
continue
self.process_spell(spell.id_spell, ids, 0, 0)
# Last, get the explicitly defined spells in _spell_id_list on a class basis and the
# generic spells from SpellDataGenerator._spell_id_list[0]
for generic_spell_id in SpellDataGenerator._spell_id_list[0]:
            if generic_spell_id in ids:
sys.stderr.write('Whitelisted spell id %u (%s) already in the list of spells to be extracted.\n' % (
generic_spell_id, self._spell_db[generic_spell_id].name) )
self.process_spell(generic_spell_id, ids, 0, 0)
for cls in xrange(1, len(SpellDataGenerator._spell_id_list)):
for spell_tuple in SpellDataGenerator._spell_id_list[cls]:
                if len(spell_tuple) == 2 and spell_tuple[0] in ids:
sys.stderr.write('Whitelisted spell id %u (%s) already in the list of spells to be extracted.\n' % (
spell_tuple[0], self._spell_db[spell_tuple[0]].name) )
self.process_spell(spell_tuple[0], ids, self._class_masks[cls], 0)
for spell_id, spell_data in self._spell_db.iteritems():
for pattern in SpellDataGenerator._spell_name_whitelist:
if pattern.match(spell_data.name):
self.process_spell(spell_id, ids, 0, 0)
# After normal spells have been fetched, go through all spell ids,
# and get all the relevant aura_ids for selected spells
more_ids = { }
for spell_id, spell_data in ids.iteritems():
spell = self._spell_db[spell_id]
for power in spell._powers:
if not power or power.aura_id == 0:
continue
self.process_spell(power.aura_id, more_ids, spell_data['mask_class'], spell_data['mask_race'])
for id, data in more_ids.iteritems():
            if id not in ids:
                ids[ id ] = data
else:
ids[id]['mask_class'] |= data['mask_class']
ids[id]['mask_race'] |= data['mask_race']
return ids
def generate(self, ids = None):
# Sort keys
        id_keys = sorted(ids.keys())
effects = set()
powers = set()
s = '#include "data_definitions.hh"\n\n'
s += '#define %sSPELL%s_SIZE (%d)\n\n' % (
(self._options.prefix and ('%s_' % self._options.prefix) or '').upper(),
(self._options.suffix and ('_%s' % self._options.suffix) or '').upper(),
len(ids)
)
s += '// %d spells, wow build level %d\n' % ( len(ids), self._options.build )
s += 'static struct spell_data_t __%sspell%s_data[] = {\n' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or ''
)
index = 0
for id in id_keys + [ 0 ]:
spell = self._spell_db[id]
if not spell.id and id > 0:
                sys.stderr.write('Spell id %d not found\n' % id)
continue
#if len(spell._misc) > 1:
# sys.stderr.write('Spell id %u (%s) has more than one SpellMisc.dbc entry\n' % ( spell.id, spell.name ) )
# continue
for power in spell._powers:
                if power is None:
continue
powers.add( power )
if index % 20 == 0:
s += '//{ Name , Id,Flags,PrjSp, Sch, Class, Race,Sca,MSL,SpLv,MxL,MinRange,MaxRange,Cooldown, GCD,Chg, ChrgCd, Cat, Duration, RCost, RPG,Stac, PCh,PCr, ProcFlags,EqpCl, EqpInvType,EqpSubclass,CastMn,CastMx,Div, Scaling,SLv, RplcId, { Attr1, Attr2, Attr3, Attr4, Attr5, Attr6, Attr7, Attr8, Attr9, Attr10, Attr11, Attr12 }, { Flags1, Flags2, Flags3, Flags4 }, Family, Description, Tooltip, Description Variable, Icon, ActiveIcon, Effect1, Effect2, Effect3 },\n'
fields = spell.field('name', 'id')
fields += [ '%#.2x' % 0 ]
fields += self._spellmisc_db[spell.id_misc].field('prj_speed', 'mask_school')
# Hack in the combined class from the id_tuples dict
fields += [ '%#.3x' % ids.get(id, { 'mask_class' : 0, 'mask_race': 0 })['mask_class'] ]
fields += [ '%#.3x' % ids.get(id, { 'mask_class' : 0, 'mask_race': 0 })['mask_race'] ]
# Set the scaling index for the spell
fields += self._spellscaling_db[spell.id_scaling].field('id_class', 'max_scaling_level')
#fields += spell.field('extra_coeff')
fields += self._spelllevels_db[spell.id_levels].field('base_level', 'max_level')
fields += self._spellrange_db[self._spellmisc_db[spell.id_misc].id_range].field('min_range')
fields += self._spellrange_db[self._spellmisc_db[spell.id_misc].id_range].field('max_range')
fields += self._spellcooldowns_db[spell.id_cooldowns].field('cooldown_duration', 'gcd_cooldown')
category = self._spellcategories_db[spell.id_categories]
fields += self._spellcategory_db[category.id_category].field('charges', 'charge_cooldown')
fields += self._spellcategories_db[spell.id_categories].field('category')
fields += self._spellduration_db[self._spellmisc_db[spell.id_misc].id_duration].field('duration_1')
            fields += [ _rune_cost(self, None, self._spellrunecost_db[spell.id_rune_cost], '%#.4x') ]
fields += self._spellrunecost_db[spell.id_rune_cost].field('rune_power_gain')
fields += self._spellauraoptions_db[spell.id_aura_opt].field(
'stack_amount', 'proc_chance', 'proc_charges', 'proc_flags'
)
if self.dbc_version(50400, 17093):
fields += self._spellauraoptions_db[spell.id_aura_opt].field('internal_cooldown')
else:
fields += [ '%7u' % 0 ]
if self._options.build >= 17227:
aura = self._spellauraoptions_db[spell.id_aura_opt]
fields += self._spellprocsperminute_db[aura.id_ppm].field('ppm')
else:
fields += [ '%5.3f' % 0 ]
fields += self._spellequippeditems_db[spell.id_equip_items].field('item_class', 'mask_inv_type', 'mask_sub_class')
if spell.id_scaling:
fields += self._spellscaling_db[spell.id_scaling].field('cast_min', 'cast_max', 'cast_div')
fields += self._spellscaling_db[spell.id_scaling].field('c_scaling', 'c_scaling_threshold')
else:
fields += self._spellcasttimes_db[self._spellmisc_db[spell.id_misc].id_cast_time].field('min_cast_time', 'cast_time')
# Use default values, i.e., zeros
fields += self._spellscaling_db[0].field('cast_div', 'c_scaling', 'c_scaling_threshold' )
            if id in ids and 'replace_spell_id' in ids[id]:
fields += [ '%6u' % ids[id]['replace_spell_id'] ]
else:
fields += [ '%6u' % 0 ]
s_effect = []
effect_ids = []
for effect in spell._effects:
if effect and ids.get(id, { 'effect_list': [ False ] })['effect_list'][effect.index]:
effects.add( ( effect.id, spell.id_scaling ) )
effect_ids.append( '%u' % effect.id )
# Add spell flags
fields += [ '{ %s }' % ', '.join(self._spellmisc_db[spell.id_misc].field('flags', 'flags_1', 'flags_2', 'flags_3', 'flags_4', 'flags_5', 'flags_6', 'flags_7', 'flags_12694', 'flags_8', 'unk_2', 'flags_15668')) ]
fields += [ '{ %s }' % ', '.join(self._spellclassoptions_db[spell.id_class_opts].field('spell_family_flags_1', 'spell_family_flags_2', 'spell_family_flags_3', 'spell_family_flags_4')) ]
fields += self._spellclassoptions_db[spell.id_class_opts].field('spell_family_name')
fields += spell.field('desc', 'tt')
if spell.id_desc_var and self._spelldescriptionvariables_db.get(spell.id_desc_var):
fields += self._spelldescriptionvariables_db[spell.id_desc_var].field('var')
else:
fields += [ '0' ]
if spell.id_misc and self._spellicon_db.get(self._spellmisc_db[spell.id_misc].id_icon):
fields += self._spellicon_db[self._spellmisc_db[spell.id_misc].id_icon].field('name')
else:
fields += [ '0' ]
if spell.id_misc and self._spellicon_db.get(self._spellmisc_db[spell.id_misc].id_active_icon):
fields += self._spellicon_db[self._spellmisc_db[spell.id_misc].id_active_icon].field('name')
else:
fields += [ '0' ]
fields += spell.field('rank')
# Pad struct with empty pointers for direct access to spell effect data
fields += [ '0', '0' ]
s += ' { %s }, /* %s */\n' % (', '.join(fields), ', '.join(effect_ids))
index += 1
s += '};\n\n'
s += '#define __%sSPELLEFFECT%s_SIZE (%d)\n\n' % (
(self._options.prefix and ('%s_' % self._options.prefix) or '').upper(),
(self._options.suffix and ('_%s' % self._options.suffix) or '').upper(),
len(effects)
)
s += '// %d effects, wow build level %d\n' % ( len(effects), self._options.build )
s += 'static struct spelleffect_data_t __%sspelleffect%s_data[] = {\n' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or ''
)
index = 0
for effect_data in sorted(effects) + [ ( 0, 0 ) ]:
effect = self._spelleffect_db[effect_data[0]]
if not effect.id and effect_data[ 0 ] > 0:
                sys.stderr.write('Spell Effect id %d not found\n' % effect_data[0])
continue
if index % 20 == 0:
s += '//{ Id,Flags, SpId,Idx, EffectType , EffectSubType , Average, Delta, Unknown, Coefficient, APCoefficient, Ampl, Radius, RadMax, BaseV, MiscV, MiscV2, { Flags1, Flags2, Flags3, Flags4 }, Trigg, DmgMul, CboP, RealP,Die, 0, 0 },\n'
fields = effect.field('id')
fields += [ '%#.2x' % 0 ]
fields += effect.field('id_spell', 'index')
tmp_fields = []
if constants.effect_type.get(effect.type):
tmp_fields += [ '%-*s' % ( constants.effect_type_maxlen, constants.effect_type.get(effect.type) ) ]
else:
#print "Type %d missing" % effect.type
tmp_fields += [ '%-*s' % ( constants.effect_type_maxlen, 'E_%d' % effect.type ) ]
if constants.effect_subtype.get(effect.sub_type):
tmp_fields += [ '%-*s' % ( constants.effect_subtype_maxlen, constants.effect_subtype.get(effect.sub_type) ) ]
else:
#stm.add(effect.sub_type)
tmp_fields += [ '%-*s' % ( constants.effect_subtype_maxlen, 'A_%d' % effect.sub_type ) ]
fields += tmp_fields
            if effect.scaling is None:
fields += self._spelleffectscaling_db[0].field('average', 'delta', 'bonus')
else:
fields += effect.scaling.field('average', 'delta', 'bonus')
fields += effect.field('coefficient', 'ap_coefficient', 'amplitude')
fields += self._spellradius_db[effect.id_radius].field('radius_1')
fields += self._spellradius_db[effect.id_radius_max].field('radius_1')
fields += effect.field('base_value', 'misc_value', 'misc_value_2')
fields += [ '{ %s }' % ', '.join( effect.field('class_mask_1', 'class_mask_2', 'class_mask_3', 'class_mask_4' ) ) ]
fields += effect.field('trigger_spell', 'dmg_multiplier', 'points_per_combo_points', 'real_ppl', 'die_sides')
# Pad struct with empty pointers for direct spell data access
fields += [ '0', '0' ]
s += ' { %s },\n' % (', '.join(fields))
index += 1
s += '};\n\n'
index = 0
        powers = sorted(powers, key = lambda power: power.id)
s += '#define __%s_SIZE (%d)\n\n' % ( self.format_str( "spellpower" ).upper(), len(powers) )
s += '// %d effects, wow build level %d\n' % ( len(powers), self._options.build )
s += 'static struct spellpower_data_t __%s_data[] = {\n' % ( self.format_str( "spellpower" ) )
for power in powers + [ self._spellpower_db[0] ]:
fields = power.field('id', 'id_spell', 'aura_id', 'type_power', 'cost', 'cost_2', 'cost_per_second', 'cost_per_second2' )
s += ' { %s },\n' % (', '.join(fields))
s += '};\n\n'
return s
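# Illustrative sketch, not used by the generators in this file:
# generate_spell_filter_list() above is in essence a transitive closure over
# spell triggers and tooltip references, with the filter_list dict acting as
# the visited set. The toy graph below is hypothetical and only demonstrates
# the traversal shape.
def _example_trigger_closure(trigger_graph, root_id, visited = None):
    # trigger_graph: dict of spell id -> list of triggered/referenced spell ids
    if visited is None:
        visited = set()
    if root_id in visited:
        return visited
    visited.add(root_id)
    for triggered_id in trigger_graph.get(root_id, []):
        _example_trigger_closure(trigger_graph, triggered_id, visited)
    return visited

# For example, _example_trigger_closure({ 1: [ 2, 3 ], 2: [ 3 ] }, 1) visits
# each spell exactly once and returns set([1, 2, 3]).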
class MasteryAbilityGenerator(DataGenerator):
def __init__(self, options):
DataGenerator.__init__(self, options)
self._dbc = [ 'Spell', 'ChrSpecialization' ]
if options.build >= 15589:
self._dbc.append( 'SpellMisc' )
def filter(self):
ids = {}
for k, v in self._chrspecialization_db.iteritems():
if v.class_id == 0:
continue
s = self._spell_db[v.id_mastery]
if s.id == 0:
continue
if self._spellmisc_db[s.id_misc].flags_12694 & 0x20000000:
ids[v.id_mastery] = { 'mask_class' : v.class_id, 'category' : v.spec_id, 'spec_name' : v.name }
return ids
def generate(self, ids = None):
max_ids = 0
mastery_class = 0
        # 12 class rows (index 0 unused), 4 spec columns each
        keys = [ [ [] for j in xrange(4) ] for i in xrange(12) ]
for k, v in ids.iteritems():
keys[v['mask_class']][v['category']].append( ( self._spell_db[k].name, k, v['spec_name'] ) )
# Find out the maximum size of a key array
for cls in keys:
for spec in cls:
if len(spec) > max_ids:
max_ids = len(spec)
data_str = "%sclass_mastery_ability%s" % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or '',
)
s = '#define %s_SIZE (%d)\n\n' % (
data_str.upper(),
max_ids
)
s += '// Class mastery abilities, wow build %d\n' % self._options.build
s += 'static unsigned __%s_data[MAX_CLASS][MAX_SPECS_PER_CLASS][%s_SIZE] = {\n' % (
data_str,
data_str.upper(),
)
for cls in xrange(0, len(keys)):
if SpellDataGenerator._class_names[cls]:
s += ' // Class mastery abilities for %s\n' % ( SpellDataGenerator._class_names[cls] )
s += ' {\n'
for spec in xrange(0, len(keys[cls])):
if len(keys[cls][spec]) > 0:
s += ' // Masteries for %s specialization\n' % keys[cls][spec][0][2]
s += ' {\n'
for ability in sorted(keys[cls][spec], key = lambda i: i[0]):
s += ' %6u, // %s\n' % ( ability[1], ability[0] )
if len(keys[cls][spec]) < max_ids:
s += ' %6u,\n' % 0
s += ' },\n'
s += ' },\n'
s += '};\n'
return s
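# Illustrative sketch: MasteryAbilityGenerator.filter() above treats bit
# 0x20000000 of the SpellMisc flags_12694 field as the "is a mastery spell"
# marker. The helper below only names that test; the bit's meaning is
# inferred from its use in this file, not from official documentation.
def _example_is_mastery_spell(spell_misc_record):
    return (spell_misc_record.flags_12694 & 0x20000000) != 0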
class RacialSpellGenerator(SpellDataGenerator):
def __init__(self, options):
SpellDataGenerator.__init__(self, options)
SpellDataGenerator._class_categories = []
def filter(self):
ids = { }
for ability_id, ability_data in self._skilllineability_db.iteritems():
racial_spell = 0
# Take only racial spells to this
for j in xrange(0, len(SpellDataGenerator._race_categories)):
if ability_data.id_skill in SpellDataGenerator._race_categories[j]:
racial_spell = j
break
if not racial_spell:
continue
spell = self._spell_db[ability_data.id_spell]
if not self.spell_state(spell):
continue
if ids.get(ability_data.id_spell):
ids[ability_data.id_spell]['mask_class'] |= ability_data.mask_class
ids[ability_data.id_spell]['mask_race'] |= (ability_data.mask_race or (1 << (racial_spell - 1)))
else:
ids[ability_data.id_spell] = { 'mask_class': ability_data.mask_class, 'mask_race' : ability_data.mask_race or (1 << (racial_spell - 1)) }
return ids
def generate(self, ids = None):
        keys = [ [ [] for j in xrange(len(DataGenerator._class_names)) ] for i in xrange(len(DataGenerator._race_names)) ]
        max_ids = 0
for k, v in ids.iteritems():
# Add this for all races and classes that have a mask in v['mask_race']
for race_bit in xrange(0, len(DataGenerator._race_names)):
if not DataGenerator._race_names[race_bit]:
continue
if v['mask_race'] & (1 << (race_bit - 1)):
if v['mask_class']:
for class_bit in xrange(0, len(DataGenerator._class_names)):
if not DataGenerator._class_names[class_bit]:
continue
if v['mask_class'] & (1 << (class_bit - 1)):
spell = self._spell_db[k]
keys[race_bit][class_bit].append( ( spell.name, k ) )
# Generic racial spell, goes to "class 0"
else:
spell = self._spell_db[k]
keys[race_bit][0].append( ( spell.name, k ) )
# Figure out tree with most abilities
for race in xrange(0, len(keys)):
for cls in xrange(0, len(keys[race])):
if len(keys[race][cls]) > max_ids:
max_ids = len(keys[race][cls])
data_str = "%srace_ability%s" % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or '',
)
# Then, output the stuffs
s = '#define %s_SIZE (%d)\n\n' % (
data_str.upper(),
max_ids
)
s += "#ifndef %s\n#define %s (%d)\n#endif\n\n" % (
self.format_str( 'MAX_RACE' ),
self.format_str( 'MAX_RACE' ),
len(DataGenerator._race_names) )
s += '// Racial abilities, wow build %d\n' % self._options.build
s += 'static unsigned __%s_data[%s][%s][%s_SIZE] = {\n' % (
self.format_str( 'race_ability' ),
self.format_str( 'MAX_RACE' ),
self.format_str( 'MAX_CLASS' ),
data_str.upper()
)
for race in xrange(0, len(keys)):
if DataGenerator._race_names[race]:
s += ' // Racial abilities for %s\n' % DataGenerator._race_names[race]
s += ' {\n'
for cls in xrange(0, len(keys[race])):
if len(keys[race][cls]) > 0:
if cls == 0:
s += ' // Generic racial abilities\n'
else:
s += ' // Racial abilities for %s class\n' % DataGenerator._class_names[cls]
s += ' {\n'
else:
s += ' { %5d, },\n' % 0
continue
for ability in sorted(keys[race][cls], key = lambda i: i[0]):
s += ' %5d, // %s\n' % ( ability[1], ability[0] )
if len(keys[race][cls]) < max_ids:
s += ' %5d,\n' % 0
s += ' },\n'
s += ' },\n'
s += '};\n'
return s
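# Illustrative sketch: race and class masks in this file are bitmasks where
# bit N - 1 corresponds to race/class id N, which is why the filters above
# use expressions like (1 << (racial_spell - 1)). The helpers below are
# hypothetical and only restate that convention.
def _example_id_to_mask(id_):
    # id 1 -> 0x1, id 2 -> 0x2, id 3 -> 0x4, ...
    return 1 << (id_ - 1)

def _example_mask_has_id(mask, id_):
    # True if the bit for the given race/class id is set in the mask
    return (mask & (1 << (id_ - 1))) != 0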
class SpecializationSpellGenerator(DataGenerator):
def __init__(self, options):
self._dbc = [ 'Spell', 'SpecializationSpells', 'ChrSpecialization' ]
DataGenerator.__init__(self, options)
def generate(self, ids = None):
max_ids = 0
        # 12 class rows (index 0 unused), 4 spec columns each
        keys = [ [ [] for j in xrange(4) ] for i in xrange(12) ]
for ssid, data in self._specializationspells_db.iteritems():
chrspec = self._chrspecialization_db[data.spec_id]
if chrspec.id == 0:
continue
spell = self._spell_db[data.spell_id]
if spell.id == 0:
continue
keys[chrspec.class_id][chrspec.spec_id].append( ( self._spell_db[data.spell_id].name, data.spell_id, chrspec.name ) )
# Figure out tree with most abilities
for cls in xrange(0, len(keys)):
for tree in xrange(0, len(keys[cls])):
if len(keys[cls][tree]) > max_ids:
max_ids = len(keys[cls][tree])
data_str = "%stree_specialization%s" % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or '',
)
s = '#define %s_SIZE (%d)\n\n' % (
data_str.upper(),
max_ids
)
s += '// Talent tree specialization abilities, wow build %d\n' % self._options.build
s += 'static unsigned __%s_data[][MAX_SPECS_PER_CLASS][%s_SIZE] = {\n' % (
data_str,
data_str.upper(),
)
for cls in xrange(0, len(keys)):
s += ' // Specialization abilities for %s\n' % (cls > 0 and DataGenerator._class_names[cls] or 'Hunter pets')
s += ' {\n'
for tree in xrange(0, len(keys[cls])):
if len(keys[cls][tree]) > 0:
s += ' // Specialization abilities for %s\n' % keys[cls][tree][0][2]
s += ' {\n'
for ability in sorted(keys[cls][tree], key = lambda i: i[0]):
s += ' %6u, // %s\n' % ( ability[1], ability[0] )
if len(keys[cls][tree]) < max_ids:
s += ' %6u,\n' % 0
s += ' },\n'
s += ' },\n'
s += '};\n'
return s
class PerkSpellGenerator(DataGenerator):
def __init__(self, options):
self._dbc = [ 'ChrSpecialization', 'MinorTalent', 'Spell' ]
DataGenerator.__init__(self, options)
def generate(self, ids = None):
max_ids = 0
        # 12 class rows (index 0 unused), 4 spec columns each
        keys = [ [ [] for j in xrange(4) ] for i in xrange(12) ]
spec_map = { }
for ssid, data in self._chrspecialization_db.iteritems():
spec_map[ssid] = (data.class_id, data.spec_id, data.name)
for mtid, data in self._minortalent_db.iteritems():
spec = data.id_spec
pos_data = spec_map[spec]
spell = self._spell_db[data.id_spell]
keys[pos_data[0]][pos_data[1]].append((data.index, data.id_spell, pos_data[2], spell.name))
# Figure out tree with most abilities
for cls in xrange(0, len(keys)):
for tree in xrange(0, len(keys[cls])):
if len(keys[cls][tree]) > max_ids:
max_ids = len(keys[cls][tree])
data_str = "%sperk%s" % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or '',
)
s = '#define %s_SIZE (%d)\n\n' % (
data_str.upper(),
max_ids
)
s += '// Perk specialization abilities, wow build %d\n' % self._options.build
s += 'static unsigned __%s_data[][MAX_SPECS_PER_CLASS][%s_SIZE] = {\n' % (
data_str,
data_str.upper(),
)
for cls in xrange(0, len(keys)):
s += ' {\n'
for tree in xrange(0, len(keys[cls])):
if len(keys[cls][tree]) > 0:
s += ' // %s\n' % keys[cls][tree][0][2]
s += ' {\n'
for ability in sorted(keys[cls][tree], key = lambda i: i[0]):
s += ' %6u, // %d: %s\n' % ( ability[1], ability[0], ability[3] )
if len(keys[cls][tree]) < max_ids:
s += ' %6u,\n' % 0
s += ' },\n'
s += ' },\n'
s += '};\n'
return s
class SpellListGenerator(SpellDataGenerator):
def __init__(self, options):
SpellDataGenerator.__init__(self, options)
def spell_state(self, spell, enabled_effects = None):
if not SpellDataGenerator.spell_state(self, spell, None):
return False
# Skip passive spells
if self._spellmisc_db[spell.id_misc].flags & 0x40:
self.debug( "Spell id %u (%s) marked as passive" % ( spell.id, spell.name ) )
return False
if self._spellmisc_db[spell.id_misc].flags & 0x80:
self.debug( "Spell id %u (%s) marked as hidden" % ( spell.id, spell.name ) )
return False
# Skip by possible indicator for spellbook visibility
if self._spellmisc_db[spell.id_misc].flags_4 & 0x8000:
self.debug( "Spell id %u (%s) marked as hidden in spellbook" % ( spell.id, spell.name ) )
            return False
# Skip spells without any resource cost and category
found_power = False
for power in spell._powers:
if not power:
continue
if power.cost > 0 or power.cost_2 > 0 or power.cost_per_second > 0:
found_power = True
break
if not found_power and spell.id_rune_cost == 0 and spell.id_categories == 0:
self.debug( "Spell id %u (%s) has no power requirements" % ( spell.id, spell.name ) )
return False
# Make sure rune cost makes sense, even if the rune cost id is valid
if spell.id_rune_cost > 0:
src = self._spellrunecost_db[spell.id_rune_cost]
if src.rune_cost_1 == 0 and src.rune_cost_2 == 0 and src.rune_cost_3 == 0 and src.rune_cost_4 == 0:
self.debug( "Spell id %u (%s) has no sensible rune cost" % ( spell.id, spell.name ) )
return False
# Filter out any "Rank x" string, as there should no longer be such things. This should filter out
# some silly left over? things, or things not shown to player anyhow, so should be all good.
if spell.ofs_rank > 0 and 'Rank ' in spell.rank:
self.debug( "Spell id %u (%s) has a rank defined" % ( spell.id, spell.name ) )
return False
# Let's not accept spells that have over 100y range, as they cannot really be base abilities then
if self._spellmisc_db[spell.id_misc].id_range > 0:
range = self._spellrange_db[self._spellmisc_db[spell.id_misc].id_range]
if range.max_range > 100.0 or range.max_range_2 > 100.0:
self.debug( "Spell id %u (%s) has a high range (%f, %f)" % ( spell.id, spell.name, range.max_range, range.max_range_2 ) )
return False
# And finally, spells that are forcibly activated/disabled in whitelisting for
for cls in xrange(1, len(SpellDataGenerator._spell_id_list)):
for spell_tuple in SpellDataGenerator._spell_id_list[cls]:
if spell_tuple[0] == spell.id and len(spell_tuple) == 2 and spell_tuple[1] == 0:
return False
elif spell_tuple[0] == spell.id and len(spell_tuple) == 3:
return spell_tuple[2]
return True
def filter(self):
triggered_spell_ids = []
ids = { }
spell_tree = -1
spell_tree_name = ''
for ability_id, ability_data in self._skilllineability_db.iteritems():
if ability_data.id_skill in SpellDataGenerator._skill_category_blacklist:
continue
mask_class_skill = self.class_mask_by_skill(ability_data.id_skill)
mask_class_pet_skill = self.class_mask_by_pet_skill(ability_data.id_skill)
mask_class = 0
# Generic Class Ability
if mask_class_skill > 0:
spell_tree_name = "General"
spell_tree = 0
mask_class = mask_class_skill
elif mask_class_pet_skill > 0:
spell_tree_name = "Pet"
spell_tree = 5
mask_class = mask_class_pet_skill
# We only want abilities that belong to a class
if mask_class == 0:
continue
spell = self._spell_db[ability_data.id_spell]
if not spell.id:
continue
# Blacklist all triggered spells for this
for effect in spell._effects:
if not effect:
continue
if effect.trigger_spell > 0:
triggered_spell_ids.append(effect.trigger_spell)
# Check generic SpellDataGenerator spell state filtering before anything else
if not self.spell_state(spell):
continue
if ids.get(ability_data.id_spell):
ids[ability_data.id_spell]['mask_class'] |= ability_data.mask_class or mask_class
else:
ids[ability_data.id_spell] = {
'mask_class': ability_data.mask_class or mask_class,
'tree' : [ spell_tree ]
}
# Specialization spells
for ss_id, ss_data in self._specializationspells_db.iteritems():
chrspec = self._chrspecialization_db[ss_data.spec_id]
if chrspec.class_id == 0:
continue
spell = self._spell_db[ss_data.spell_id]
if not spell.id:
continue
# Check generic SpellDataGenerator spell state filtering before anything else
if not self.spell_state(spell):
continue
if ids.get(ss_data.spell_id):
ids[ss_data.spell_id]['mask_class'] |= DataGenerator._class_masks[chrspec.class_id]
if chrspec.spec_id + 1 not in ids[ss_data.spell_id]['tree']:
ids[ss_data.spell_id]['tree'].append( chrspec.spec_id + 1 )
else:
ids[ss_data.spell_id] = {
'mask_class': DataGenerator._class_masks[chrspec.class_id],
'tree' : [ chrspec.spec_id + 1 ]
}
for cls in xrange(1, len(SpellDataGenerator._spell_id_list)):
for spell_tuple in SpellDataGenerator._spell_id_list[cls]:
                # Skip spells with zero tree, as they don't exist
                #if spell_tuple[1] == 0:
                #    continue
spell = self._spell_db[spell_tuple[0]]
if not spell.id:
continue
if len(spell_tuple) == 2 and (spell_tuple[1] == 0 or not self.spell_state(spell)):
continue
elif len(spell_tuple) == 3 and spell_tuple[2] == False:
continue
if ids.get(spell_tuple[0]):
ids[spell_tuple[0]]['mask_class'] |= self._class_masks[cls]
if spell_tuple[1] not in ids[spell_tuple[0]]['tree']:
ids[spell_tuple[0]]['tree'].append( spell_tuple[1] )
else:
ids[spell_tuple[0]] = {
'mask_class': self._class_masks[cls],
'tree' : [ spell_tuple[1] ],
}
# Finally, go through the spells and remove any triggered spell
# as the order in dbcs is arbitrary
for id in ids.keys():
if id in triggered_spell_ids:
self.debug("Spell id %u (%s) is a triggered spell" % (id, self._spell_db[id].name))
del ids[id]
return ids
def generate(self, ids = None):
        # One row per class (row 0 unused): General | Spec0..Spec3 | Pet
        keys = [ [ [] for j in xrange(6) ] for i in xrange(12) ]
# Sort a suitable list for us
for k, v in ids.iteritems():
if v['mask_class'] not in DataGenerator._class_masks:
continue
spell = self._spell_db[k]
for tree in v['tree']:
keys[self._class_map[v['mask_class']]][tree].append(( spell.name, spell.id ))
# Find out the maximum size of a key array
max_ids = 0
for cls_list in keys:
for tree_list in cls_list:
if len(tree_list) > max_ids:
max_ids = len(tree_list)
data_str = "%sclass_ability%s" % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or '',
)
s = '#define %s_SIZE (%d)\n' % (
data_str.upper(),
max_ids
)
s += '#define %s_TREE_SIZE (%d)\n\n' % ( data_str.upper(), len( keys[0] ) )
s += "#ifndef %s\n#define %s (%d)\n#endif\n" % (
self.format_str( 'MAX_CLASS' ),
self.format_str( 'MAX_CLASS' ),
len(DataGenerator._class_names) )
s += '// Class based active abilities, wow build %d\n' % self._options.build
s += 'static unsigned __%s_data[][%s_TREE_SIZE][%s_SIZE] = {\n' % (
data_str,
data_str.upper(),
data_str.upper(),
)
for i in xrange(0, len(keys)):
if SpellDataGenerator._class_names[i]:
s += ' // Class active abilities for %s\n' % ( SpellDataGenerator._class_names[i] )
s += ' {\n'
for j in xrange(0, len(keys[i])):
# See if we can describe the tree
for t in keys[i][j]:
tree_name = ''
if j == 0:
tree_name = 'General'
elif j == 5:
tree_name = 'Pet'
else:
for chrspec_id, chrspec_data in self._chrspecialization_db.iteritems():
if chrspec_data.class_id == i and chrspec_data.spec_id == j - 1:
tree_name = chrspec_data.name
break
s += ' // %s tree, %d abilities\n' % ( tree_name, len(keys[i][j]) )
break
s += ' {\n'
for spell_id in sorted(keys[i][j], key = lambda k_: k_[0]):
r = ''
if self._spell_db[spell_id[1]].rank:
r = ' (%s)' % self._spell_db[spell_id[1]].rank
s += ' %6u, // %s%s\n' % ( spell_id[1], spell_id[0], r )
# Append zero if a short struct
if max_ids - len(keys[i][j]) > 0:
s += ' %6u,\n' % 0
s += ' },\n'
s += ' },\n'
s += '};\n'
return s
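# Illustrative sketch: SpellListGenerator indexes its per-class lists by a
# "tree" number, where 0 is the General tree, 1..4 are specializations
# (ChrSpecialization spec_id + 1) and 5 is the Pet tree, matching the
# General | Spec0..Spec3 | Pet layout of the generated array. The helper
# below only restates that mapping on hypothetical input.
def _example_tree_name(tree, spec_names):
    # spec_names: list of the class' specialization names, hypothetical input
    if tree == 0:
        return 'General'
    elif tree == 5:
        return 'Pet'
    else:
        return spec_names[tree - 1]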
class ClassFlagGenerator(SpellDataGenerator):
_masks = {
'mage': 3,
'warrior': 4,
'warlock': 5,
'priest': 6,
'druid': 7,
'rogue': 8,
'hunter': 9,
'paladin': 10,
'shaman': 11,
'deathknight': 15
}
def __init__(self, options):
SpellDataGenerator.__init__(self, options)
def filter(self, class_name):
ids = { }
mask = ClassFlagGenerator._masks.get(class_name.lower(), -1)
if mask == -1:
return ids
for id, data in self._spell_db.iteritems():
if data.id_class_opts == 0:
continue
opts = self._spellclassoptions_db[data.id_class_opts]
if opts.spell_family_name != mask:
continue
ids[id] = { }
return ids
def generate(self, ids):
s = ''
spell_data = []
effect_data = { }
for i in xrange(0, 128):
spell_data.append({ 'spells' : [ ], 'effects': [ ] })
for spell_id, data in ids.iteritems():
spell = self._spell_db[spell_id]
if not spell.id_class_opts:
continue
copts = self._spellclassoptions_db[spell.id_class_opts]
# Assign this spell to bitfield entries
for i in xrange(1, 5):
f = getattr(copts, 'spell_family_flags_%u' % i)
for bit in xrange(0, 32):
if not (f & (1 << bit)):
continue
bfield = ((i - 1) * 32) + bit
spell_data[bfield]['spells'].append( spell )
# Loop through spell effects, assigning them to effects
for effect in spell._effects:
if not effect:
continue
for i in xrange(1, 5):
f = getattr(effect, 'class_mask_%u' % i)
for bit in xrange(0, 32):
if not (f & (1 << bit)):
continue
bfield = ((i - 1) * 32) + bit
spell_data[bfield]['effects'].append( effect )
# Build effect data
for bit_data in spell_data:
for effect in bit_data['effects']:
                if effect.id_spell not in effect_data:
effect_data[effect.id_spell] = {
'effects': { },
'spell': self._spell_db[effect.id_spell]
}
                if effect.index not in effect_data[effect.id_spell]['effects']:
effect_data[effect.id_spell]['effects'][effect.index] = []
effect_data[effect.id_spell]['effects'][effect.index] += bit_data['spells']
field = 0
for bit_field in spell_data:
field += 1
if not len(bit_field['spells']):
continue
if not len(bit_field['effects']):
continue
s += ' [%-3d] ===================================================\n' % field
for spell in sorted(bit_field['spells'], key = lambda s: s.name):
s += ' %s (%u)\n' % ( spell.name, spell.id )
for effect in sorted(bit_field['effects'], key = lambda e: e.id_spell):
rstr = ''
if self._spell_db[effect.id_spell].rank:
rstr = ' (%s)' % self._spell_db[effect.id_spell].rank
s += ' [%u] {%u} %s%s\n' % ( effect.index, effect.id_spell, self._spell_db[effect.id_spell].name, rstr)
s += '\n'
for spell_id in sorted(effect_data.keys()):
spell = effect_data[spell_id]['spell']
s += 'Spell: %s (%u)' % (spell.name, spell.id)
if spell.rank:
s += ' %s' % spell.rank
s += '\n'
effects = effect_data[spell_id]['effects']
for effect_index in sorted(effects.keys()):
s += ' Effect#%u:\n' % effect_index
for spell in sorted(effects[effect_index], key = lambda s: s.id):
s += ' %s (%u)' % (spell.name, spell.id)
if spell.rank:
s += ' %s' % spell.rank
s += '\n'
s += '\n'
s += '\n'
return s
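# Illustrative sketch: the 128-bit spell family flags processed above are
# stored as four 32-bit fields, and ClassFlagGenerator.generate() flattens a
# (field, bit) pair into a single bitfield index with ((field - 1) * 32) + bit.
# The helper below performs the same decomposition on hypothetical values.
def _example_family_flag_bits(flag_fields):
    # flag_fields: sequence of four 32-bit integers (fields 1..4)
    bits = []
    for field_index in xrange(0, len(flag_fields)):
        for bit in xrange(0, 32):
            if flag_fields[field_index] & (1 << bit):
                bits.append(field_index * 32 + bit)
    return bits

# For example, _example_family_flag_bits([ 0x5, 0x0, 0x1, 0x0 ]) returns
# [0, 2, 64].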
class GlyphPropertyGenerator(DataGenerator):
def __init__(self, options):
        self._dbc = [ 'GlyphProperties', 'Spell' ]
DataGenerator.__init__(self, options)
def filter(self):
return None
def generate(self, ids = None):
data_str = "%sglyph_property_data%s" % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or '',
)
content_str = ''
properties = 0
for id, data in self._glyphproperties_db.iteritems():
if data.id_spell == 0:
continue
if self._spell_db[data.id_spell].id == 0:
continue
content_str += ' { %5u, %6u },\n' % (data.id, data.id_spell)
properties += 1
s = '// Glyph properties, wow build %d\n' % self._options.build
s += 'static glyph_property_data_t __%s[%d] = {\n' % (data_str, properties + 1)
s += content_str
s += ' { %5u, %6u }\n' % (0, 0)
s += '};\n'
return s
class GlyphListGenerator(SpellDataGenerator):
def __init__(self, options):
SpellDataGenerator.__init__(self, options)
def filter(self):
ids = { }
for ability_id, ability_data in self._skilllineability_db.iteritems():
if ability_data.id_skill != 810 or not ability_data.mask_class:
continue
use_glyph_spell = self._spell_db[ability_data.id_spell]
if not use_glyph_spell.id:
continue
# Find the on-use for glyph then, misc value will contain the correct GlyphProperties.dbc id
for effect in use_glyph_spell._effects:
if not effect or effect.type != 74: # Use glyph
continue
# Filter some erroneous glyph data out
glyph_data = self._glyphproperties_db[effect.misc_value]
if not glyph_data.id or not glyph_data.id_spell:
continue
if ids.get(glyph_data.id_spell):
ids[glyph_data.id_spell]['mask_class'] |= ability_data.mask_class
else:
ids[glyph_data.id_spell] = { 'mask_class': ability_data.mask_class, 'glyph_slot' : glyph_data.flags }
return ids
def generate(self, ids = None):
max_ids = 0
        # 12 class rows (index 0 unused), 3 glyph slot columns each
        keys = [ [ [] for j in xrange(3) ] for i in xrange(12) ]
glyph_slot_names = [ 'Major', 'Minor', 'Prime' ]
for k, v in ids.iteritems():
keys[self._class_map[v['mask_class']]][v['glyph_slot']].append( ( self._spell_db[k].name, k ) )
# Figure out tree with most abilities
for cls in xrange(0, len(keys)):
for glyph_slot in xrange(0, len(keys[cls])):
if len(keys[cls][glyph_slot]) > max_ids:
max_ids = len(keys[cls][glyph_slot])
data_str = "%sglyph_abilities%s" % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or '',
)
s = '#define %s_SIZE (%d)\n\n' % (
data_str.upper(),
max_ids
)
s += '// Glyph spells for classes, wow build %d\n' % self._options.build
s += 'static unsigned __%s_data[][3][%s_SIZE] = {\n' % (
data_str,
data_str.upper(),
)
for cls in xrange(0, len(keys)):
if DataGenerator._class_names[cls]:
s += ' // Glyph spells for %s\n' % DataGenerator._class_names[cls]
s += ' {\n'
for glyph_slot in xrange(0, len(keys[cls])):
if len(keys[cls][glyph_slot]) > 0:
s += ' // %s Glyphs (%d spells)\n' % (glyph_slot_names[glyph_slot], len(keys[cls][glyph_slot]))
s += ' {\n'
for glyph in sorted(keys[cls][glyph_slot], key = lambda i: i[0]):
s += ' %6u, // %s\n' % ( glyph[1], glyph[0] )
if len(keys[cls][glyph_slot]) < max_ids:
s += ' %6u,\n' % 0
s += ' },\n'
s += ' },\n'
s += '};\n'
return s
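# Illustrative sketch: in GlyphListGenerator above, the GlyphProperties flags
# field doubles as the glyph slot index of the generated array, i.e.
# 0 = Major, 1 = Minor, 2 = Prime on this build's data.
_EXAMPLE_GLYPH_SLOT_NAMES = [ 'Major', 'Minor', 'Prime' ]

def _example_glyph_slot_name(glyph_flags):
    return _EXAMPLE_GLYPH_SLOT_NAMES[glyph_flags]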
class SetBonusListGenerator(DataGenerator):
# These set bonuses map directly to set bonuses in ItemSet/ItemSetSpell
# (the bonuses array is the id in ItemSet, and
# ItemSetSpell::id_item_set).
#
# NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE
# ====================================================================
# The ordering of this array _MUST_ match the ordering of
# "set_bonus_type_e" enumeration in simulationcraft.hpp or very bad
# things will happen.
# ====================================================================
# NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE
set_bonus_map = [
# Warlords of Draenor PVP set bonuses
{
'name' : 'pvp',
'bonuses': [ 1230, 1225, 1222, 1227, 1226, 1220, 1228, 1223, 1229, 1224, 1221 ],
'tier' : 0,
},
# T17 LFR set bonuses
{
'name' : 'tier17lfr',
'bonuses': [ 1245, 1248, 1246, 1247 ],
'tier' : 17,
},
# Glaives (test, not yet implemented)
{
'name' : 'glaives',
'bonuses': [ 699 ],
'tier' : 0,
},
# Normal set bonuses, T13 -> T17
{
'name' : 'tier13',
'bonuses': [ 1073, 1074, 1063, 1065, 1064, 1061, 1068, 1066, 1067, 1056, 1057,
1070, 1071, 1069, 1062, 1072, 1059, 1058, 1060 ],
'tier' : 13
},
{
'name' : 'tier14',
'bonuses': [ 1144, 1145, 1134, 1136, 1135, 1129, 1139, 1137, 1138, 1124, 1123,
1141, 1142, 1140, 1130, 1143, 1133, 1132, 1131, 1126, 1127, 1128,
1125 ],
'tier' : 14
},
{
'name' : 'tier15',
'bonuses': [ 1172, 1173, 1163, 1164, 1162, 1157, 1167, 1165, 1166, 1151, 1152,
1170, 1169, 1168, 1158, 1171, 1161, 1159, 1160, 1155, 1153, 1156,
1154 ],
'tier' : 15
},
{
'name' : 'tier16',
'bonuses': [ 1180, 1179, 1189, 1188, 1190, 1195, 1185, 1187, 1186, 1201,
1200, 1182, 1183, 1184, 1194, 1181, 1191, 1193, 1192, 1197, 1199,
1196, 1198 ],
'tier' : 16
},
{
'name' : 'tier17',
'bonuses': [ 1242, 1238, 1236, 1240, 1239, 1234, 1241, 1235, 1243, 1237, 1233 ],
'tier' : 17
}
]
def __init__(self, options):
self._dbc = [ 'ItemSet', 'ItemSetSpell', 'Spell', 'ChrSpecialization', 'Item-sparse', 'ItemNameDescription' ]
        self._regex = re.compile(r'^Item\s+-\s+(.+)\s+T([0-9]+)\s+([A-Za-z\s]*)\s*([0-9]+)P\s+Bonus')
# Older set bonuses (T16 and before) need to be mapped to "roles" in
# simulationcraft (for backwards compatibility reasons, and because set
# bonuses were not "spec specific" before Warlords of Draenor). We can use
# the specialization roles for healer and tank, however simulationcraft
# makes a distinction between "caster" and "melee" roles, so the DPS
# specialization role in the client has to be split into two, by spec.
#
# Use Blizzard's "dps" spec type (2) as "melee", and add 3 as the "caster"
# version. All of this is only relevant to backwards support our
# "tierxx_ypc_zzzz" options, where zzzz is the role. Tier17 onwards, set
# bonuses are spec specific, and we can simply enable/disable bonuses.
self.role_map = {
# Balance Druid
102: 3,
# Mage specs
62 : 3, 63: 3, 64: 3,
# Shadow Priest
258: 3,
# Elemental Shaman
262: 3,
# Warlock specs
265: 3, 266: 3, 267: 3
}
# Make a set bonus to spec map, so we only need one "blizzard formatted"
# spell per set bonus, to determine all relevant information for that
# tier/class/spec combo
self.set_bonus_to_spec_map = {
}
DataGenerator.__init__(self, options)
@staticmethod
def is_extract_set_bonus(bonus):
for idx in xrange(0, len(SetBonusListGenerator.set_bonus_map)):
if bonus in SetBonusListGenerator.set_bonus_map[idx]['bonuses']:
return True, idx
return False, -1
def initialize_set_bonus_map(self):
for set_bonus_id, set_spell_data in self._itemsetspell_db.iteritems():
if set_spell_data.unk_wod_1 > 0:
continue
is_set_bonus, set_index = SetBonusListGenerator.is_extract_set_bonus(set_spell_data.id_item_set)
if not is_set_bonus:
continue
item_set = self._itemset_db[set_spell_data.id_item_set]
if not item_set.id:
continue
if set_spell_data.id_item_set in self.set_bonus_to_spec_map:
continue
spell_data = self._spell_db[set_spell_data.id_spell]
set_class = -1
            set_role = -1
set_spec_arr_derived = []
# Spell name matches generic set bonus pattern, derive some
# information from it
mobj = self._regex.match(spell_data.name)
if mobj:
# Name matches something in our generic class map, use it
if set_class == -1 and mobj.group(1) in self._class_map:
set_class = self._class_map[mobj.group(1)]
# Derive spec information and role from the spell name, if we
# cannot get the information from DBC. Note that role presumes
# set bonuses are made "per role", as they were in the olden
# days (T16 and before)
set_spec_arr_derived, set_role = self.derive_specs(set_class, mobj.group(3).strip())
if len(set_spec_arr_derived) == 0:
set_spec_arr_derived.append(0)
self.set_bonus_to_spec_map[item_set.id] = {
'index' : set_index,
'name' : self.set_bonus_map[set_index]['name'],
'tier' : self.set_bonus_map[set_index]['tier'],
'derived_class': set_class,
'derived_specs': set_spec_arr_derived,
'derived_role' : set_role
}
def initialize(self):
DataGenerator.initialize(self)
self.spec_type_map = {}
for spec_id, spec_data in self._chrspecialization_db.iteritems():
if spec_data.class_id not in self.spec_type_map:
self.spec_type_map[spec_data.class_id] = { }
if spec_data.spec_type not in self.spec_type_map[spec_data.class_id]:
self.spec_type_map[spec_data.class_id][spec_data.spec_type] = []
self.spec_type_map[spec_data.class_id][spec_data.spec_type].append(spec_id)
self.initialize_set_bonus_map()
return True
def derive_specs(self, class_, name):
specs = []
spec_added = False
roles = set()
for spec_id, spec_data in self._chrspecialization_db.iteritems():
if class_ == spec_data.class_id and spec_data.name in name:
spec_added = True
specs.append(spec_id)
roles.add(self.role_map.get(spec_id, spec_data.spec_type))
        # If spec-specific words did not identify any spec, fall back to
        # generic role words (including known Blizzard typos) to map the
        # name to a set of specs
if not spec_added:
if name == 'Tank':
specs = self.spec_type_map[class_][0]
roles.add(0)
elif name in ['DPS', 'Melee']:
specs = self.spec_type_map[class_][2]
roles.add(self.role_map.get(specs[0], 2))
elif name in ['Healer', 'Healing']:
specs = self.spec_type_map[class_][1]
roles.add(1)
# Pure DPS classes can have empty string, in which case just
# slap in all specs for the class
elif len(name) == 0:
specs = self.spec_type_map[class_][2]
roles.add(self.role_map.get(specs[0], 2))
            # Handle Blizzard's "Enhancment" typo in the spell name
            elif name == 'Enhancment':
specs = [ 263, ]
roles.add(2)
else:
print >>sys.stderr, "Found set bonus spell '%s' that does not match Blizzard set bonus spell template" % name
return specs, list(roles)[0]
def filter(self):
data = []
for id, set_spell_data in self._itemsetspell_db.iteritems():
is_set_bonus, set_index = SetBonusListGenerator.is_extract_set_bonus(set_spell_data.id_item_set)
if not is_set_bonus:
continue
item_set = self._itemset_db[set_spell_data.id_item_set]
if not item_set.id:
continue
spell_data = self._spell_db[set_spell_data.id_spell]
if not spell_data.id:
continue
entry = {
'index' : set_index,
'set_bonus_id': id,
'bonus' : set_spell_data.n_req_items
}
if set_spell_data.id_item_set in self.set_bonus_to_spec_map:
bonus_data = self.set_bonus_to_spec_map[set_spell_data.id_item_set]
entry['class'] = bonus_data['derived_class']
entry['specs'] = bonus_data['derived_specs']
entry['role'] = bonus_data['derived_role']
entry['spec'] = -1
else:
if set_spell_data.unk_wod_1:
spec_data = self._chrspecialization_db[set_spell_data.unk_wod_1]
entry['class'] = spec_data.class_id
entry['role'] = self.role_map.get(set_spell_data.unk_wod_1, spec_data.spec_type)
entry['spec'] = set_spell_data.unk_wod_1
else:
entry['class'] = -1
entry['role'] = -1
entry['spec'] = -1
entry['specs'] = [ 0, ]
data.append(dict(entry))
return data
def generate(self, ids):
        # Sort by tier index, then class, role, piece bonus and set bonus id
        ids.sort(key = lambda e: ( e['index'], e['class'], e['role'], e['bonus'], e['set_bonus_id'] ))
data_str = "%sset_bonus_data%s" % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or '',
)
s = '#define %s_SIZE (%d)\n\n' % (
data_str.upper(),
len(ids)
)
s += '// Set bonus data, wow build %d\n' % self._options.build
s += 'static item_set_bonus_t __%s[%s_SIZE] = {\n' % (
data_str,
data_str.upper(),
)
for data_idx in xrange(0, len(ids)):
entry = ids[data_idx]
if data_idx % 25 == 0:
s += ' // %-44s, OptName, EnumID, SetID, Tier, Bns, Cls, %20s, Role, Spec, Spell, Items\n' % ('Set bonus name', 'Derived Spec')
item_set_spell = self._itemsetspell_db[entry['set_bonus_id']]
item_set = self._itemset_db[item_set_spell.id_item_set]
map_entry = self.set_bonus_map[entry['index']]
item_set_str = ""
items = []
for item_n in xrange(1, 17):
item_id = getattr(item_set, 'id_item_%d' % item_n)
if item_id > 0:
items.append('%6u' % item_id)
if len(items) < 17:
items.append(' 0')
s += ' { %-45s, %12s, %6d, %5d, %4u, %3u, %3u, %20s, %4u, %4u, %6u, %s },\n' % (
'"%s"' % item_set.name.replace('"', '\\"'),
'"%s"' % map_entry['name'].replace('"', '\\"'),
entry['index'],
item_set_spell.id_item_set,
map_entry['tier'],
entry['bonus'],
entry['class'],
'{ %s }' % (', '.join(['%3u' % x for x in entry['specs']])),
entry['role'],
entry['spec'],
item_set_spell.id_spell,
'{ %s }' % (', '.join(items))
)
s += '};\n'
return s
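# Illustrative sketch: set bonus spells follow the Blizzard naming template
# matched by the regex in SetBonusListGenerator.__init__(). The function
# below applies the same pattern to a hypothetical spell name, to show which
# capture groups feed the class, tier, spec words and piece count used by
# initialize_set_bonus_map().
def _example_parse_set_bonus_name(name):
    m = re.match(r'^Item\s+-\s+(.+)\s+T([0-9]+)\s+([A-Za-z\s]*)\s*([0-9]+)P\s+Bonus', name)
    if not m:
        return None
    return ( m.group(1), int(m.group(2)), m.group(3).strip(), int(m.group(4)) )

# For example, _example_parse_set_bonus_name('Item - Shaman T16 Elemental 2P Bonus')
# returns ('Shaman', 16, 'Elemental', 2).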
class ItemSetListGenerator(SpellDataGenerator):
_tier_base = 13
def __init__(self, options):
SpellDataGenerator.__init__(self, options)
def filter(self):
ids = { }
mask_class_category = 0
tier_id = 0
# Item sets, loop through ItemSet.dbc getting class-specific tier sets and add
# their bonuses to the spell list
for itemset_id, itemset_data in self._itemset_db.iteritems():
mask_class_category = 0
tier_id = 0
for cls in xrange(0, len(SpellDataGenerator._item_set_list)):
for tier in xrange(0, len(SpellDataGenerator._item_set_list[cls])):
if itemset_id in SpellDataGenerator._item_set_list[cls][tier]:
mask_class_category = DataGenerator._class_masks[cls]
tier_id = tier
break
if mask_class_category:
break
if not mask_class_category:
continue
            # Item set is a tier set, we want its information.
for bonus in itemset_data.bonus:
spell_id = bonus.id_spell
if spell_id:
f = { }
self.process_spell(spell_id, f, mask_class_category, 0)
if spell_id not in f:
continue
ids[spell_id] = {
'mask_class': mask_class_category,
'set' : itemset_data.name,
'tier' : tier_id,
'n_bonus' : bonus.n_req_items
}
return ids
def generate(self, ids = None):
max_ids = 0
s = ''
keys = [ ]
for cls in xrange(0, len(SpellDataGenerator._item_set_list)):
keys.append([])
for tier in SpellDataGenerator._item_set_list[cls]:
keys[cls].append([])
for spell_id, sdata in ids.iteritems():
keys[self._class_map[sdata['mask_class']]][sdata['tier']].append( (
spell_id,
sdata['set'],
sdata['n_bonus'] )
)
# Figure out tree with most abilities
for cls in xrange(0, len(keys)):
for tier_id in xrange(0, len(keys[cls])):
if len(keys[cls][tier_id]) > max_ids:
max_ids = len(keys[cls][tier_id])
data_str = "%stier_bonuses%s" % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or '',
)
s = '#define %s_SIZE (%d)\n' % (
data_str.upper(),
max_ids
)
s += '#define %s_MAX_TIER (%d)\n\n' % (
data_str.upper(),
len(keys[cls]) + 1
)
s += '#define %s_TIER_BASE (%d)\n\n' % (
data_str.upper(),
self._tier_base
)
s += '// Tier item set bonuses for class, wow build %d\n' % self._options.build
s += 'static unsigned __%s_data[][%s_MAX_TIER][%s_SIZE] = {\n' % (
data_str,
data_str.upper(),
data_str.upper(),
)
for cls in xrange(0, len(keys)):
if DataGenerator._class_names[cls]:
s += ' // Tier bonuses for %s\n' % DataGenerator._class_names[cls]
s += ' {\n'
for tier_id in xrange(0, len(keys[cls])):
if len(keys[cls][tier_id]) > 0:
s += ' // Tier %d bonuses (%d spells)\n' % (self._tier_base + tier_id, len(keys[cls][tier_id]))
s += ' {\n'
for tier_bonus in sorted(keys[cls][tier_id], key = lambda i: i[0]):
s += ' %5d, // %s - %d Piece Bonus (%s)\n' % (
tier_bonus[0],
tier_bonus[1],
tier_bonus[2],
self._spell_db[tier_bonus[0]].name)
if len(keys[cls][tier_id]) < max_ids:
s += ' %5d,\n' % 0
s += ' },\n'
s += ' {\n'
s += ' %5d,\n' % 0
s += ' },\n'
s += ' },\n'
s += '};\n'
return s
class RandomSuffixGenerator(DataGenerator):
def __init__(self, options):
self._dbc = [ 'ItemRandomSuffix', 'SpellItemEnchantment' ]
DataGenerator.__init__(self, options)
def filter(self):
ids = set()
# Let's do some modest filtering here, take only "stat" enchants,
# and take out the test items as well
for id, data in self._itemrandomsuffix_db.iteritems():
# of the Test, of the Paladin Testing
if id == 46 or id == 48:
continue
has_non_stat_enchant = False
            # For now, naively presume type_1 of SpellItemEnchantment tells us
            # whether the enchantment is relevant to us (i.e., a stat enchant)
for i in xrange(1,4):
item_ench = self._spellitemenchantment_db.get( getattr(data, 'id_property_%d' % i) )
if not item_ench:
self.debug( "No item enchantment found for %s (%s)" % (data.name_sfx, data.name_int) )
continue
if item_ench.type_1 != 5:
has_non_stat_enchant = True
break
if has_non_stat_enchant:
continue
ids.add( id )
return list(ids)
def generate(self, ids = None):
# Sort keys
ids.sort()
s = '#define %sRAND_SUFFIX%s_SIZE (%d)\n\n' % (
(self._options.prefix and ('%s_' % self._options.prefix) or '').upper(),
(self._options.suffix and ('_%s' % self._options.suffix) or '').upper(),
len(ids)
)
s += '// Random "cataclysm" item suffixes, wow build %d\n' % self._options.build
s += 'static struct random_suffix_data_t __%srand_suffix%s_data[] = {\n' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or '' )
for id in ids + [ 0 ]:
rs = self._itemrandomsuffix_db[id]
fields = rs.field('id', 'suffix')
fields += [ '{ %s }' % ', '.join(rs.field('id_property_1', 'id_property_2', 'id_property_3', 'id_property_4', 'id_property_5')) ]
fields += [ '{ %s }' % ', '.join(rs.field('property_pct_1', 'property_pct_2', 'property_pct_3', 'property_pct_4', 'property_pct_5')) ]
s += ' { %s },' % (', '.join(fields))
if rs.name_int:
s += ' // %s' % rs.name_int
s += '\n'
s += '};\n'
return s
class SpellItemEnchantmentGenerator(RandomSuffixGenerator):
def __init__(self, options):
RandomSuffixGenerator.__init__(self, options)
self._dbc += ['Spell', 'SpellEffect']
def initialize(self):
RandomSuffixGenerator.initialize(self)
for spell_effect_id, spell_effect_data in self._spelleffect_db.iteritems():
if not spell_effect_data.id_spell:
continue
spell = self._spell_db[spell_effect_data.id_spell]
if not spell.id:
continue
spell.add_effect(spell_effect_data)
# Map spell ids to spellitemenchantments, as there's no direct
# link between them, and 5.4+, we need/want to scale enchants properly
for id, data in self._spell_db.iteritems():
enchant = False
for effect in data._effects:
# Skip all effects that are not of type enchant item
if not effect or effect.type != 53:
continue
item_ench = self._spellitemenchantment_db[effect.misc_value]
if item_ench.id == 0:
continue
if hasattr(item_ench, '_spells'):
item_ench._spells.append(data)
else:
item_ench._spells = [ data ]
return True
def filter(self):
return self._spellitemenchantment_db.keys()
def generate(self, ids = None):
ids.sort()
s = '#define %sSPELL_ITEM_ENCH%s_SIZE (%d)\n\n' % (
(self._options.prefix and ('%s_' % self._options.prefix) or '').upper(),
(self._options.suffix and ('_%s' % self._options.suffix) or '').upper(),
len(ids)
)
s += '// Item enchantment data, wow build %d\n' % self._options.build
s += 'static struct item_enchantment_data_t __%sspell_item_ench%s_data[] = {\n' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or '' )
for i in ids + [ 0 ]:
ench_data = self._spellitemenchantment_db[i]
fields = ench_data.field('id', 'slot', 'id_gem', 'id_scaling', 'min_scaling_level', 'max_scaling_level', 'req_skill', 'req_skill_value')
fields += [ '{ %s }' % ', '.join(ench_data.field('type_1', 'type_2', 'type_3')) ]
fields += [ '{ %s }' % ', '.join(ench_data.field('amount_1', 'amount_2', 'amount_3')) ]
fields += [ '{ %s }' % ', '.join(ench_data.field('id_property_1', 'id_property_2', 'id_property_3')) ]
fields += [ '{ %s }' % ', '.join(ench_data.field('coeff_1', 'coeff_2', 'coeff_3')) ]
if hasattr(ench_data, '_spells'):
fields += ench_data._spells[ 0 ].field('id')
else:
fields += self._spell_db[0].field('id')
fields += ench_data.field('desc')
s += ' { %s },\n' % (', '.join(fields))
s += '};\n'
return s
class RandomPropertyPointsGenerator(DataGenerator):
def __init__(self, options):
self._dbc = [ 'RandPropPoints' ]
DataGenerator.__init__(self, options)
def filter(self):
ids = [ ]
for ilevel, data in self._randproppoints_db.iteritems():
if ilevel >= 1 and ilevel <= self._options.scale_ilevel:
ids.append(ilevel)
return ids
def generate(self, ids = None):
# Sort keys
ids.sort()
s = '#define %sRAND_PROP_POINTS%s_SIZE (%d)\n\n' % (
(self._options.prefix and ('%s_' % self._options.prefix) or '').upper(),
(self._options.suffix and ('_%s' % self._options.suffix) or '').upper(),
len(ids)
)
s += '// Random property points for item levels 1-%d, wow build %d\n' % (
self._options.scale_ilevel, self._options.build )
s += 'static struct random_prop_data_t __%srand_prop_points%s_data[] = {\n' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or '' )
for id in ids + [ 0 ]:
rpp = self._randproppoints_db[id]
fields = rpp.field('id')
fields += [ '{ %s }' % ', '.join(rpp.field('epic_points_1', 'epic_points_2', 'epic_points_3', 'epic_points_4', 'epic_points_5')) ]
fields += [ '{ %s }' % ', '.join(rpp.field('rare_points_1', 'rare_points_2', 'rare_points_3', 'rare_points_4', 'rare_points_5')) ]
fields += [ '{ %s }' % ', '.join(rpp.field('uncm_points_1', 'uncm_points_2', 'uncm_points_3', 'uncm_points_4', 'uncm_points_5')) ]
s += ' { %s },\n' % (', '.join(fields))
s += '};\n'
return s
class WeaponDamageDataGenerator(DataGenerator):
def __init__(self, options):
self._dbc = [ 'ItemDamageOneHand', 'ItemDamageOneHandCaster',
'ItemDamageRanged', 'ItemDamageThrown', 'ItemDamageWand',
'ItemDamageTwoHand', 'ItemDamageTwoHandCaster', ]
DataGenerator.__init__(self, options)
def filter(self):
return None
def generate(self, ids = None):
s = ''
for dbname in self._dbc:
db = getattr(self, '_%s_db' % dbname.lower() )
s += '// Item damage data from %s.dbc, ilevels 1-%d, wow build %d\n' % (
dbname, self._options.scale_ilevel, self._options.build )
s += 'static struct item_scale_data_t __%s%s%s_data[] = {\n' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
dbname.lower(),
self._options.suffix and ('_%s' % self._options.suffix) or '' )
for ilevel, data in db.iteritems():
if ilevel > self._options.scale_ilevel:
continue
fields = data.field('ilevel')
fields += [ '{ %s }' % ', '.join(data.field('v_1', 'v_2', 'v_3', 'v_4', 'v_5', 'v_6', 'v_7')) ]
s += ' { %s },\n' % (', '.join(fields))
s += ' { %s }\n' % ( ', '.join([ '0' ] + [ '{ 0, 0, 0, 0, 0, 0, 0 }' ]) )
s += '};\n\n'
return s
class ArmorValueDataGenerator(DataGenerator):
def __init__(self, options):
self._dbc = [ 'ItemArmorQuality', 'ItemArmorShield', 'ItemArmorTotal' ]
DataGenerator.__init__(self, options)
def filter(self):
return None
def generate(self, ids = None):
s = ''
for dbname in self._dbc:
db = getattr(self, '_%s_db' % dbname.lower() )
s += '// Item armor values data from %s.dbc, ilevels 1-%d, wow build %d\n' % (
dbname, self._options.scale_ilevel, self._options.build )
s += 'static struct %s __%s%s%s_data[] = {\n' % (
(dbname != 'ItemArmorTotal') and 'item_scale_data_t' or 'item_armor_type_data_t',
self._options.prefix and ('%s_' % self._options.prefix) or '',
dbname.lower(),
self._options.suffix and ('_%s' % self._options.suffix) or '' )
for ilevel, data in db.iteritems():
if ilevel > self._options.scale_ilevel:
continue
fields = data.field('ilevel')
if dbname != 'ItemArmorTotal':
fields += [ '{ %s }' % ', '.join(data.field('v_1', 'v_2', 'v_3', 'v_4', 'v_5', 'v_6', 'v_7')) ]
else:
fields += [ '{ %s }' % ', '.join(data.field('v_1', 'v_2', 'v_3', 'v_4')) ]
s += ' { %s },\n' % (', '.join(fields))
if dbname != 'ItemArmorTotal':
s += ' { %s }\n' % ( ', '.join([ '0' ] + [ '{ 0, 0, 0, 0, 0, 0, 0 }' ]) )
else:
s += ' { %s }\n' % ( ', '.join([ '0' ] + [ '{ 0, 0, 0, 0 }' ]) )
s += '};\n\n'
return s
class ArmorSlotDataGenerator(DataGenerator):
def __init__(self, options):
self._dbc = [ 'ArmorLocation' ]
DataGenerator.__init__(self, options)
def filter(self):
return None
def generate(self, ids = None):
s = '// Inventory type based armor multipliers, wow build %d\n' % ( self._options.build )
s += 'static struct item_armor_type_data_t __%sarmor_slot%s_data[] = {\n' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or '' )
for inv_type in sorted(self._armorlocation_db.keys()):
data = self._armorlocation_db[inv_type]
fields = data.field('id')
fields += [ '{ %s }' % ', '.join(data.field('v_1', 'v_2', 'v_3', 'v_4')) ]
s += ' { %s },\n' % (', '.join(fields))
s += ' { %s }\n' % ( ', '.join([ '0' ] + [ '{ 0, 0, 0, 0 }' ]) )
s += '};\n\n'
return s
class GemPropertyDataGenerator(DataGenerator):
def __init__(self, options):
self._dbc = [ 'GemProperties' ]
DataGenerator.__init__(self, options)
def filter(self):
ids = []
for id, data in self._gemproperties_db.iteritems():
if not data.color or not data.id_enchant:
continue
ids.append(id)
return ids
def generate(self, ids = None):
ids.sort()
s = '// Gem properties, wow build %d\n' % ( self._options.build )
s += 'static struct gem_property_data_t __%sgem_property%s_data[] = {\n' % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or '' )
for id in ids + [ 0 ]:
data = self._gemproperties_db[id]
if data.color == 0:
                continue
fields = data.field('id', 'id_enchant', 'color', 'min_ilevel')
s += ' { %s },\n' % (', '.join(fields))
s += '};\n\n'
return s
class ItemBonusDataGenerator(DataGenerator):
def __init__(self, options):
self._dbc = [ 'ItemBonus', 'ItemBonusTreeNode', 'ItemXBonusTree' ]
DataGenerator.__init__(self, options)
def generate(self, ids):
# Bonus trees
data_str = "%sitem_bonus_tree%s" % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or '',
)
s = '#define %s_SIZE (%d)\n\n' % (data_str.upper(), len(self._itembonustreenode_db.keys()) + 1)
s += '// Item bonus trees, wow build %d\n' % ( self._options.build )
s += 'static struct item_bonus_tree_entry_t __%s_data[%s_SIZE] = {\n' % (data_str, data_str.upper())
for key in sorted(self._itembonustreenode_db.keys()) + [0,]:
data = self._itembonustreenode_db[key]
fields = data.field('id', 'id_tree', 'index', 'id_child', 'id_node')
s += ' { %s },\n' % (', '.join(fields))
s += '};\n\n'
# Bonus definitions
data_str = "%sitem_bonus%s" % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or '',
)
s += '#define %s_SIZE (%d)\n\n' % (data_str.upper(), len(self._itembonus_db.keys()) + 1)
s += '// Item bonuses, wow build %d\n' % ( self._options.build )
s += 'static struct item_bonus_entry_t __%s_data[%s_SIZE] = {\n' % (data_str, data_str.upper())
for key in sorted(self._itembonus_db.keys()) + [0,]:
data = self._itembonus_db[key]
fields = data.field('id', 'id_node', 'type', 'val1', 'val2', 'index')
s += ' { %s },\n' % (', '.join(fields))
s += '};\n\n'
# Item bonuses (unsure as of yet if we need this, depends on how
# Blizzard exports the bonus id to third parties)
data_str = "%sitem_bonus_map%s" % (
self._options.prefix and ('%s_' % self._options.prefix) or '',
self._options.suffix and ('_%s' % self._options.suffix) or '',
)
s += '#define %s_SIZE (%d)\n\n' % (data_str.upper(), len(self._itemxbonustree_db.keys()) + 1)
s += '// Item bonus map, wow build %d\n' % ( self._options.build )
s += 'static struct item_bonus_node_entry_t __%s_data[%s_SIZE] = {\n' % (data_str, data_str.upper())
for key in sorted(self._itemxbonustree_db.keys()) + [0,]:
data = self._itemxbonustree_db[key]
fields = data.field('id', 'id_item', 'id_tree')
s += ' { %s },\n' % (', '.join(fields))
s += '};\n\n'
return s
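# --- Editor's sketch (hypothetical, not part of dbc_extract) ---
# Every generator above follows the same filter()/generate() protocol. This
# minimal mock shows the calling convention; the real classes additionally
# need parsed DBC tables and the full option set (prefix, suffix, build, ...).
class _SketchGenerator(object):
    def __init__(self, options):
        self._options = options
    def filter(self):
        # Real generators cull ids from one or more DBC tables here
        return [1, 2, 3]
    def generate(self, ids = None):
        # Real generators emit C arrays keyed on prefix/suffix/build
        return 'static unsigned __sketch_data[] = { %s };\n' % (
            ', '.join(str(i) for i in ids))
# Usage (sketch): sys.stdout.write(gen.generate(gen.filter())) after a
# successful gen.initialize(), mirroring how dbc_extract presumably drives it.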
|
utecht/simcraft_shaman
|
dbc_extract/dbc/generator.py
|
Python
|
gpl-3.0
| 171,229
|
[
"BLAST",
"CRYSTAL"
] |
0dfc23cf6cb9e09fd01c39cedd7a3158cd54cc6d4cb9a4beafebbae4f0b2eed9
|
#
#@BEGIN LICENSE
#
# PSI4: an ab initio quantum chemistry software package
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#@END LICENSE
#
"""
| Geometries from Baker and Chan J. Comput. Chem. 17 888 (1996),
| as reported in Bakken and Helgaker, J. Chem. Phys. 117, 9160 (2002).
| No reference energies defined.
- **cp** ``'off'``
- **rlxd** ``'off'``
"""
import re
import qcdb
# <<< BAKERJCC96 Database Module >>>
dbse = 'BAKERJCC96'
isOS = 'true'
# <<< Database Members >>>
HRXN = ['HCN_to_HNC', 'HCCH_to_CCH2', 'H2CO_to_H2_CO',
'parent_diels_alder', 's_tetrazine_to_2HCN_N2', 'CH3CH3_to_CH2CH2_H2',
'CH3CH2F_to_CH2CH2_HF', 'CH2CHOH_to_CH3CHO', 'silylene_insertion',
'HNCCS_to_HCN_CS', 'acrolein_rotation', 'HCONHOH_to_HCOHNHO',
'HNC_H2_to_H2CNH', 'H2CNH_to_HCNH2', 'HCNH2_to_HCN_H2']
HRXN_TEMP = ['parent_diels_alder']
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV['%s-%s' % (dbse, 'HCN_to_HNC' )] = ['%s-%s-reagent' % (dbse, 'HCN_to_HNC')]
RXNM['%s-%s' % (dbse, 'HCN_to_HNC' )] = dict(zip(ACTV['%s-%s' % (dbse, 'HCN_to_HNC')], [+1]))
ACTV['%s-%s' % (dbse, 'HCCH_to_CCH2' )] = ['%s-%s-reagent' % (dbse, 'HCCH_to_CCH2')]
RXNM['%s-%s' % (dbse, 'HCCH_to_CCH2' )] = dict(zip(ACTV['%s-%s' % (dbse, 'HCCH_to_CCH2')], [+1]))
ACTV['%s-%s' % (dbse, 'H2CO_to_H2_CO' )] = ['%s-%s-reagent' % (dbse, 'H2CO_to_H2_CO')]
RXNM['%s-%s' % (dbse, 'H2CO_to_H2_CO' )] = dict(zip(ACTV['%s-%s' % (dbse, 'H2CO_to_H2_CO')], [+1]))
ACTV['%s-%s' % (dbse, 'parent_diels_alder' )] = ['%s-%s-reagent' % (dbse, 'parent_diels_alder')]
RXNM['%s-%s' % (dbse, 'parent_diels_alder' )] = dict(zip(ACTV['%s-%s' % (dbse, 'parent_diels_alder')], [+1]))
ACTV['%s-%s' % (dbse, 's_tetrazine_to_2HCN_N2' )] = ['%s-%s-reagent' % (dbse, 's_tetrazine_to_2HCN_N2')]
RXNM['%s-%s' % (dbse, 's_tetrazine_to_2HCN_N2' )] = dict(zip(ACTV['%s-%s' % (dbse, 's_tetrazine_to_2HCN_N2')], [+1]))
ACTV['%s-%s' % (dbse, 'CH3CH3_to_CH2CH2_H2' )] = ['%s-%s-reagent' % (dbse, 'CH3CH3_to_CH2CH2_H2')]
RXNM['%s-%s' % (dbse, 'CH3CH3_to_CH2CH2_H2' )] = dict(zip(ACTV['%s-%s' % (dbse, 'CH3CH3_to_CH2CH2_H2')], [+1]))
ACTV['%s-%s' % (dbse, 'CH3CH2F_to_CH2CH2_HF' )] = ['%s-%s-reagent' % (dbse, 'CH3CH2F_to_CH2CH2_HF')]
RXNM['%s-%s' % (dbse, 'CH3CH2F_to_CH2CH2_HF' )] = dict(zip(ACTV['%s-%s' % (dbse, 'CH3CH2F_to_CH2CH2_HF')], [+1]))
ACTV['%s-%s' % (dbse, 'CH2CHOH_to_CH3CHO' )] = ['%s-%s-reagent' % (dbse, 'CH2CHOH_to_CH3CHO')]
RXNM['%s-%s' % (dbse, 'CH2CHOH_to_CH3CHO' )] = dict(zip(ACTV['%s-%s' % (dbse, 'CH2CHOH_to_CH3CHO')], [+1]))
ACTV['%s-%s' % (dbse, 'silylene_insertion' )] = ['%s-%s-reagent' % (dbse, 'silylene_insertion')]
RXNM['%s-%s' % (dbse, 'silylene_insertion' )] = dict(zip(ACTV['%s-%s' % (dbse, 'silylene_insertion')], [+1]))
ACTV['%s-%s' % (dbse, 'HNCCS_to_HCN_CS' )] = ['%s-%s-reagent' % (dbse, 'HNCCS_to_HCN_CS')]
RXNM['%s-%s' % (dbse, 'HNCCS_to_HCN_CS' )] = dict(zip(ACTV['%s-%s' % (dbse, 'HNCCS_to_HCN_CS')], [+1]))
ACTV['%s-%s' % (dbse, 'acrolein_rotation' )] = ['%s-%s-reagent' % (dbse, 'acrolein_rotation')]
RXNM['%s-%s' % (dbse, 'acrolein_rotation' )] = dict(zip(ACTV['%s-%s' % (dbse, 'acrolein_rotation')], [+1]))
ACTV['%s-%s' % (dbse, 'HCONHOH_to_HCOHNHO' )] = ['%s-%s-reagent' % (dbse, 'HCONHOH_to_HCOHNHO')]
RXNM['%s-%s' % (dbse, 'HCONHOH_to_HCOHNHO' )] = dict(zip(ACTV['%s-%s' % (dbse, 'HCONHOH_to_HCOHNHO')], [+1]))
ACTV['%s-%s' % (dbse, 'HNC_H2_to_H2CNH' )] = ['%s-%s-reagent' % (dbse, 'HNC_H2_to_H2CNH')]
RXNM['%s-%s' % (dbse, 'HNC_H2_to_H2CNH' )] = dict(zip(ACTV['%s-%s' % (dbse, 'HNC_H2_to_H2CNH')], [+1]))
ACTV['%s-%s' % (dbse, 'H2CNH_to_HCNH2' )] = ['%s-%s-reagent' % (dbse, 'H2CNH_to_HCNH2')]
RXNM['%s-%s' % (dbse, 'H2CNH_to_HCNH2' )] = dict(zip(ACTV['%s-%s' % (dbse, 'H2CNH_to_HCNH2')], [+1]))
ACTV['%s-%s' % (dbse, 'HCNH2_to_HCN_H2' )] = ['%s-%s-reagent' % (dbse, 'HCNH2_to_HCN_H2')]
RXNM['%s-%s' % (dbse, 'HCNH2_to_HCN_H2' )] = dict(zip(ACTV['%s-%s' % (dbse, 'HCNH2_to_HCN_H2')], [+1]))
# <<< Reference Values [kcal/mol] >>>
BIND = {}
BIND['%s-%s' % (dbse, 'HCN_to_HNC' )] = 0.000
BIND['%s-%s' % (dbse, 'HCCH_to_CCH2' )] = 0.000
BIND['%s-%s' % (dbse, 'H2CO_to_H2_CO' )] = 0.000
BIND['%s-%s' % (dbse, 'parent_diels_alder' )] = 0.000
BIND['%s-%s' % (dbse, 's_tetrazine_to_2HCN_N2' )] = 0.000
BIND['%s-%s' % (dbse, 'CH3CH3_to_CH2CH2_H2' )] = 0.000
BIND['%s-%s' % (dbse, 'CH3CH2F_to_CH2CH2_HF' )] = 0.000
BIND['%s-%s' % (dbse, 'CH2CHOH_to_CH3CHO' )] = 0.000
BIND['%s-%s' % (dbse, 'silylene_insertion' )] = 0.000
BIND['%s-%s' % (dbse, 'HNCCS_to_HCN_CS' )] = 0.000
BIND['%s-%s' % (dbse, 'acrolein_rotation' )] = 0.000
BIND['%s-%s' % (dbse, 'HCONHOH_to_HCOHNHO' )] = 0.000
BIND['%s-%s' % (dbse, 'HNC_H2_to_H2CNH' )] = 0.000
BIND['%s-%s' % (dbse, 'H2CNH_to_HCNH2' )] = 0.000
BIND['%s-%s' % (dbse, 'HCNH2_to_HCN_H2' )] = 0.000
# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, 'HCN_to_HNC' )] = ''
TAGL['%s-%s-reagent' % (dbse, 'HCN_to_HNC' )] = ''
TAGL['%s-%s' % (dbse, 'HCCH_to_CCH2' )] = ''
TAGL['%s-%s-reagent' % (dbse, 'HCCH_to_CCH2' )] = ''
TAGL['%s-%s' % (dbse, 'H2CO_to_H2_CO' )] = ''
TAGL['%s-%s-reagent' % (dbse, 'H2CO_to_H2_CO' )] = ''
TAGL['%s-%s' % (dbse, 'parent_diels_alder' )] = ''
TAGL['%s-%s-reagent' % (dbse, 'parent_diels_alder' )] = ''
TAGL['%s-%s' % (dbse, 's_tetrazine_to_2HCN_N2' )] = ''
TAGL['%s-%s-reagent' % (dbse, 's_tetrazine_to_2HCN_N2' )] = ''
TAGL['%s-%s' % (dbse, 'CH3CH3_to_CH2CH2_H2' )] = ''
TAGL['%s-%s-reagent' % (dbse, 'CH3CH3_to_CH2CH2_H2' )] = ''
TAGL['%s-%s' % (dbse, 'CH3CH2F_to_CH2CH2_HF' )] = ''
TAGL['%s-%s-reagent' % (dbse, 'CH3CH2F_to_CH2CH2_HF' )] = ''
TAGL['%s-%s' % (dbse, 'CH2CHOH_to_CH3CHO' )] = ''
TAGL['%s-%s-reagent' % (dbse, 'CH2CHOH_to_CH3CHO' )] = ''
TAGL['%s-%s' % (dbse, 'silylene_insertion' )] = ''
TAGL['%s-%s-reagent' % (dbse, 'silylene_insertion' )] = ''
TAGL['%s-%s' % (dbse, 'HNCCS_to_HCN_CS' )] = ''
TAGL['%s-%s-reagent' % (dbse, 'HNCCS_to_HCN_CS' )] = ''
TAGL['%s-%s' % (dbse, 'acrolein_rotation' )] = ''
TAGL['%s-%s-reagent' % (dbse, 'acrolein_rotation' )] = ''
TAGL['%s-%s' % (dbse, 'HCONHOH_to_HCOHNHO' )] = ''
TAGL['%s-%s-reagent' % (dbse, 'HCONHOH_to_HCOHNHO' )] = ''
TAGL['%s-%s' % (dbse, 'HNC_H2_to_H2CNH' )] = ''
TAGL['%s-%s-reagent' % (dbse, 'HNC_H2_to_H2CNH' )] = ''
TAGL['%s-%s' % (dbse, 'H2CNH_to_HCNH2' )] = ''
TAGL['%s-%s-reagent' % (dbse, 'H2CNH_to_HCNH2' )] = ''
TAGL['%s-%s' % (dbse, 'HCNH2_to_HCN_H2' )] = ''
TAGL['%s-%s-reagent' % (dbse, 'HCNH2_to_HCN_H2' )] = ''
# <<< Geometry Specification Strings >>>
GEOS = {}
GEOS['%s-%s-reagent' % (dbse, 'HCN_to_HNC')] = qcdb.Molecule("""
0 1
C -0.0399606537 1.7574844925 0.0000000000
N 1.2331003808 0.0000000000 0.0000000000
H -1.1931397271 -1.7574844925 0.0000000000
units bohr
""")
# set { guess gwh }
GEOS['%s-%s-reagent' % (dbse, 'HCCH_to_CCH2')] = qcdb.Molecule("""
0 1
C -0.4287449922 -0.0396754553 0.0000000000
C 1.9151999208 0.0000000000 0.0000000000
H -2.1914826494 0.9243341981 0.0000000000
H 1.1657551200 -2.7344386158 0.0000000000
units bohr
""")
GEOS['%s-%s-reagent' % (dbse, 'H2CO_to_H2_CO')] = qcdb.Molecule("""
0 1
C 0.4656871259 0.8485069310 0.0000000000
O 2.6701879709 0.0000000000 0.0000000000
H -0.8014735829 -1.2561076240 0.0000000000
H -2.3344015139 0.4076006930 0.0000000000
units bohr
""")
GEOS['%s-%s-reagent' % (dbse, 'parent_diels_alder')] = qcdb.Molecule("""
0 1
C 2.8332856188 0.0000000000 1.3228081920
C 2.8332856188 0.0000000000 -1.3228081920
C -2.3253041766 1.1773255830 1.4550890112
C -2.3253041766 1.1773255830 -1.4550890112
C -0.9003485915 -0.5867135859 2.6456163840
C -0.9003485915 -0.5867135859 -2.6456163840
H -3.5271811018 2.4200543219 2.5397117805
H -3.5271811018 2.4200543219 -2.5397117805
H 3.2953993811 -1.7330277495 2.2966434467
H 3.2953993811 -1.7330277495 -2.2966434467
H 2.7098759932 1.7961080923 2.2800674415
H 2.7098759932 1.7961080923 -2.2800674415
H 0.3094408240 -1.7857534614 1.5179110011
H 0.3094408240 -1.7857534614 -1.5179110011
H -1.0137937565 -0.6962109120 4.6804210291
H -1.0137937565 -0.6962109120 -4.6804210291
units bohr
""")
GEOS['%s-%s-reagent' % (dbse, 's_tetrazine_to_2HCN_N2')] = qcdb.Molecule("""
0 1
N 1.4172944914 2.0866021582 0.0000000000
N -1.4172944914 2.0866021582 0.0000000000
N 1.1338355931 -2.3320847650 0.0000000000
N -1.1338355931 -2.3320847650 0.0000000000
C 2.5511300846 0.1227413034 0.0000000000
C -2.5511300846 0.1227413034 0.0000000000
H 4.5920341522 0.1227413034 0.0000000000
H -4.5920341522 0.1227413034 0.0000000000
units bohr
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3CH3_to_CH2CH2_H2')] = qcdb.Molecule("""
0 1
C -0.8859807599 -0.5993092127 0.0000000000
C 1.7490330691 0.0000000000 0.0000000000
H -2.8124044495 1.4800624400 0.0000000000
H 2.6980243606 -0.2473177820 1.8113616294
H 2.6980243606 -0.2473177820 -1.8113616294
H -1.6346529801 -1.2327446580 1.8113616294
H -1.6346529801 -1.2327446580 -1.8113616294
H -0.1773906205 2.0793716527 0.0000000000
units bohr
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3CH2F_to_CH2CH2_HF')] = qcdb.Molecule("""
0 1
C -1.2114041996 0.8408020855 0.0000000000
C 0.2710949367 -1.4185486447 0.0000000000
F 3.5694675519 0.0000000000 0.0000000000
H 0.8525043764 2.7837833236 0.0000000000
H 0.3440183107 -2.4023246321 1.7866606845
H 0.3440183107 -2.4023246321 -1.7866606845
H -2.0848496434 1.2993062499 1.7866606845
H -2.0848496434 1.2993062499 -1.7866606845
units bohr
""")
GEOS['%s-%s-reagent' % (dbse, 'CH2CHOH_to_CH3CHO')] = qcdb.Molecule("""
0 1
C -0.8638822546 0.0813052799 -1.2005986026
C 0.8568384546 0.0000000000 0.8814626606
O 0.0000000000 0.0000000000 3.1838367722
H -0.6251819034 1.8414643222 -2.2435002266
H -1.6228517906 -0.1159760771 1.2188517026
H 2.8765713088 -0.2172341086 0.5404787182
H -0.6214938148 -1.5895594164 -2.3805310244
units bohr
""")
GEOS['%s-%s-reagent' % (dbse, 'silylene_insertion')] = qcdb.Molecule("""
0 1
C -1.0718066965 -0.1928298581 -2.5299430030
C 1.0250683554 0.0000000000 -0.5763517845
Si 0.0000000000 0.0000000000 3.6474101733
H -2.1062850458 -1.9954618846 -2.4930629368
H 2.3105025336 0.4726358860 5.1508238324
H -0.1255963623 -1.7429478038 1.4155380424
H -1.8281836996 2.0590612062 4.1373801504
H 2.1120964044 1.7375623482 -0.9231008641
H 2.3199575761 -1.5749557960 -0.8688541874
H -0.2137259723 -0.1257146643 -4.4428207722
H -2.4220270929 1.3626505664 -2.5170186506
units bohr
""")
GEOS['%s-%s-reagent' % (dbse, 'HNCCS_to_HCN_CS')] = qcdb.Molecule("""
0 1
H -3.8046808257 0.6512707943 0.0000000000
N -2.5162100514 -0.7568079564 0.0000000000
C -0.3115625330 -1.2877146607 0.0000000000
C 2.4597092296 0.8743461413 0.0000000000
S 5.2948168550 0.0000000000 0.0000000000
units bohr
""")
GEOS['%s-%s-reagent' % (dbse, 'acrolein_rotation')] = qcdb.Molecule("""
0 1
C -1.2039490098 0.6299819763 -1.8443710751
C 1.2056120532 0.0000000000 1.9839551839
C 0.0666798969 -0.9449729645 -0.3221285864
O 0.0000000000 0.0000000000 3.9490688445
H -2.0522570986 -0.0738599558 -3.5620058833
H 3.1211832712 0.7038419322 1.9619811778
H 0.2424607776 -2.9181815653 -0.8128813887
H -1.3797298905 2.6031905771 -1.3536182728
units bohr
""")
GEOS['%s-%s-reagent' % (dbse, 'HCONHOH_to_HCOHNHO')] = qcdb.Molecule("""
0 1
O -3.5099961475 0.0808581274 0.0000000000
O 3.0085718011 0.0000000000 0.0000000000
C -1.2730134573 1.0962433404 0.0000000000
N 0.4462296750 -0.6585505903 0.0000000000
H -0.7144888327 3.0985015808 0.0000000000
H -2.0229706827 -2.0506512675 0.0000000000
H 4.0656676440 -1.5664011909 0.0000000000
units bohr
""")
GEOS['%s-%s-reagent' % (dbse, 'HNC_H2_to_H2CNH')] = qcdb.Molecule("""
0 1
H 1.3134432637 -0.2040854069 3.0062409561
H -2.0477645147 0.2508396893 -1.7949671795
H 0.1303083495 -0.0467542824 -2.3515361651
N 0.0000000000 0.0000000000 1.6630059611
C 0.6040129016 0.0000000000 -0.5227435726
units bohr
""")
GEOS['%s-%s-reagent' % (dbse, 'H2CNH_to_HCNH2')] = qcdb.Molecule("""
0 1
H -1.5502016034 1.0481518658 1.5179225979
H 0.3563149954 1.0481518658 -2.1765524295
H 0.1539684632 -2.0963037316 0.1645151437
N 0.0000000000 0.0000000000 1.2546414423
C 1.0399181448 0.0000000000 -0.7605267545
units bohr
""")
GEOS['%s-%s-reagent' % (dbse, 'HCNH2_to_HCN_H2')] = qcdb.Molecule("""
0 1
C 1.4474353774 0.0000000000 -1.1288243208
N 0.0000000000 0.0000000000 0.9719363835
H 3.2098577258 0.2829784675 -0.5084575223
H -2.3053050298 -1.2021721464 0.4824091612
H -2.3519880735 0.9191936788 0.1829362984
units bohr
""")
#########################################################################
# <<< Supplementary Quantum Chemical Results >>>
DATA = {}
DATA['NUCLEAR REPULSION ENERGY'] = {}
DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC96-HCN_to_HNC-reagent' ] = 23.31219465
DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC96-HCCH_to_CCH2-reagent' ] = 24.00207708
DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC96-H2CO_to_H2_CO-reagent' ] = 29.08166284
DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC96-parent_diels_alder-reagent' ] = 229.84632631
DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC96-s_tetrazine_to_2HCN_N2-reagent'] = 211.09082404
DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC96-CH3CH3_to_CH2CH2_H2-reagent' ] = 42.18531523
DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC96-CH3CH2F_to_CH2CH2_HF-reagent' ] = 72.38149908
DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC96-CH2CHOH_to_CH3CHO-reagent' ] = 70.96576388
DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC96-silylene_insertion-reagent' ] = 105.58418932
DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC96-HNCCS_to_HCN_CS-reagent' ] = 107.98244744
DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC96-acrolein_rotation-reagent' ] = 104.52459576
DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC96-HCONHOH_to_HCOHNHO-reagent' ] = 118.18056521
DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC96-HNC_H2_to_H2CNH-reagent' ] = 33.37163870
DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC96-H2CNH_to_HCNH2-reagent' ] = 35.16169022
DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC96-HCNH2_to_HCN_H2-reagent' ] = 30.58508624
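# Editor's sketch (an assumption about typical qcdb database usage, not part
# of the original module): reaction values are assembled as the RXNM-weighted
# sum of per-reagent energies over the ACTV member list. Every ACTV list here
# holds a single reagent with weight +1, so each "reaction" reduces to that
# reagent's energy (transition-state structures, no reference energies).
def _sketch_reaction_energy(rxn, energies):
    """Hypothetical helper: `energies` maps reagent labels to their energies."""
    return sum(RXNM[rxn][rgt] * energies[rgt] for rgt in ACTV[rxn])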
|
spring01/libPSI
|
lib/databases/BAKERJCC96.py
|
Python
|
gpl-2.0
| 16,029
|
[
"Psi4"
] |
e2b0549613eff50c4878c1d2a317da7a84e451ad9a150188a4e36aa030be9f0c
|
# from django.db.models import get_model
from edc_appointment.models import Appointment
from edc_meta_data.models import CrfMetaDataMixin
# from edc_base.audit_trail import AuditTrail
from edc_base.model.models import BaseUuidModel
from edc_constants.constants import (
UNSCHEDULED, SCHEDULED, COMPLETED_PROTOCOL_VISIT, DEAD, POS, MALE, MISSED_VISIT)
from edc_export.models import ExportTrackingFieldsMixin
from edc_offstudy.models import OffStudyMixin
from edc_registration.models import RegisteredSubject
# from edc_sync.models import SyncModelMixin
from edc_visit_tracking.constants import VISIT_REASON_NO_FOLLOW_UP_CHOICES, LOST_VISIT
from edc_visit_tracking.models import PreviousVisitMixin
from edc_visit_tracking.models import VisitModelMixin
from tshilo_dikotla.choices import VISIT_REASON
from edc_visit_tracking.models.caretaker_fields_mixin import CaretakerFieldsMixin
from .infant_birth import InfantBirth
class InfantVisit(
CrfMetaDataMixin, PreviousVisitMixin, OffStudyMixin, VisitModelMixin,
CaretakerFieldsMixin, ExportTrackingFieldsMixin, BaseUuidModel):
""" A model completed by the user on the infant visits. """
off_study_model = ('td_infant', 'InfantOffStudy')
death_report_model = ('td_infant', 'InfantDeathReport')
consent_model = InfantBirth # a bit weird, see visit_form_mixin clean()
# history = AuditTrail()
def custom_post_update_crf_meta_data(self):
"""Calls custom methods that manipulate meta data on the post save.
This method is called in a post-save signal in edc_meta_data."""
if self.survival_status == DEAD:
self.require_death_report()
elif self.reason == COMPLETED_PROTOCOL_VISIT:
self.require_off_study_report()
elif self.reason == UNSCHEDULED:
self.change_to_unscheduled_visit(self.appointment)
elif self.reason == SCHEDULED:
pass
# if self.postnatal_enrollment.enrollment_hiv_status:
# self.requires_infant_birth_arv_on_maternal_pos()
# self.requires_dna_pcr_on_maternal_pos()
# self.requires_circumcision_for_male_at_2030_or_2060()
return self
# def requires_infant_birth_arv_on_maternal_pos(self):
# PostnatalEnrollment = get_model('mb_maternal', 'PostnatalEnrollment')
# maternal_registered_subject = RegisteredSubject.objects.get(
# subject_identifier=self.appointment.registered_subject.relative_identifier)
# postnatal_enrollment = PostnatalEnrollment.objects.get(
# registered_subject=maternal_registered_subject)
# if postnatal_enrollment.enrollment_hiv_status == POS:
# if self.appointment.visit_definition.code == '2000':
# self.crf_is_required(self.appointment, 'td_infant', 'infantbirtharv')
# def requires_dna_pcr_on_maternal_pos(self):
# PostnatalEnrollment = get_model('td_maternal', 'PostnatalEnrollment')
# maternal_registered_subject = RegisteredSubject.objects.get(
# subject_identifier=self.appointment.registered_subject.relative_identifier)
# postnatal_enrollment = PostnatalEnrollment.objects.get(
# registered_subject=maternal_registered_subject)
# if postnatal_enrollment.enrollment_hiv_status == POS:
# if self.appointment.visit_definition.code in [
# '2000', '2010', '2030', '2060', '2090', '2120']:
# self.requisition_is_required(
# self.appointment, 'td_lab', 'infantrequisition', 'DNA PCR')
def requires_circumcision_for_male_at_2030_or_2060(self):
infant_birth = InfantBirth.objects.get(
registered_subject=self.appointment.registered_subject)
if infant_birth.gender == MALE:
if self.appointment.visit_definition.code == '2030':
self.crf_is_required(
self.appointment, 'td_infant', 'infantcircumcision')
if self.appointment.visit_definition.code == '2060':
appointment = Appointment.objects.get(
visit_definition__code='2030',
registered_subject=self.appointment.registered_subject)
if appointment:
infant_visit = InfantVisit.objects.get(appointment=appointment)
if infant_visit.reason == MISSED_VISIT:
self.crf_is_required(
self.appointment, 'td_infant', 'infantcircumcision')
def natural_key(self):
return self.appointment.natural_key()
natural_key.dependencies = ['edc_appointment.appointment']
def get_visit_reason_choices(self):
return VISIT_REASON
def get_visit_reason_no_follow_up_choices(self):
"""Returns the visit reasons that do not imply any data collection;
that is, the subject is not available."""
dct = {}
for item in VISIT_REASON_NO_FOLLOW_UP_CHOICES:
if item not in [COMPLETED_PROTOCOL_VISIT, LOST_VISIT]:
dct.update({item: item})
return dct
class Meta:
app_label = 'td_infant'
verbose_name = "Infant Visit"
        verbose_name_plural = "Infant Visits"
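# Editor's sketch (hypothetical, not part of td_infant): the filtering in
# get_visit_reason_no_follow_up_choices above is equivalent to the dict
# comprehension
#     {item: item for item in VISIT_REASON_NO_FOLLOW_UP_CHOICES
#      if item not in (COMPLETED_PROTOCOL_VISIT, LOST_VISIT)}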
|
TshepangRas/tshilo-dikotla
|
td_infant/models/infant_visit.py
|
Python
|
gpl-2.0
| 5,263
|
[
"VisIt"
] |
685c5c68abf0439f8a68b1f65687bd2c7be8bb172eeab88ae25cb96d600f2d80
|
"""
Tests which scan for certain occurrences in the code, they may not find
all of these occurrences but should catch almost all.
"""
from __future__ import division, absolute_import, print_function
import sys
import pytest
if sys.version_info >= (3, 4):
from pathlib import Path
import ast
import tokenize
import numpy
class ParseCall(ast.NodeVisitor):
def __init__(self):
self.ls = []
def visit_Attribute(self, node):
ast.NodeVisitor.generic_visit(self, node)
self.ls.append(node.attr)
def visit_Name(self, node):
self.ls.append(node.id)
class FindFuncs(ast.NodeVisitor):
def __init__(self, filename):
super().__init__()
self.__filename = filename
def visit_Call(self, node):
p = ParseCall()
p.visit(node.func)
ast.NodeVisitor.generic_visit(self, node)
if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
if node.args[0].s == "ignore":
raise AssertionError(
"ignore filter should not be used; found in "
"{} on line {}".format(self.__filename, node.lineno))
if p.ls[-1] == 'warn' and (
len(p.ls) == 1 or p.ls[-2] == 'warnings'):
if "testing/tests/test_warnings.py" is self.__filename:
# This file
return
# See if stacklevel exists:
if len(node.args) == 3:
return
args = {kw.arg for kw in node.keywords}
if "stacklevel" in args:
return
raise AssertionError(
"warnings should have an appropriate stacklevel; found in "
"{} on line {}".format(self.__filename, node.lineno))
@pytest.mark.slow
def test_warning_calls():
# combined "ignore" and stacklevel error
base = Path(numpy.__file__).parent
for path in base.rglob("*.py"):
if base / "testing" in path.parents:
continue
if path == base / "__init__.py":
continue
if path == base / "random" / "__init__.py":
continue
# use tokenize to auto-detect encoding on systems where no
# default encoding is defined (e.g. LANG='C')
with tokenize.open(str(path)) as file:
tree = ast.parse(file.read())
FindFuncs(path).visit(tree)
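# Editor's sketch (not part of the numpy test suite): the same AST pattern can
# lint a single file in isolation, given the guarded imports above.
def _sketch_check_file(path):
    with tokenize.open(str(path)) as file:  # auto-detect the source encoding
        tree = ast.parse(file.read())
    FindFuncs(path).visit(tree)  # raises AssertionError on an offending call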
|
kubaszostak/gdal-dragndrop
|
osgeo/apps/Python27/Lib/site-packages/numpy/tests/test_warnings.py
|
Python
|
mit
| 2,594
|
[
"VisIt"
] |
c9ee130469ceb8f019caee5b4b9243c5857984b82551041f292acc5b063fa612
|
from .layer import Layer
from ..activation import Activation
from ..potential import Potential
import numpy as np
class ParticleVectorNLocalConvolutionInput(object):
def __init__(self, output_size, nr=3, nv=3, sr=1.0, sv=1.0):
self.output_size = output_size
self.nv = nv
self.nr = nr
# Positions
self.positions = []
for i in range(nr):
self.positions.append(np.random.normal(0.0, sr, output_size))
# Vectors
self.nvectors = []
for i in range(nv):
self.nvectors.append(np.random.normal(0.0, sv, output_size))
# g = np.sqrt(1.0 / output_size)
# self.nvectors.append(np.random.uniform(-g, g, output_size))
# self.normalize()
def get_rxyz(self):
return self.positions, self.nvectors
def feed_forward(self, a_in):
return a_in, (self.get_rxyz())
def normalize(self):
d = 0.0
for v in range(self.nv):
d += self.nvectors[v]**2
d = np.sqrt(d)
for v in range(self.nv):
self.nvectors[v] /= d
class ParticleVectorNLocalConvolution(object):
def __init__(self, input_size=0, output_size=0, nr=3, nv=3, activation="sigmoid", potential="gaussian",
sr=1.0, sv=1.0, q=None, b=None, boff=0.0, uniform=False, p_dropout=-1.0, sigma_r=-1.0,
delta_r=0.0, apply_convolution=False):
self.input_size = input_size
self.output_size = output_size
self.nr = nr
self.nv = nv
self.activation_name = activation.lower()
self.activation = Activation.get(activation)
self.d_activation = Activation.get_d(activation)
self.potential = Potential.get(potential)
self.d_potential = Potential.get_d(potential)
self.p_dropout = p_dropout
self.dropout_mask = None
self.sigma_r = sigma_r
self.delta_r = delta_r
self.apply_convolution = apply_convolution
# Weight initialization
g = np.sqrt(2.0 / (input_size + output_size))
if b is None:
b = g
self.b = np.random.uniform(boff - b, boff + b, (1, output_size))
# self.b = np.random.normal(0.0, sv, (1, output_size))
# Positions
self.positions = []
for i in range(nr):
if uniform:
self.positions.append(np.random.uniform(-sr, sr, output_size))
else:
self.positions.append(np.random.normal(0.0, sr, output_size))
# Vectors
self.nvectors = []
for i in range(nv):
self.nvectors.append(np.random.normal(0.0, sv, output_size))
# self.nvectors.append(np.random.uniform(-g, g, output_size))
# self.normalize()
# Matrix
self.w = None
self.positions_cache = None
def get_rxyz(self):
return self.positions, self.nvectors
def feed_forward(self, a_in, r_in):
return self.compute_a(self.compute_z(a_in, r_in)), (self.get_rxyz())
def normalize(self):
d = 0.0
for v in range(self.nv):
d += self.nvectors[v]**2
d = np.sqrt(d)
for v in range(self.nv):
self.nvectors[v] /= d
def compute_z(self, a_in, r_in, apply_input_noise=False):
"""
Vectorized v2.0
:param a_in:
:param r_in:
:return:
"""
atrans = a_in.transpose()
z = None
r_positions = r_in[0]
r_nvectors = r_in[1]
if apply_input_noise and self.sigma_r > 0.0:
for r in range(self.nr):
r_positions[r] += np.random.normal(0.0, self.sigma_r, r_positions[r].shape)
if self.apply_convolution:
self.positions_cache = np.zeros((self.nr, self.output_size, len(a_in)))
# Let's just do 2 dimensions
z = np.zeros((self.output_size, len(a_in)))
n_steps = [-3, -2, -1, 0, 1, 2, 3]
# map for convenience
int_to_combo = []
for ix, nx in enumerate(n_steps):
for iy, ny in enumerate(n_steps):
int_to_combo.append((ix, iy, 1))
# for iz, nz in enumerate(n_steps):
# int_to_combo.append((ix, iy, iz))
# loop over nodes
for j in range(self.output_size):
# same for all translations
dot = 0.0
for v in range(self.nv):
dot += r_nvectors[v] * self.nvectors[v][j]
dot_atrans = dot.reshape((self.input_size, 1)) * atrans # cache outside the loops
tmp_zj_xyz = np.ones((len(int_to_combo), len(a_in))) * self.b[0][j]
ii = 0
for ix, nx in enumerate(n_steps):
pr0 = self.positions[0][j] + nx * self.delta_r
ddx = (r_positions[0] - pr0)**2
for iy, ny in enumerate(n_steps):
pr1 = self.positions[1][j] + ny * self.delta_r
ddy = (r_positions[1] - pr1)**2
pr2 = self.positions[2][j]
ddz = (r_positions[2] - pr2)**2
d = np.sqrt(ddx + ddy + ddz)
w_ji = self.potential(d)
tmp_zj_xyz[ii] += w_ji.dot(dot_atrans)
ii += 1
# for iz, nz in enumerate(n_steps):
# pr2 = self.positions[2][j] + nz * self.delta_r
# ddz = (r_positions[2] - pr2)**2
#
# d = np.sqrt(ddx + ddy + ddz)
# w_ji = self.potential(d)
#
# tmp_zj_xyz[ii] = self.b[0][j] + w_ji.dot(dot_atrans)
# ii += 1
# determine max for each input -- keep track of offset for gradient
for ja, amax in enumerate(tmp_zj_xyz.argmax(axis=0)):
ix, iy, iz = int_to_combo[amax]
self.positions_cache[0][j][ja] = self.positions[0][j] + n_steps[ix] * self.delta_r
self.positions_cache[1][j][ja] = self.positions[1][j] + n_steps[iy] * self.delta_r
self.positions_cache[2][j][ja] = self.positions[2][j] + n_steps[iz] * self.delta_r
z[j][ja] = tmp_zj_xyz[amax][ja]
else:
z = np.zeros((self.output_size, len(a_in)))
for j in range(self.output_size):
dd = 0.0
for r in range(self.nr):
dd += (r_positions[r] - self.positions[r][j]) ** 2
d = np.sqrt(dd)
dot = 0.0
for v in range(self.nv):
dot += r_nvectors[v] * self.nvectors[v][j]
w_ji = self.potential(d) * dot
z[j] = self.b[0][j] + w_ji.dot(atrans)
return z.transpose()
def compute_a(self, z, apply_dropout=False):
a = self.activation(z)
if apply_dropout and self.p_dropout > 0.0:
self.dropout_mask = np.random.binomial(1, self.p_dropout, a.shape)
a *= self.dropout_mask
return a
def compute_da(self, z, apply_dropout=False):
da = self.d_activation(z)
if apply_dropout and self.p_dropout > 0.0:
da *= self.dropout_mask
return da
def compute_w(self, r_in):
w = np.zeros((self.input_size, self.output_size))
r_positions = r_in[0]
r_nvectors = r_in[1]
for j in range(self.output_size):
dd = 0.0
for r in range(self.nr):
dd += (r_positions[r] - self.positions[r][j])**2
d = np.sqrt(dd)
dot = 0.0
for v in range(self.nv):
dot += r_nvectors[v] * self.nvectors[v][j]
w_ji = self.potential(d) * dot
for i in range(self.input_size):
w[i][j] = w_ji[i]
self.w = w
return w
def compute_w_j(self, r_in, j):
return None
def compute_w_i(self, r_in, i):
return None
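# Editor's sketch (hypothetical wiring, not from brainsparks itself): layers in
# this family chain feed_forward calls, passing activations together with the
# (positions, nvectors) tuple of the emitting layer, e.g.
#     inp = ParticleVectorNLocalConvolutionInput(output_size=4)
#     hid = ParticleVectorNLocalConvolution(input_size=4, output_size=2)
#     a, r = inp.feed_forward(np.random.randn(5, 4))  # 5 samples, 4 features
#     a, r = hid.feed_forward(a, r)                   # a.shape == (5, 2)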
|
awlange/brainsparks
|
src/calrissian/layers/particle_vector_n_local_conv.py
|
Python
|
mit
| 8,208
|
[
"Gaussian"
] |
47f3f4b6257f122a5a1b3ed7ab23432862be99e101f5c8df311929fd8424a86f
|
# Dual Annealing implementation.
# Copyright (c) 2018 Sylvain Gubian <sylvain.gubian@pmi.com>,
# Yang Xiang <yang.xiang@pmi.com>
# Author: Sylvain Gubian, Yang Xiang, PMP S.A.
"""
A Dual Annealing global optimization algorithm
"""
import numpy as np
from scipy.optimize import OptimizeResult
from scipy.optimize import minimize
from scipy.special import gammaln
from scipy._lib._util import check_random_state
__all__ = ['dual_annealing']
class VisitingDistribution:
"""
Class used to generate new coordinates based on the distorted
Cauchy-Lorentz distribution. Depending on the steps within the strategy
chain, the class implements the strategy for generating new location
changes.
Parameters
----------
lb : array_like
A 1-D NumPy ndarray containing lower bounds of the generated
        components. Neither NaN nor inf are allowed.
ub : array_like
A 1-D NumPy ndarray containing upper bounds for the generated
        components. Neither NaN nor inf are allowed.
visiting_param : float
Parameter for visiting distribution. Default value is 2.62.
Higher values give the visiting distribution a heavier tail, this
makes the algorithm jump to a more distant region.
        The value range is (1, 3]. Its value is fixed for the life of the
object.
rand_gen : {`~numpy.random.RandomState`, `~numpy.random.Generator`}
A `~numpy.random.RandomState`, `~numpy.random.Generator` object
for using the current state of the created random generator container.
"""
TAIL_LIMIT = 1.e8
MIN_VISIT_BOUND = 1.e-10
def __init__(self, lb, ub, visiting_param, rand_gen):
# if you wish to make _visiting_param adjustable during the life of
# the object then _factor2, _factor3, _factor5, _d1, _factor6 will
# have to be dynamically calculated in `visit_fn`. They're factored
# out here so they don't need to be recalculated all the time.
self._visiting_param = visiting_param
self.rand_gen = rand_gen
self.lower = lb
self.upper = ub
self.bound_range = ub - lb
# these are invariant numbers unless visiting_param changes
self._factor2 = np.exp((4.0 - self._visiting_param) * np.log(
self._visiting_param - 1.0))
self._factor3 = np.exp((2.0 - self._visiting_param) * np.log(2.0)
/ (self._visiting_param - 1.0))
self._factor4_p = np.sqrt(np.pi) * self._factor2 / (self._factor3 * (
3.0 - self._visiting_param))
self._factor5 = 1.0 / (self._visiting_param - 1.0) - 0.5
self._d1 = 2.0 - self._factor5
self._factor6 = np.pi * (1.0 - self._factor5) / np.sin(
np.pi * (1.0 - self._factor5)) / np.exp(gammaln(self._d1))
def visiting(self, x, step, temperature):
""" Based on the step in the strategy chain, new coordinated are
generated by changing all components is the same time or only
one of them, the new values are computed with visit_fn method
"""
dim = x.size
if step < dim:
# Changing all coordinates with a new visiting value
visits = self.visit_fn(temperature, dim)
upper_sample, lower_sample = self.rand_gen.uniform(size=2)
visits[visits > self.TAIL_LIMIT] = self.TAIL_LIMIT * upper_sample
visits[visits < -self.TAIL_LIMIT] = -self.TAIL_LIMIT * lower_sample
x_visit = visits + x
a = x_visit - self.lower
b = np.fmod(a, self.bound_range) + self.bound_range
x_visit = np.fmod(b, self.bound_range) + self.lower
x_visit[np.fabs(
x_visit - self.lower) < self.MIN_VISIT_BOUND] += 1.e-10
else:
# Changing only one coordinate at a time based on strategy
# chain step
x_visit = np.copy(x)
visit = self.visit_fn(temperature, 1)[0]
if visit > self.TAIL_LIMIT:
visit = self.TAIL_LIMIT * self.rand_gen.uniform()
elif visit < -self.TAIL_LIMIT:
visit = -self.TAIL_LIMIT * self.rand_gen.uniform()
index = step - dim
x_visit[index] = visit + x[index]
a = x_visit[index] - self.lower[index]
b = np.fmod(a, self.bound_range[index]) + self.bound_range[index]
x_visit[index] = np.fmod(b, self.bound_range[
index]) + self.lower[index]
if np.fabs(x_visit[index] - self.lower[
index]) < self.MIN_VISIT_BOUND:
x_visit[index] += self.MIN_VISIT_BOUND
return x_visit
def visit_fn(self, temperature, dim):
""" Formula Visita from p. 405 of reference [2] """
x, y = self.rand_gen.normal(size=(dim, 2)).T
factor1 = np.exp(np.log(temperature) / (self._visiting_param - 1.0))
factor4 = self._factor4_p * factor1
# sigmax
x *= np.exp(-(self._visiting_param - 1.0) * np.log(
self._factor6 / factor4) / (3.0 - self._visiting_param))
den = np.exp((self._visiting_param - 1.0) * np.log(np.fabs(y)) /
(3.0 - self._visiting_param))
return x / den
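# Editor's sketch (illustration only, not part of the SciPy sources): the
# visiting distribution can be exercised on its own to propose trial moves.
#     rng = np.random.RandomState(0)
#     vd = VisitingDistribution(lb=np.zeros(2), ub=np.ones(2),
#                               visiting_param=2.62, rand_gen=rng)
#     x_trial = vd.visiting(np.array([0.5, 0.5]), step=0, temperature=5230.)
#     # step < dim, so all coordinates are perturbed and folded into bounds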
class EnergyState:
"""
    Class used to record the energy state. At any time, it knows the
    currently used coordinates and the most recent best location.
Parameters
----------
lower : array_like
        A 1-D NumPy ndarray containing lower bounds for generating the initial
        random components in the `reset` method.
    upper : array_like
        A 1-D NumPy ndarray containing upper bounds for generating the initial
        random components in the `reset` method. Neither NaN nor inf are
        allowed.
callback : callable, ``callback(x, f, context)``, optional
A callback function which will be called for all minima found.
``x`` and ``f`` are the coordinates and function value of the
latest minimum found, and `context` has value in [0, 1, 2]
"""
    # Maximum number of trials for generating a valid starting point
MAX_REINIT_COUNT = 1000
def __init__(self, lower, upper, callback=None):
self.ebest = None
self.current_energy = None
self.current_location = None
self.xbest = None
self.lower = lower
self.upper = upper
self.callback = callback
def reset(self, func_wrapper, rand_gen, x0=None):
"""
        Initialize the current location in the search domain. If `x0` is not
provided, a random location within the bounds is generated.
"""
if x0 is None:
self.current_location = rand_gen.uniform(self.lower, self.upper,
size=len(self.lower))
else:
self.current_location = np.copy(x0)
init_error = True
reinit_counter = 0
while init_error:
self.current_energy = func_wrapper.fun(self.current_location)
if self.current_energy is None:
raise ValueError('Objective function is returning None')
if (not np.isfinite(self.current_energy) or np.isnan(
self.current_energy)):
if reinit_counter >= EnergyState.MAX_REINIT_COUNT:
init_error = False
message = (
'Stopping algorithm because function '
                        'creates NaN or (+/-) infinity values even when '
'trying new random parameters'
)
raise ValueError(message)
self.current_location = rand_gen.uniform(self.lower,
self.upper,
size=self.lower.size)
reinit_counter += 1
else:
init_error = False
# If first time reset, initialize ebest and xbest
if self.ebest is None and self.xbest is None:
self.ebest = self.current_energy
self.xbest = np.copy(self.current_location)
# Otherwise, we keep them in case of reannealing reset
def update_best(self, e, x, context):
self.ebest = e
self.xbest = np.copy(x)
if self.callback is not None:
val = self.callback(x, e, context)
if val is not None:
if val:
return('Callback function requested to stop early by '
'returning True')
def update_current(self, e, x):
self.current_energy = e
self.current_location = np.copy(x)
class StrategyChain:
"""
    Class that implements, within a Markov chain, the strategy for location
    acceptance and for deciding when to run a local search.
Parameters
----------
acceptance_param : float
Parameter for acceptance distribution. It is used to control the
probability of acceptance. The lower the acceptance parameter, the
smaller the probability of acceptance. Default value is -5.0 with
a range (-1e4, -5].
visit_dist : VisitingDistribution
Instance of `VisitingDistribution` class.
func_wrapper : ObjectiveFunWrapper
Instance of `ObjectiveFunWrapper` class.
minimizer_wrapper: LocalSearchWrapper
Instance of `LocalSearchWrapper` class.
rand_gen : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
energy_state: EnergyState
Instance of `EnergyState` class.
"""
def __init__(self, acceptance_param, visit_dist, func_wrapper,
minimizer_wrapper, rand_gen, energy_state):
# Local strategy chain minimum energy and location
self.emin = energy_state.current_energy
self.xmin = np.array(energy_state.current_location)
# Global optimizer state
self.energy_state = energy_state
# Acceptance parameter
self.acceptance_param = acceptance_param
# Visiting distribution instance
self.visit_dist = visit_dist
# Wrapper to objective function
self.func_wrapper = func_wrapper
# Wrapper to the local minimizer
self.minimizer_wrapper = minimizer_wrapper
self.not_improved_idx = 0
self.not_improved_max_idx = 1000
self._rand_gen = rand_gen
self.temperature_step = 0
self.K = 100 * len(energy_state.current_location)
def accept_reject(self, j, e, x_visit):
r = self._rand_gen.uniform()
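        # Generalized (Tsallis) acceptance probability, cf. the module
        # docstring: p = [1 - (1 - q_a) * beta * dE]^(1/(1 - q_a)) with
        # q_a = acceptance_param and beta = 1/temperature_step; a negative
        # bracket maps to zero acceptance probability.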
pqv_temp = 1.0 - ((1.0 - self.acceptance_param) *
(e - self.energy_state.current_energy) / self.temperature_step)
if pqv_temp <= 0.:
pqv = 0.
else:
pqv = np.exp(np.log(pqv_temp) / (
1. - self.acceptance_param))
if r <= pqv:
# We accept the new location and update state
self.energy_state.update_current(e, x_visit)
self.xmin = np.copy(self.energy_state.current_location)
# No improvement for a long time
if self.not_improved_idx >= self.not_improved_max_idx:
if j == 0 or self.energy_state.current_energy < self.emin:
self.emin = self.energy_state.current_energy
self.xmin = np.copy(self.energy_state.current_location)
def run(self, step, temperature):
self.temperature_step = temperature / float(step + 1)
self.not_improved_idx += 1
for j in range(self.energy_state.current_location.size * 2):
if j == 0:
if step == 0:
self.energy_state_improved = True
else:
self.energy_state_improved = False
x_visit = self.visit_dist.visiting(
self.energy_state.current_location, j, temperature)
# Calling the objective function
e = self.func_wrapper.fun(x_visit)
if e < self.energy_state.current_energy:
# We have got a better energy value
self.energy_state.update_current(e, x_visit)
if e < self.energy_state.ebest:
val = self.energy_state.update_best(e, x_visit, 0)
if val is not None:
if val:
return val
self.energy_state_improved = True
self.not_improved_idx = 0
else:
# We have not improved but do we accept the new location?
self.accept_reject(j, e, x_visit)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
return ('Maximum number of function call reached '
'during annealing')
# End of StrategyChain loop
def local_search(self):
# Decision making for performing a local search
# based on strategy chain results
# If energy has been improved or no improvement since too long,
# performing a local search with the best strategy chain location
if self.energy_state_improved:
# Global energy has improved, let's see if LS improves further
e, x = self.minimizer_wrapper.local_search(self.energy_state.xbest,
self.energy_state.ebest)
if e < self.energy_state.ebest:
self.not_improved_idx = 0
val = self.energy_state.update_best(e, x, 1)
if val is not None:
if val:
return val
self.energy_state.update_current(e, x)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
return ('Maximum number of function call reached '
'during local search')
# Check probability of a need to perform a LS even if no improvement
do_ls = False
if self.K < 90 * len(self.energy_state.current_location):
pls = np.exp(self.K * (
self.energy_state.ebest - self.energy_state.current_energy) /
self.temperature_step)
if pls >= self._rand_gen.uniform():
do_ls = True
# Global energy not improved, let's see what LS gives
# on the best strategy chain location
if self.not_improved_idx >= self.not_improved_max_idx:
do_ls = True
if do_ls:
e, x = self.minimizer_wrapper.local_search(self.xmin, self.emin)
self.xmin = np.copy(x)
self.emin = e
self.not_improved_idx = 0
self.not_improved_max_idx = self.energy_state.current_location.size
if e < self.energy_state.ebest:
val = self.energy_state.update_best(
self.emin, self.xmin, 2)
if val is not None:
if val:
return val
self.energy_state.update_current(e, x)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
return ('Maximum number of function call reached '
'during dual annealing')
class ObjectiveFunWrapper:
def __init__(self, func, maxfun=1e7, *args):
self.func = func
self.args = args
# Number of objective function evaluations
self.nfev = 0
# Number of gradient function evaluation if used
self.ngev = 0
# Number of hessian of the objective function if used
self.nhev = 0
self.maxfun = maxfun
def fun(self, x):
self.nfev += 1
return self.func(x, *self.args)
class LocalSearchWrapper:
"""
Class used to wrap around the minimizer used for local search
Default local minimizer is SciPy minimizer L-BFGS-B
"""
LS_MAXITER_RATIO = 6
LS_MAXITER_MIN = 100
LS_MAXITER_MAX = 1000
def __init__(self, search_bounds, func_wrapper, **kwargs):
self.func_wrapper = func_wrapper
self.kwargs = kwargs
self.minimizer = minimize
bounds_list = list(zip(*search_bounds))
self.lower = np.array(bounds_list[0])
self.upper = np.array(bounds_list[1])
# If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method
if not self.kwargs:
n = len(self.lower)
ls_max_iter = min(max(n * self.LS_MAXITER_RATIO,
self.LS_MAXITER_MIN),
self.LS_MAXITER_MAX)
self.kwargs['method'] = 'L-BFGS-B'
self.kwargs['options'] = {
'maxiter': ls_max_iter,
}
self.kwargs['bounds'] = list(zip(self.lower, self.upper))
def local_search(self, x, e):
# Run local search from the given x location where energy value is e
x_tmp = np.copy(x)
mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs)
if 'njev' in mres:
self.func_wrapper.ngev += mres.njev
if 'nhev' in mres:
self.func_wrapper.nhev += mres.nhev
# Check if is valid value
is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun)
in_bounds = np.all(mres.x >= self.lower) and np.all(
mres.x <= self.upper)
is_valid = is_finite and in_bounds
        # Use the new point only if it is valid and returns a better result
if is_valid and mres.fun < e:
return mres.fun, mres.x
else:
return e, x_tmp
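# Editor's sketch (illustration only): the wrapper polishes a candidate with a
# bounded local minimize() call and keeps the old point unless the result is
# finite, in-bounds, and strictly better, e.g.
#     fw = ObjectiveFunWrapper(lambda x: np.sum(x * x))
#     lsw = LocalSearchWrapper(((-1.0, 1.0), (-1.0, 1.0)), fw)
#     e, x = lsw.local_search(np.array([0.5, -0.5]), 0.5)  # e <= 0.5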
def dual_annealing(func, bounds, args=(), maxiter=1000,
local_search_options={}, initial_temp=5230.,
restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0,
maxfun=1e7, seed=None, no_local_search=False,
callback=None, x0=None):
"""
Find the global minimum of a function using Dual Annealing.
Parameters
----------
func : callable
The objective function to be minimized. Must be in the form
``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
and ``args`` is a tuple of any additional fixed parameters needed to
completely specify the function.
bounds : sequence, shape (n, 2)
Bounds for variables. ``(min, max)`` pairs for each element in ``x``,
defining bounds for the objective function parameter.
args : tuple, optional
Any additional fixed parameters needed to completely specify the
objective function.
maxiter : int, optional
The maximum number of global search iterations. Default value is 1000.
local_search_options : dict, optional
Extra keyword arguments to be passed to the local minimizer
(`minimize`). Some important options could be:
``method`` for the minimizer method to use and ``args`` for
objective function additional arguments.
initial_temp : float, optional
        The initial temperature; use higher values to facilitate a wider
search of the energy landscape, allowing dual_annealing to escape
local minima that it is trapped in. Default value is 5230. Range is
(0.01, 5.e4].
restart_temp_ratio : float, optional
During the annealing process, temperature is decreasing, when it
reaches ``initial_temp * restart_temp_ratio``, the reannealing process
is triggered. Default value of the ratio is 2e-5. Range is (0, 1).
visit : float, optional
Parameter for visiting distribution. Default value is 2.62. Higher
values give the visiting distribution a heavier tail, this makes
the algorithm jump to a more distant region. The value range is (1, 3].
accept : float, optional
Parameter for acceptance distribution. It is used to control the
probability of acceptance. The lower the acceptance parameter, the
smaller the probability of acceptance. Default value is -5.0 with
a range (-1e4, -5].
maxfun : int, optional
Soft limit for the number of objective function calls. If the
algorithm is in the middle of a local search, this number will be
exceeded, the algorithm will stop just after the local search is
done. Default value is 1e7.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Specify `seed` for repeatable minimizations. The random numbers
generated with this seed only affect the visiting distribution function
and new coordinates generation.
no_local_search : bool, optional
If `no_local_search` is set to True, a traditional Generalized
Simulated Annealing will be performed with no local search
strategy applied.
callback : callable, optional
A callback function with signature ``callback(x, f, context)``,
which will be called for all minima found.
``x`` and ``f`` are the coordinates and function value of the
latest minimum found, and ``context`` has value in [0, 1, 2], with the
following meaning:
- 0: minimum detected in the annealing process.
- 1: detection occurred in the local search process.
- 2: detection done in the dual annealing process.
If the callback implementation returns True, the algorithm will stop.
x0 : ndarray, shape(n,), optional
Coordinates of a single N-D starting point.
Returns
-------
res : OptimizeResult
The optimization result represented as a `OptimizeResult` object.
Important attributes are: ``x`` the solution array, ``fun`` the value
of the function at the solution, and ``message`` which describes the
cause of the termination.
See `OptimizeResult` for a description of other attributes.
Notes
-----
This function implements the Dual Annealing optimization. This stochastic
approach derived from [3]_ combines the generalization of CSA (Classical
Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_ coupled
to a strategy for applying a local search on accepted locations [4]_.
An alternative implementation of this same algorithm is described in [5]_
and benchmarks are presented in [6]_. This approach introduces an advanced
method to refine the solution found by the generalized annealing
process. This algorithm uses a distorted Cauchy-Lorentz visiting
distribution, with its shape controlled by the parameter :math:`q_{v}`
.. math::
g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\
\\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\
\\left[{1+(q_{v}-1)\\frac{(\\Delta x(t))^{2}} { \\
\\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\
\\frac{1}{q_{v}-1}+\\frac{D-1}{2}}}
Where :math:`t` is the artificial time. This visiting distribution is used
to generate a trial jump distance :math:`\\Delta x(t)` of variable
:math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`.
From the starting point, after calling the visiting distribution
function, the acceptance probability is computed as follows:
.. math::
p_{q_{a}} = \\min{\\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\
\\frac{1}{1-q_{a}}}\\}}
    Where :math:`q_{a}` is an acceptance parameter. For :math:`q_{a}<1`, zero
acceptance probability is assigned to the cases where
.. math::
[1-(1-q_{a}) \\beta \\Delta E] < 0
The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to
.. math::
T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\
1 + t\\right)^{q_{v}-1}-1}
Where :math:`q_{v}` is the visiting parameter.
.. versionadded:: 1.2.0
References
----------
    .. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs
        statistics. Journal of Statistical Physics, 52, 479-487 (1988).
.. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing.
Physica A, 233, 395-406 (1996).
.. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated
Annealing Algorithm and Its Application to the Thomson Model.
Physics Letters A, 233, 216-220 (1997).
.. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated
Annealing. Physical Review E, 62, 4473 (2000).
.. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized
Simulated Annealing for Efficient Global Optimization: the GenSA
Package for R. The R Journal, Volume 5/1 (2013).
.. [6] Mullen, K. Continuous Global Optimization in R. Journal of
Statistical Software, 60(6), 1 - 45, (2014).
:doi:`10.18637/jss.v060.i06`
Examples
--------
The following example is a 10-D problem, with many local minima.
The function involved is called Rastrigin
(https://en.wikipedia.org/wiki/Rastrigin_function)
    >>> import numpy as np
    >>> from scipy.optimize import dual_annealing
    >>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x)
>>> lw = [-5.12] * 10
>>> up = [5.12] * 10
>>> ret = dual_annealing(func, bounds=list(zip(lw, up)))
>>> ret.x
array([-4.26437714e-09, -3.91699361e-09, -1.86149218e-09, -3.97165720e-09,
-6.29151648e-09, -6.53145322e-09, -3.93616815e-09, -6.55623025e-09,
-6.05775280e-09, -5.00668935e-09]) # random
>>> ret.fun
0.000000
""" # noqa: E501
if x0 is not None and not len(x0) == len(bounds):
raise ValueError('Bounds size does not match x0')
lu = list(zip(*bounds))
lower = np.array(lu[0])
upper = np.array(lu[1])
# Check that restart temperature ratio is correct
if restart_temp_ratio <= 0. or restart_temp_ratio >= 1.:
raise ValueError('Restart temperature ratio has to be in range (0, 1)')
# Checking bounds are valid
if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any(
np.isnan(lower)) or np.any(np.isnan(upper))):
raise ValueError('Some bounds values are inf values or nan values')
# Checking that bounds are consistent
if not np.all(lower < upper):
raise ValueError('Bounds are not consistent min < max')
# Checking that bounds are the same length
if not len(lower) == len(upper):
raise ValueError('Bounds do not have the same dimensions')
# Wrapper for the objective function
func_wrapper = ObjectiveFunWrapper(func, maxfun, *args)
    # Wrapper for the minimizer
minimizer_wrapper = LocalSearchWrapper(
bounds, func_wrapper, **local_search_options)
# Initialization of random Generator for reproducible runs if seed provided
rand_state = check_random_state(seed)
# Initialization of the energy state
energy_state = EnergyState(lower, upper, callback)
energy_state.reset(func_wrapper, rand_state, x0)
# Minimum value of annealing temperature reached to perform
# re-annealing
temperature_restart = initial_temp * restart_temp_ratio
# VisitingDistribution instance
visit_dist = VisitingDistribution(lower, upper, visit, rand_state)
# Strategy chain instance
strategy_chain = StrategyChain(accept, visit_dist, func_wrapper,
minimizer_wrapper, rand_state, energy_state)
need_to_stop = False
iteration = 0
message = []
# OptimizeResult object to be returned
optimize_res = OptimizeResult()
optimize_res.success = True
optimize_res.status = 0
t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0
# Run the search loop
    while not need_to_stop:
for i in range(maxiter):
# Compute temperature for this step
s = float(i) + 2.0
t2 = np.exp((visit - 1) * np.log(s)) - 1.0
temperature = initial_temp * t1 / t2
if iteration >= maxiter:
message.append("Maximum number of iteration reached")
need_to_stop = True
break
# Need a re-annealing process?
if temperature < temperature_restart:
energy_state.reset(func_wrapper, rand_state)
break
# starting strategy chain
val = strategy_chain.run(i, temperature)
if val is not None:
message.append(val)
need_to_stop = True
optimize_res.success = False
break
# Possible local search at the end of the strategy chain
if not no_local_search:
val = strategy_chain.local_search()
if val is not None:
message.append(val)
need_to_stop = True
optimize_res.success = False
break
iteration += 1
# Setting the OptimizeResult values
optimize_res.x = energy_state.xbest
optimize_res.fun = energy_state.ebest
optimize_res.nit = iteration
optimize_res.nfev = func_wrapper.nfev
optimize_res.njev = func_wrapper.ngev
optimize_res.nhev = func_wrapper.nhev
optimize_res.message = message
return optimize_res
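# --- Hedged illustration (a sketch, not part of the SciPy API) -----------
# The loop above implements the docstring's temperature schedule
# T_qv(t) = T_qv(1) * (2^(qv-1) - 1) / ((1 + t)^(qv-1) - 1).
# The helper below recomputes it for a few steps, assuming the documented
# defaults initial_temp=5230 and visit=2.62; note the first value equals
# initial_temp and the sequence decreases monotonically.
def _demo_visiting_temperature(initial_temp=5230.0, visit=2.62, steps=5):
    t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0
    temps = []
    for i in range(steps):
        t2 = np.exp((visit - 1) * np.log(float(i) + 2.0)) - 1.0
        temps.append(initial_temp * t1 / t2)
    return temps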
|
anntzer/scipy
|
scipy/optimize/_dual_annealing.py
|
Python
|
bsd-3-clause
| 29,863
|
[
"VisIt"
] |
0bb2dc4eb8a4cf2ed5dd88b559029a183488299211ce58ff368aaee580997144
|
# -*- coding: utf-8 -*-
"""
__author__ = "Konstantin Klementiev", "Roman Chernikov"
__date__ = "2017-02-01"
Created with xrtQook
"""
import numpy as np
import sys
import os, sys; sys.path.append(os.path.join('..', '..')) # analysis:ignore
import xrt.backends.raycing.sources as rsources
import xrt.backends.raycing.screens as rscreens
import xrt.backends.raycing.materials as rmats
import xrt.backends.raycing.oes as roes
# import xrt.backends.raycing.apertures as rapts
import xrt.backends.raycing.run as rrun
import xrt.backends.raycing as raycing
import xrt.plotter as xrtplot
import xrt.runner as xrtrun
Emin, Emax = 8998, 9002
crystalSi01 = rmats.CrystalSi(
t=0.1,
hkl=[1, 1, 1],
useTT=True,
calcBorrmann='TT',
geom=r"Laue reflected")
def build_beamline():
beamLine = raycing.BeamLine()
beamLine.geometricSource01 = rsources.GeometricSource(
bl=beamLine,
name=None,
center=[0, 0, 0],
nrays=1000,
distE=r"flat",
dx=0.001, dz=0.001,
dxprime=0, dzprime=0,
energies=[Emin, Emax])
beamLine.screen02 = rscreens.Screen(
bl=beamLine,
center=[0, 10000, 0])
beamLine.lauePlate01 = roes.BentLaueCylinder(
# beamLine.lauePlate01 = roes.LauePlate(
bl=beamLine,
name=None,
center=[0, 10000, 0],
pitch=0.0,
R=1e4,
crossSection='parabolic',
material=crystalSi01,
targetOpenCL='CPU')
beamLine.screen01 = rscreens.Screen(
bl=beamLine,
name=None,
center=[0.0, 20000, 0.0])
return beamLine
def run_process(beamLine):
geometricSource01beamGlobal01 = beamLine.geometricSource01.shine()
screen02beamLocal01 = beamLine.screen02.expose(
beam=geometricSource01beamGlobal01)
lauePlate01beamGlobal01, lauePlate01beamLocal01 = beamLine.lauePlate01.reflect(
beam=geometricSource01beamGlobal01)
screen01beamLocal01 = beamLine.screen01.expose(
beam=lauePlate01beamGlobal01)
outDict = {
'geometricSource01beamGlobal01': geometricSource01beamGlobal01,
'lauePlate01beamGlobal01': lauePlate01beamGlobal01,
'lauePlate01beamLocal01': lauePlate01beamLocal01,
'screen01beamLocal01': screen01beamLocal01,
'screen02beamLocal01': screen02beamLocal01}
return outDict
rrun.run_process = run_process
def align_beamline(beamLine, energy):
geometricSource01beamGlobal01 = rsources.Beam(nrays=2)
geometricSource01beamGlobal01.a[:] = 0
geometricSource01beamGlobal01.b[:] = 1
geometricSource01beamGlobal01.c[:] = 0
geometricSource01beamGlobal01.x[:] = 0
geometricSource01beamGlobal01.y[:] = 0
geometricSource01beamGlobal01.z[:] = 0
geometricSource01beamGlobal01.E[:] = energy
geometricSource01beamGlobal01.state[:] = 1
tmpy = beamLine.screen02.center[1]
newx = beamLine.screen02.center[0]
newz = beamLine.screen02.center[2]
beamLine.screen02.center = (newx, tmpy, newz)
print("screen02.center:", beamLine.screen02.center)
screen02beamLocal01 = beamLine.screen02.expose(
beam=geometricSource01beamGlobal01)
tmpy = beamLine.lauePlate01.center[1]
newx = beamLine.lauePlate01.center[0]
newz = beamLine.lauePlate01.center[2]
beamLine.lauePlate01.center = (newx, tmpy, newz)
print("lauePlate01.center:", beamLine.lauePlate01.center)
braggT = crystalSi01.get_Bragg_angle(energy)
alphaT = 0 if beamLine.lauePlate01.alpha is None else beamLine.lauePlate01.alpha
lauePitch = 0
print("bragg, alpha:", np.degrees(braggT), np.degrees(alphaT), "degrees")
braggT += -crystalSi01.get_dtheta(energy, alphaT)
if crystalSi01.geom.startswith('Laue'):
lauePitch = 0.5 * np.pi
print("braggT:", np.degrees(braggT), "degrees")
loBeam = rsources.Beam(copyFrom=geometricSource01beamGlobal01)
raycing.global_to_virgin_local(
beamLine,
geometricSource01beamGlobal01,
loBeam,
center=beamLine.lauePlate01.center)
raycing.rotate_beam(
loBeam,
roll=-(beamLine.lauePlate01.positionRoll + beamLine.lauePlate01.roll),
yaw=-beamLine.lauePlate01.yaw,
pitch=0)
theta0 = np.arctan2(-loBeam.c[0], loBeam.b[0])
th2pitch = np.sqrt(1. - loBeam.a[0]**2)
targetPitch = np.arcsin(np.sin(braggT) / th2pitch) -\
theta0
targetPitch += alphaT + lauePitch
beamLine.lauePlate01.pitch = targetPitch
print("lauePlate01.pitch:", np.degrees(beamLine.lauePlate01.pitch), "degrees")
lauePlate01beamGlobal01, lauePlate01beamLocal01 = beamLine.lauePlate01.reflect(
beam=geometricSource01beamGlobal01)
print("Laue Plate exit point")
print(lauePlate01beamGlobal01.x, lauePlate01beamGlobal01.y,
lauePlate01beamGlobal01.z)
tmpy = beamLine.screen01.center[1]
newx = lauePlate01beamGlobal01.x[0] +\
lauePlate01beamGlobal01.a[0] * (tmpy - lauePlate01beamGlobal01.y[0]) /\
lauePlate01beamGlobal01.b[0]
newz = lauePlate01beamGlobal01.z[0] +\
lauePlate01beamGlobal01.c[0] * (tmpy - lauePlate01beamGlobal01.y[0]) /\
lauePlate01beamGlobal01.b[0]
beamLine.screen01.center = (newx, tmpy, newz)
print("screen01.center:", beamLine.screen01.center)
screen01beamLocal01 = beamLine.screen01.expose(
beam=lauePlate01beamGlobal01)
def define_plots():
plots = []
plot01 = xrtplot.XYCPlot(
beam=r"lauePlate01beamLocal01",
xaxis=xrtplot.XYCAxis(
label=r"x",
            unit=r'$\mu$m',
fwhmFormatStr="%.1f",
bins=256,
ppb=1,
# factor=1000,
limits=[-5, 5]),
yaxis=xrtplot.XYCAxis(
label=r"y",
            unit=r'$\mu$m',
fwhmFormatStr="%.1f",
# factor=1000,
bins=256,
ppb=1,
limits=[-50, 50]),
caxis=xrtplot.XYCAxis(
label=r"energy",
unit=r"eV",
bins=256,
ppb=1,
limits=[Emin, Emax]),
aspect=r"auto",
title=r"01 - Laue crystal Fooprint")
plots.append(plot01)
plot01a = xrtplot.XYCPlot(
beam=r"lauePlate01beamLocal01",
xaxis=xrtplot.XYCAxis(
label=r"y",
            unit=r'$\mu$m',
fwhmFormatStr="%.1f",
# factor=1000,
bins=256,
ppb=1,
limits=[-50, 50]),
yaxis=xrtplot.XYCAxis(
label=r"z",
            unit=r'$\mu$m',
fwhmFormatStr="%.1f",
bins=256,
ppb=1),
caxis=xrtplot.XYCAxis(
label=r"energy",
unit=r"eV",
bins=256,
ppb=1,
limits=[Emin, Emax]),
aspect=r"auto",
title=r"01a - Laue crystal depth profile")
plots.append(plot01a)
plot02 = xrtplot.XYCPlot(
beam=r"screen01beamLocal01",
xaxis=xrtplot.XYCAxis(
label=r"x",
fwhmFormatStr="%.1f",
bins=256,
ppb=1),
yaxis=xrtplot.XYCAxis(
label=r"z",
fwhmFormatStr="%.3f",
bins=256,
ppb=1),
caxis=xrtplot.XYCAxis(
label=r"energy",
unit=r"eV",
limits=[Emin, Emax],
bins=256,
ppb=1),
aspect=r"auto",
title=r"02 - screen")
plots.append(plot02)
plot03 = xrtplot.XYCPlot(
beam=r"screen02beamLocal01",
xaxis=xrtplot.XYCAxis(
label=r"x",
bins=256,
ppb=1,
            unit=r'$\mu$m',
fwhmFormatStr="%.1f"),
yaxis=xrtplot.XYCAxis(
label=r"z",
bins=256,
ppb=1,
            unit=r'$\mu$m',
fwhmFormatStr="%.1f"),
caxis=xrtplot.XYCAxis(
label=r"energy",
bins=256,
ppb=1,
unit=r"eV",
limits=[Emin, Emax]),
aspect=r"auto",
title=r"00 - Laue crystyal - incoming beam")
plots.append(plot03)
for plot in plots:
plot.saveName = plot.title + "R{0}m_t{1}mm.png".format(10, 0.1)
return plots
def main():
beamLine = build_beamline()
E0 = (Emin+Emax)*0.5
align_beamline(beamLine, E0)
plots = define_plots()
xrtrun.run_ray_tracing(
plots=plots,
repeats=10,
backend=r"raycing",
beamLine=beamLine)
if __name__ == '__main__':
main()
|
kklmn/xrt
|
tests/raycing/test_laue_bent.py
|
Python
|
mit
| 8,473
|
[
"CRYSTAL"
] |
e9e1322cf1d4bef8005d0f7740ef40653feb413d31674492e550525ac34e4e8b
|
from __future__ import with_statement
from copy import deepcopy
import os
from galaxy.util import parse_xml
def load_tool(path):
"""
Loads tool from file system and preprocesses tool macros.
"""
tree = raw_tool_xml_tree(path)
root = tree.getroot()
_import_macros(root, path)
# Expand xml macros
macro_dict = _macros_of_type(root, 'xml', lambda el: list(el.getchildren()))
_expand_macros([root], macro_dict)
# Expand tokens
macro_dict = _macros_of_type(root, 'token', lambda el: el.text)
_expand_tokens([root], macro_dict)
return tree
def template_macro_params(root):
"""
Look for template macros and populate param_dict (for cheetah)
with these.
"""
param_dict = {}
macro_dict = _macros_of_type(root, 'template', lambda el: el.text)
for key, value in macro_dict.iteritems():
param_dict[key] = value
return param_dict
def raw_tool_xml_tree(path):
""" Load raw (no macro expansion) tree representation of tool represented
at the specified path.
"""
tree = parse_xml(path)
return tree
def imported_macro_paths(root):
macros_el = _macros_el(root)
return _imported_macro_paths_from_el(macros_el)
def _import_macros(root, path):
tool_dir = os.path.dirname(path)
macros_el = _macros_el(root)
if macros_el:
macro_els = _load_macros(macros_el, tool_dir)
_xml_set_children(macros_el, macro_els)
def _macros_el(root):
return root.find('macros')
def _macros_of_type(root, type, el_func):
macros_el = root.find('macros')
macro_dict = {}
if macros_el:
macro_els = macros_el.findall('macro')
macro_dict = dict([(macro_el.get("name"), el_func(macro_el)) \
for macro_el in macro_els \
if macro_el.get('type') == type])
return macro_dict
def _expand_tokens(elements, tokens):
if not tokens or not elements:
return
for element in elements:
value = element.text
if value:
new_value = _expand_tokens_str(element.text, tokens)
if not (new_value is value):
element.text = new_value
for key, value in element.attrib.iteritems():
new_value = _expand_tokens_str(value, tokens)
if not (new_value is value):
element.attrib[key] = new_value
_expand_tokens(list(element.getchildren()), tokens)
def _expand_tokens_str(str, tokens):
for key, value in tokens.iteritems():
if str.find(key) > -1:
str = str.replace(key, value)
return str
def _expand_macros(elements, macros):
if not macros:
return
for element in elements:
while True:
expand_el = element.find('.//expand')
if expand_el is None:
break
_expand_macro(element, expand_el, macros)
def _expand_macro(element, expand_el, macros):
macro_name = expand_el.get('macro')
macro_def = deepcopy(macros[macro_name])
_expand_yield_statements(macro_def, expand_el)
# Recursively expand contained macros.
_expand_macros(macro_def, macros)
# HACK for elementtree, newer implementations (etree/lxml) won't
# require this parent_map data structure but elementtree does not
    # track parents or recognize .find('..').
parent_map = dict((c, p) for p in element.getiterator() for c in p)
_xml_replace(expand_el, macro_def, parent_map)
def _expand_yield_statements(macro_def, expand_el):
yield_els = [yield_el for macro_def_el in macro_def for yield_el in macro_def_el.findall('.//yield')]
expand_el_children = expand_el.getchildren()
macro_def_parent_map = \
dict((c, p) for macro_def_el in macro_def for p in macro_def_el.getiterator() for c in p)
for yield_el in yield_els:
_xml_replace(yield_el, expand_el_children, macro_def_parent_map)
def _load_macros(macros_el, tool_dir):
macros = []
# Import macros from external files.
macros.extend(_load_imported_macros(macros_el, tool_dir))
# Load all directly defined macros.
macros.extend(_load_embedded_macros(macros_el, tool_dir))
return macros
def _load_embedded_macros(macros_el, tool_dir):
macros = []
macro_els = []
# attribute typed macro
if macros_el:
macro_els = macros_el.findall("macro")
for macro in macro_els:
if 'type' not in macro.attrib:
macro.attrib['type'] = 'xml'
macros.append(macro)
    # type shortcuts (<xml> is a shortcut for <macro type="xml">,
    # likewise for <template>).
typed_tag = ['template', 'xml', 'token']
for tag in typed_tag:
macro_els = []
if macros_el:
macro_els = macros_el.findall(tag)
for macro_el in macro_els:
macro_el.attrib['type'] = tag
macro_el.tag = 'macro'
macros.append(macro_el)
return macros
def _load_imported_macros(macros_el, tool_dir):
macros = []
for tool_relative_import_path in _imported_macro_paths_from_el(macros_el):
import_path = \
os.path.join(tool_dir, tool_relative_import_path)
file_macros = _load_macro_file(import_path, tool_dir)
macros.extend(file_macros)
return macros
def _imported_macro_paths_from_el(macros_el):
imported_macro_paths = []
macro_import_els = []
if macros_el:
macro_import_els = macros_el.findall("import")
for macro_import_el in macro_import_els:
raw_import_path = macro_import_el.text
tool_relative_import_path = \
os.path.basename(raw_import_path) # Sanitize this
        imported_macro_paths.append(tool_relative_import_path)
return imported_macro_paths
def _load_macro_file(path, tool_dir):
tree = parse_xml(path)
root = tree.getroot()
return _load_macros(root, tool_dir)
def _xml_set_children(element, new_children):
for old_child in element.getchildren():
element.remove(old_child)
for i, new_child in enumerate(new_children):
element.insert(i, new_child)
def _xml_replace(query, targets, parent_map):
#parent_el = query.find('..') ## Something like this would be better with newer xml library
parent_el = parent_map[query]
matching_index = -1
#for index, el in enumerate(parent_el.iter('.')): ## Something like this for newer implementation
for index, el in enumerate(parent_el.getchildren()):
if el == query:
matching_index = index
break
assert matching_index >= 0
current_index = matching_index
for target in targets:
current_index += 1
parent_el.insert(current_index, deepcopy(target))
parent_el.remove(query)
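# --- Hedged illustration (the XML below is hypothetical, not from a real
# Galaxy tool). load_tool() replaces <expand macro="..."/> elements with the
# named macro's children and substitutes tokens in text and attributes:
#
#   <tool>
#     <macros>
#       <token name="@VERSION@">1.0</token>
#       <xml name="common_inputs">
#         <param name="input1" type="data"/>
#       </xml>
#     </macros>
#     <version_command>tool_exe --version @VERSION@</version_command>
#     <inputs><expand macro="common_inputs"/></inputs>
#   </tool>
#
# After loading, <expand/> is replaced by <param name="input1" .../> and
# @VERSION@ becomes "1.0".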
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/tools/loader.py
|
Python
|
gpl-3.0
| 6,724
|
[
"Galaxy"
] |
c8baba74929cf680c56af62963491140ae67dbf1bd7a7ef58b12a03772ed7609
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
import numpy as np
from rmgpy import getPath
from rmgpy.qm.main import QMCalculator
from rmgpy.molecule import Molecule
from rmgpy.qm.gaussian import GaussianMolPM3, GaussianMolPM6
gaussEnv = os.getenv('GAUSS_EXEDIR') or os.getenv('g09root') or os.getenv('g03root') or ""
# GAUSS_EXEDIR may be a list like "path1:path2:path3"
for possibleDir in gaussEnv.split(':'):
if os.path.exists(os.path.join(possibleDir , 'g09')):
executablePath = os.path.join(possibleDir , 'g09')
break
elif os.path.exists(os.path.join(possibleDir , 'g03')):
executablePath = os.path.join(possibleDir , 'g03')
break
else:
executablePath = os.path.join(gaussEnv , '(g03 or g09)')
mol1 = Molecule().fromSMILES('C1=CC=C2C=CC=CC2=C1')
class TestGaussianMolPM3(unittest.TestCase):
"""
    Contains unit tests for the GaussianMolPM3 class.
"""
@unittest.skipIf(os.path.exists(executablePath)==False, "Gaussian not found. Try resetting your environment variables if you want to use it.")
def setUp(self):
"""
A function run before each unit test in this class.
"""
RMGpy_path = os.path.normpath(os.path.join(getPath(),'..'))
qm = QMCalculator(software = 'gaussian',
method = 'pm3',
fileStore = os.path.join(RMGpy_path, 'testing', 'qm', 'QMfiles'),
scratchDirectory = os.path.join(RMGpy_path, 'testing', 'qm', 'QMscratch'),
)
if not os.path.exists(qm.settings.fileStore):
os.makedirs(qm.settings.fileStore)
self.qmmol1 = GaussianMolPM3(mol1, qm.settings)
def testGenerateThermoData(self):
"""
Test that generateThermoData() works correctly.
"""
try:
fileList = os.listdir(self.qmmol1.settings.fileStore)
for fileName in fileList:
os.remove(os.path.join(self.qmmol1.settings.fileStore, fileName))
except OSError:
pass
self.qmmol1.generateThermoData()
result = self.qmmol1.qmData
self.assertTrue(self.qmmol1.thermo.comment.startswith('QM GaussianMolPM3 calculation'))
self.assertEqual(result.numberOfAtoms, 18)
self.assertIsInstance(result.atomicNumbers, np.ndarray)
if result.molecularMass.units=='amu':
self.assertAlmostEqual(result.molecularMass.value, 128.0626, 3)
self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 169908.3376, 0) # to 0 decimal place
self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 335.5438748, 0) # to 0 decimal place
def testLoadThermoData(self):
"""
Test that generateThermoData() can load thermo from a previous run.
Check that it loaded, and the values are the same as above.
"""
self.qmmol1.generateThermoData()
result = self.qmmol1.qmData
self.assertTrue(self.qmmol1.thermo.comment.startswith('QM GaussianMolPM3 calculation'))
self.assertEqual(result.numberOfAtoms, 18)
self.assertIsInstance(result.atomicNumbers, np.ndarray)
self.assertAlmostEqual(result.energy.value_si, 169908.7581, 0)
if result.molecularMass.units=='amu':
self.assertAlmostEqual(result.molecularMass.value, 128.0626, 3)
self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 169908.3376, 0) # to 0 decimal place
self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 335.5438748, 0) # to 0 decimal place
class TestGaussianMolPM6(unittest.TestCase):
"""
    Contains unit tests for the GaussianMolPM6 class.
"""
@unittest.skipIf(os.path.exists(executablePath)==False, "Gaussian not found. Try resetting your environment variables if you want to use it.")
def setUp(self):
"""
A function run before each unit test in this class.
"""
RMGpy_path = os.path.normpath(os.path.join(getPath(),'..'))
qm = QMCalculator(software = 'gaussian',
method = 'pm6',
fileStore = os.path.join(RMGpy_path, 'testing', 'qm', 'QMfiles'),
scratchDirectory = os.path.join(RMGpy_path, 'testing', 'qm', 'QMscratch'),
)
if not os.path.exists(qm.settings.fileStore):
os.makedirs(qm.settings.fileStore)
self.qmmol1 = GaussianMolPM6(mol1, qm.settings)
def testGenerateThermoData(self):
"""
Test that generateThermoData() works correctly.
"""
try:
fileList = os.listdir(self.qmmol1.settings.fileStore)
for fileName in fileList:
os.remove(os.path.join(self.qmmol1.settings.fileStore, fileName))
except OSError:
pass
self.qmmol1.generateThermoData()
result = self.qmmol1.qmData
self.assertTrue(self.qmmol1.thermo.comment.startswith('QM GaussianMolPM6 calculation'))
self.assertEqual(result.numberOfAtoms, 18)
self.assertIsInstance(result.atomicNumbers, np.ndarray)
if result.molecularMass.units=='amu':
self.assertAlmostEqual(result.molecularMass.value, 128.0626, 3)
self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 169326.2504, 0) # to 0 decimal place
self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 338.2696063, 0) # to 0 decimal place
def testLoadThermoData(self):
"""
Test that generateThermoData() can load thermo from a previous run.
Check that it loaded, and the values are the same as above.
"""
self.qmmol1.generateThermoData()
result = self.qmmol1.qmData
self.assertTrue(self.qmmol1.thermo.comment.startswith('QM GaussianMolPM6 calculation'))
self.assertEqual(result.numberOfAtoms, 18)
self.assertIsInstance(result.atomicNumbers, np.ndarray)
self.assertAlmostEqual(result.energy.value_si, 169325.9867, 1)
if result.molecularMass.units=='amu':
self.assertAlmostEqual(result.molecularMass.value, 128.0626, 3)
self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 169326.2504, 0) # to 0 decimal place
self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 338.2696063, 0) # to 0 decimal place
################################################################################
if __name__ == '__main__':
unittest.main( testRunner = unittest.TextTestRunner(verbosity=2) )
|
enochd/RMG-Py
|
rmgpy/qm/gaussianTest.py
|
Python
|
mit
| 5,807
|
[
"Gaussian"
] |
157ba42bdf1d9ebbfde53920d9a79f009c93b0c349eb96963adebe4f4529506d
|
"""This module provides common utility functions.
Copyright 2021--2022 Michael Hayes, UCECE
"""
import sympy as sym
def factor_const(expr, var):
"""Extract constant factor from expression and return tuple
of constant and the rest of the expression.
For example `a * r(var)` returns `a, r(var)`.
    If the expression is a polynomial, the leading coefficient is
    returned as the constant, for example: `2 * s + 4` returns `2, s + 2`.
"""
# Perhaps use expr.as_coeff_Mul() ?
if expr.is_polynomial():
poly = sym.Poly(expr, var)
const = poly.LC()
if const == 0 or const == 1:
return 1, expr
return const, expr / const
rest = sym.S.One
const = sym.S.One
for factor in expr.as_ordered_factors():
# Cannot use factor.is_constant() since SymPy 1.2, 1.3
# barfs for Heaviside(t) and DiracDelta(t)
if not factor.has(var):
const *= factor
else:
rest *= factor
return const, rest
def term_const(expr, var):
"""Extract constant term from expression and return tuple
of constant and the rest of the expression."""
rest = sym.S.One
const = sym.S.Zero
for term in expr.as_ordered_terms():
# Cannot use factor.is_constant() since SymPy 1.2, 1.3
# barfs for Heaviside(t) and DiracDelta(t)
if not term.has(var):
const += term
else:
rest += term
return const, rest
def scale_shift(expr, var):
if expr == var:
return sym.S.One, sym.S.Zero
expr = expr.expand()
if not expr.as_poly(var).is_linear:
raise ValueError('Expression not a linear function of %s: %s' % (var, expr))
scale = expr.coeff(var, 1)
shift = expr.coeff(var, 0)
return scale, shift
def as_N_D(expr, var, monic_denominator=False, use_sympy=False):
if use_sympy:
return expr.as_numer_denom()
factors = expr.as_ordered_factors()
numers = []
denoms = []
for factor in factors:
if factor.is_Function and factor.func == sym.exp:
# SymPy treats exp(-s * a) as 1 / exp(s * a)
numer = factor
denom = sym.S.One
else:
numer, denom = factor.as_numer_denom()
numers.append(numer)
denoms.append(denom)
poly_denom = False
for denom in denoms:
if denom != 1 and denom.is_polynomial(var):
poly_denom = True
break
if not poly_denom:
return expr.as_numer_denom()
N = sym.S.One
D = sym.S.One
for numer, denom in zip(numers, denoms):
N *= numer
if denom.is_polynomial(var):
D *= denom
else:
N /= denom
N = N.simplify()
if monic_denominator:
Dpoly = sym.Poly(D, var)
LC = Dpoly.LC()
D = Dpoly.monic().as_expr()
N = (N / LC).simplify()
return N, D
def as_sum_terms(expr, var):
N, D = as_N_D(expr, var)
N = N.simplify()
    return [term / D for term in N.expand().as_ordered_terms()]
def as_sum(expr, var):
result = 0
for term in as_sum_terms(expr, var):
result += term
return result
def merge_common(lists):
# From www.geeksforgeeks.org
from collections import defaultdict
neighbours = defaultdict(set)
visited = set()
for each in lists:
for item in each:
neighbours[item].update(each)
def comp(node, neighbours=neighbours, visited=visited, visit=visited.add):
nodes = set([node])
next_node = nodes.pop
while nodes:
node = next_node()
visit(node)
nodes |= neighbours[node] - visited
yield node
for node in neighbours:
if node not in visited:
yield sorted(comp(node))
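# Hedged usage sketch: merge_common() joins input lists that share any
# element into sorted connected components, e.g.
#   >>> list(merge_common([[1, 2], [2, 3], [4]]))
#   [[1, 2, 3], [4]]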
def isiterable(arg):
return hasattr(arg, '__iter__')
def factor_expr(expr, factor):
"""Extract factor from expression or None if expression does
not have factor."""
factors = expr.as_ordered_factors()
if factor not in factors:
return None
return expr / factor
def separate_dirac_delta(expr):
terms = expr.as_ordered_terms()
deltas = []
rest = 0
for term in terms:
if term.has(sym.DiracDelta):
deltas.append(term)
else:
rest += term
return rest, deltas
def split_dirac_delta(expr):
"""Return expression as a list of terms.
The first term has no DiracDeltas, the second term collates
the DiracDeltas, the third term collates derivatives of DiracDeltas, etc.
    For example, u(t) + DiracDelta(t, 1) returns [u(t), 0, DiracDelta(t, 1)]
"""
terms = expr.as_ordered_terms()
parts = {}
rest = 0
# FIXME, DiracDelta needs to be a factor
for term in terms:
if term.has(sym.DiracDelta):
if len(term.args) == 1:
if 1 not in parts:
parts[1] = 0
parts[1] += term
else:
idx = term.args[1] + 1
if idx not in parts:
parts[idx] = 0
parts[idx] += term
else:
parts[0] = term
maxkey = max(parts.keys())
result = []
for key in range(maxkey + 1):
if key in parts:
result.append(parts[key])
else:
result.append(0)
return result
def remove_images(expr, var, dt, m1=0, m2=0):
if m2 == 0 and isinstance(m1, tuple) and len(m1) == 2:
# Perhaps should warn that this might be deprecated?
m1, m2 = m1
remove_all = m1 == 0 and m2 == 0
const, expr1 = factor_const(expr, var)
result = sym.S.Zero
terms = expr1.as_ordered_terms()
if len(terms) > 1:
for term in expr1.as_ordered_terms():
result += remove_images(term, var, dt, m1, m2)
return const * result
if not isinstance(expr1, sym.Sum):
return expr
sumsym = expr1.args[1].args[0]
def query(expr):
return expr.is_Add and expr.has(var) and expr.has(sumsym)
def value(expr):
if not expr.is_Add:
return expr
if not expr.is_polynomial(var) and not expr.as_poly(var).is_linear:
return expr
expr = expr.expand()
a = expr.coeff(var, 1)
b = expr.coeff(var, 0)
if a == 0:
return expr
if b / a != -sumsym / dt:
return expr
return a * var
expr1 = expr1.replace(query, value)
if remove_all:
return const * expr1.args[0]
return const * sym.Sum(expr1.args[0], (sumsym, m1, m2))
def pair_conjugates(poles_dict):
"""Return dictionary of conjugate pole pairs and a dictionary of the
remaining single poles."""
pole_single_dict = poles_dict.copy()
pole_pair_dict = {}
pole_list = list(poles_dict)
for i, pole in enumerate(pole_list):
pole_c = sym.conjugate(pole)
# Check for conjugate pole
if pole_c in pole_list[i + 1:]:
pole_single_dict.pop(pole, None)
pole_single_dict.pop(pole_c, None)
o1 = poles_dict[pole]
o2 = poles_dict[pole_c]
if o1 == o2:
pole_pair_dict[pole, pole_c] = o1
elif o1 > o2:
pole_pair_dict[pole, pole_c] = o2
pole_single_dict[pole] = o1 - o2
else:
pole_pair_dict[pole, pole_c] = o1
pole_single_dict[pole_c] = o2 - o1
return pole_pair_dict, pole_single_dict
def similarity_shift(expr, var):
"""Rewrite foo(a * t + b) as foo(t) and return a, b."""
scale = None
shift = None
fail = False
for expr1 in sym.preorder_traversal(expr):
if not expr1.is_Function:
continue
arg = expr1.args[0]
if not arg.has(var):
continue
poly = arg.as_poly(var)
if poly is None or not poly.is_linear:
fail = True
break
scale1 = arg.coeff(var, 1)
shift1 = arg.coeff(var, 0)
if scale is None:
scale = scale1
if shift is None:
shift = shift1
if scale != scale1 or shift != shift1:
fail = True
break
if fail or scale is None:
return expr, 1, 0
expr2 = expr.replace(var * scale + shift, var)
return expr2, scale, shift
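# Hedged usage sketch (assumes t = sym.Symbol('t')):
#   >>> t = sym.Symbol('t')
#   >>> similarity_shift(sym.Heaviside(2 * t - 3), t)
#   (Heaviside(t), 2, -3)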
def expand_functions(expr, var):
# Try rewriting functions such as rampstep(t - 1)
# to ramp(t - 1) - ramp(t - 2) to sums of weighted Heavisides...
const, expr = factor_const(expr, var)
if expr.is_Function:
new_expr = expr.rewrite()
if expr != new_expr:
return const * expand_functions(new_expr, var)
return const * new_expr
terms = expr.as_ordered_terms()
if len(terms) == 1:
return const * expr
expr = 0
for term in terms:
expr += expand_functions(term, var)
return const * expr
|
mph-/lcapy
|
lcapy/utils.py
|
Python
|
lgpl-2.1
| 8,995
|
[
"VisIt"
] |
06389a092ae60a94b5a5fdbc7ccc4de20228e06a359bf18a8ff3966f2cab924f
|
# -*- coding: utf-8 -*-
import os
import wave
import json
import requests
import pyaudio
from settings import HTTP_PROXY, HTTPS_PROXY
TTS_URL = "https://try-api.recaius.jp/tts/v1/"
class RecaiusTTS(object):
"""Speech Synthesizer by RECAIUS-dev API"""
speaker2info = {
'itaru': ('ja_JP', 'ja_JP-M0001-H00T'),
'hiroto': ('ja_JP', 'ja_JP-M0002-H01T'),
'moe': ('ja_JP', 'ja_JP-F0005-U01T'),
'sakura': ('ja_JP', 'ja_JP-F0006-C53T'),
'jane': ('en_US', 'en_US-F0001-H00T'),
'nicole': ('fr_FR', 'fr_FR-F0001-H00T'),
'miyon': ('ko_KR', 'ko_KR-F0001-H00T'),
'linly': ('zh_CN', 'zh_CN-en_US-F0002-H00T')
}
def __init__(self, recaius_id, recaius_password):
self.recaius_id = recaius_id
self.recaius_password = recaius_password
self._values = dict()
self._values['id'] = self.recaius_id
self._values['password'] = self.recaius_password
# default settings
lang, speaker_id = self.speaker2info['sakura']
self._values['lang'] = lang
self._values['speaker_id'] = speaker_id
self._values['codec'] = 'audio/x-linear' # for pyaudio
self.reset_parameters()
# set proxies
self.proxies = {
'http': HTTP_PROXY,
'https': HTTPS_PROXY,
}
def clear_parameters(self):
self._values = dict()
self._values['id'] = self.recaius_id
self._values['password'] = self.recaius_password
def reset_parameters(self):
delete_keys = ['speed', 'pitch', 'depth', 'volume']
for k in delete_keys:
if k in self._values:
del self._values[k]
def reset_emotions(self):
delete_keys = ['happy', 'angry', 'sad', 'fear', 'tender']
for k in delete_keys:
if k in self._values:
del self._values[k]
def speaker(self, speaker):
if speaker in self.speaker2info:
lang, speaker_id = self.speaker2info[speaker]
self._values['lang'] = lang
self._values['speaker_id'] = speaker_id
else:
raise RecaiusTTSException('Unknown speaker: %s' % speaker)
return self
def emotion(self, emotion, level):
self.reset_emotions()
if emotion in ['happy', 'angry', 'sad', 'fear', 'tender']:
self._values[emotion] = level
else:
raise RecaiusTTSException('Unknown emotion: %s' % emotion)
return self
def speed(self, speed):
if -10 <= speed <= 10:
self._values["speed"] = speed
else:
raise RecaiusTTSException('Invalid speed: %d [-10, 10]' % speed)
return self
def pitch(self, pitch):
if -10 <= pitch <= 10:
self._values["pitch"] = pitch
else:
raise RecaiusTTSException('Invalid pitch: %d [-10, 10]' % pitch)
return self
def depth(self, depth):
if -4 <= depth <= 4:
self._values["depth"] = depth
else:
raise RecaiusTTSException('Invalid depth: %d [-4, 4]' % depth)
return self
def volume(self, volume):
if -50 <= volume <= 50:
self._values["volume"] = volume
else:
raise RecaiusTTSException('Invalid volume: %d [-50, 50]' % volume)
return self
def speak(self, text, is_phonetic=False):
temp = 'temp.wav'
self.save_wav(text, temp, is_phonetic)
w = wave.open(temp)
p = pyaudio.PyAudio()
stream = p.open(
format=p.get_format_from_width(w.getsampwidth()),
channels=w.getnchannels(),
rate=w.getframerate(),
output=True)
chunk = 1024
data = w.readframes(chunk)
while data:
stream.write(data)
data = w.readframes(chunk)
stream.close()
p.terminate()
def save_wav(self, text, wave_file, is_phonetic=False):
if is_phonetic:
self._values['phonetic_text'] = text
else:
self._values['plain_text'] = text
response = self._text2speechwave(is_phonetic)
with open(wave_file, "wb") as fp:
fp.write(response.content)
def get_speaker_list(self):
temp_values = dict()
temp_values['id'] = self.recaius_id
temp_values['password'] = self.recaius_password
headers = {'Content-Type': 'application/json'}
data = json.dumps(temp_values)
data = data.encode('utf-8')
response = requests.post(TTS_URL + 'get_speaker_list',
data=data,
headers=headers,
proxies=self.proxies)
result = response.text
return result
def get_phonetic(self, plain_text, lang):
temp_values = dict()
temp_values['id'] = self.recaius_id
temp_values['password'] = self.recaius_password
temp_values['plain_text'] = plain_text
temp_values['lang'] = lang
headers = {'Content-Type': 'application/json'}
data = json.dumps(temp_values)
data = data.encode('utf-8')
response = requests.post(TTS_URL + 'plaintext2phonetictext',
data=data,
headers=headers,
proxies=self.proxies)
phonetic_text = response.text
return phonetic_text
def _text2speechwave(self, is_phonetic=False):
# check necessary parameters
if 'id' not in self._values:
raise RecaiusTTSException('Missing parameter: id')
if 'password' not in self._values:
raise RecaiusTTSException('Missing parameter: password')
if is_phonetic:
if 'phonetic_text' not in self._values:
raise RecaiusTTSException('Missing parameter: phonetic_text')
else:
if 'plain_text' not in self._values:
raise RecaiusTTSException('Missing parameter: plain_text')
if 'lang' not in self._values:
raise RecaiusTTSException('Missing parameter: lang')
if 'speaker_id' not in self._values:
raise RecaiusTTSException('Missing parameter: speaker_id')
if is_phonetic:
function_name = 'phonetictext2speechwave'
else:
function_name = 'plaintext2speechwave'
headers = {'Content-Type': 'application/json'}
data = json.dumps(self._values)
data = data.encode('utf-8')
response = requests.post(TTS_URL + function_name,
data=data,
headers=headers,
proxies=self.proxies)
return response
class RecaiusTTSException(Exception):
pass
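# --- Hedged usage sketch (credentials are placeholders; a valid RECAIUS
# developer account and network access are required) ---
#
#   tts = RecaiusTTS('your_id', 'your_password')
#   tts.speaker('sakura').emotion('happy', 5).speed(2)
#   tts.save_wav(u'こんにちは', 'hello.wav')
#
# Each setter returns self, which is what makes the chained calls work.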
|
aidiary/python-recaius
|
recaius/tts.py
|
Python
|
mit
| 6,847
|
[
"MOE"
] |
187cbd8e3eab5fcf95026cb6253797f424949b924341d2d378c8c05b6e620acf
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015-16 Red Hat, Inc.
# This file is part of the Infinity Note Compiler.
#
# The Infinity Note Compiler is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# The Infinity Note Compiler is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Infinity Note Compiler. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
class VisitError(NotImplementedError):
def __init__(self, visitor, visitee):
NotImplementedError.__init__(
self,
"%s has no visitor suitable for %s" % (
visitor.__class__.__name__,
visitee.__class__.__name__))
class Visitable(object):
@classmethod
def get_visitfunc(cls, visitor):
func = getattr(visitor, "visit_" + cls.__name__.lower(), None)
if func is None:
for cls in cls.__bases__:
if cls is not Visitable and issubclass(cls, Visitable):
func = cls.get_visitfunc(visitor)
if func is not None:
break
return func
def accept(self, visitor):
# Find a suitable visitor
func = self.get_visitfunc(visitor)
if func is None:
raise VisitError(visitor, self)
# Visit any folded children first
if hasattr(self, "folded_children"):
for node in self.folded_children:
node.accept(visitor)
# Now visit ourself
return func(self)
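# --- Hedged illustration (the classes below are hypothetical, not part of
# i8c). accept() looks up "visit_" + the visitee's lowercased class name on
# the visitor and falls back through Visitable base classes:
#
#   class Operation(Visitable):
#       pass
#
#   class AddOp(Operation):
#       pass
#
#   class Printer(object):
#       def visit_operation(self, node):
#           return node.__class__.__name__
#
# Printer defines no visit_addop, so AddOp().accept(Printer()) falls back
# to visit_operation and returns "AddOp".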
|
gbenson/i8c
|
src/i8c/compiler/visitors.py
|
Python
|
lgpl-2.1
| 2,062
|
[
"VisIt"
] |
9c51595cae0839056d166f1e25cdadc37bfb63eb382fab0a1f09e7120a8d41f2
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow input/output utilities."""
import collections
import json
import math
import os
import numpy as np
import tensorflow.compat.v1 as tf
class Features(object):
"""Feature keys."""
# Waveform(s) of audio observed at receiver(s).
RECEIVER_AUDIO = 'receiver_audio'
# Images of each source at each microphone, including reverberation.
# Images are real valued with shape [sources, microphones, length].
SOURCE_IMAGES = 'source_images'
  # Boolean diarization labels of shape (sources, length) which indicate
  # whether a source is active or not. For a nonexistent source, it is all
  # zeros.
DIARIZATION_LABELS = 'diarization_labels'
# Speaker indices (global indices which are contiguous over all training data
# starting with 0) that are present in this meeting or meeting chunk with
# shape (sources,). If number of speakers present in the meeting is less
# than sources, for a non-existing speaker/source, the speaker index is
# set to -1. Note that, for a meeting sub-block, we still have all the
# speaker indices in the meeting even if not all the speakers are present
# in that meeting sub-block.
SPEAKER_INDEX = 'speaker_indices'
def get_inference_spec(num_receivers=1,
num_samples=None):
"""Returns a specification of features in tf.Examples in roomsim format."""
spec = {}
spec[Features.RECEIVER_AUDIO] = tf.FixedLenFeature(
[num_receivers, num_samples], tf.float32)
return spec
def get_roomsim_spec(num_sources,
num_receivers,
num_samples):
"""Returns a specification of features in tf.Examples in roomsim format.
Args:
num_sources: Expected number of sources.
num_receivers: Number of microphones in array.
num_samples: Expected length of sources in samples. 'None' for variable.
Returns:
Feature specifications suitable to pass to tf.parse_example.
"""
spec = {}
spec[Features.RECEIVER_AUDIO] = tf.FixedLenFeature(
[num_receivers, num_samples], tf.float32)
spec[Features.SOURCE_IMAGES] = tf.FixedLenFeature(
[num_sources, num_receivers, num_samples], tf.float32)
return spec
def placeholders_from_spec(feature_spec):
"""Returns placeholders compatible with a given feature spec."""
placeholders = {}
for key, feature in feature_spec.items():
placeholders[key] = tf.placeholder(dtype=feature.dtype,
shape=[1] + feature.shape,
name=key)
return placeholders
def _read_meeting_list(meeting_list, meeting_length_type):
"""Reads meeting list from json file to get necessary information.
Args:
meeting_list: A meeting list read from a json file.
meeting_length_type: One of 'maximum', 'minimum' or 'average'.
Since typically meeting lengths are not fixed, we can
set the training/eval length to the maximum, minimum or average meeting
length in the json file based on the value of this argument. We
eventually pad or clip individual meetings to attain the desired constant
meeting length in our data reading pipeline.
Returns:
num_meetings: Number of meetings.
max_num_spk_per_meeting: Maximum number of speakers in a meeting.
max_num_utt_per_spk: Maximum number of utterances per speaker.
max_dia_seg_per_utt: Maximum diarization segments per utterance.
max_utt_length: Maximum utterance length.
meeting_length: Meeting length that will be used.
speaker_ids: A list of speaker ids that appear in meetings.
"""
max_num_spk_per_meeting = 0
max_num_utt_per_meeting = 0
meeting_lengths = []
speaker_id_to_count = collections.defaultdict(int)
num_meetings = len(meeting_list)
total_spk = 0
total_utt = 0
max_utt_length = 0
max_num_utt_per_spk = 0
max_dia_seg_per_utt = 0
for one_meeting in meeting_list:
sources_start_end = one_meeting['utterance_start_end']
meeting_length = int(one_meeting['duration'])
num_utt_in_meeting = len(sources_start_end)
max_num_utt_per_meeting = max(max_num_utt_per_meeting, num_utt_in_meeting)
utt2spk = []
spk2wavs = collections.defaultdict(list)
spk_utt_idx = collections.defaultdict(int)
for start, end, spkid, wav_path in sources_start_end:
max_utt_length = max(max_utt_length, end - start)
utt2spk.append(spkid)
spk2wavs[spkid].append(wav_path)
speaker_id_to_count[spkid] += 1
spk_utt_idx[spkid] += 1
diarization_info = \
one_meeting['diarization_label'][spkid][spk_utt_idx[spkid] - 1]
num_seg_in_utt = len(diarization_info)
max_dia_seg_per_utt = max(max_dia_seg_per_utt, num_seg_in_utt)
speakers_in_meeting = list(set(utt2spk))
num_spk = len(speakers_in_meeting)
for spkid in speakers_in_meeting:
max_num_utt_per_spk = max(max_num_utt_per_spk,
len(set(spk2wavs[spkid])))
max_num_spk_per_meeting = max(max_num_spk_per_meeting, num_spk)
total_spk += num_spk
total_utt += num_utt_in_meeting
meeting_lengths.append(meeting_length)
if meeting_length_type == 'maximum':
meeting_length = int(math.ceil(np.max(meeting_lengths)))
elif meeting_length_type == 'minimum':
meeting_length = int(math.floor(np.min(meeting_lengths)))
elif meeting_length_type == 'average':
meeting_length = int(round(np.mean(meeting_lengths)))
elif isinstance(meeting_length_type, int):
meeting_length = meeting_length_type
else:
raise ValueError(f'Unknown meeting_length_type={meeting_length_type}')
speaker_ids = sorted(speaker_id_to_count.keys())
tf.logging.info('Read %s meetings from json file.', num_meetings)
tf.logging.info('Average number of speakers per meeting = %f.',
total_spk / num_meetings)
tf.logging.info('Average number of utterances per speaker = %f.',
total_utt / total_spk)
return (num_meetings, max_num_spk_per_meeting, max_num_utt_per_spk,
max_dia_seg_per_utt, max_utt_length,
meeting_length, speaker_ids)
def _pad_mics_tf(signal, new_mics):
"""Pads new mic channels to an input tensor and returns the updated tensor.
Args:
signal: A tf.tensor of shape (input_mics, samples)
new_mics: The number of new mic channels to be added (integer scalar tensor)
Returns:
padded_signal: A tf.tensor of shape (input_mics + new_mics, samples)
"""
# Take first new_mics channels and shift them by 1 sample.
new_inputs = tf.roll(signal[:new_mics, :], shift=1, axis=-1)
# Add noise 1e-3 times the RMS value in the signal.
noise_scale = 1e-3 * tf.sqrt(tf.reduce_mean(tf.square(new_inputs)))
new_inputs += noise_scale * tf.random.normal(tf.shape(new_inputs))
return tf.concat((signal, new_inputs), axis=0)
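# Hedged NumPy sketch (illustration only, not used by the pipeline): mirrors
# what _pad_mics_tf above does -- the added channels are the first `new_mics`
# channels rolled by one sample, plus white noise at 1e-3 of their RMS.
def _demo_pad_mics_np(signal, new_mics):
  new_inputs = np.roll(signal[:new_mics, :], shift=1, axis=-1)
  noise_scale = 1e-3 * np.sqrt(np.mean(np.square(new_inputs)))
  new_inputs = new_inputs + noise_scale * np.random.normal(size=new_inputs.shape)
  return np.concatenate((signal, new_inputs), axis=0)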
def json_to_dataset(json_file,
batch_size,
parallel_readers=tf.data.experimental.AUTOTUNE,
randomize_order=False,
num_examples=-1,
prefetch_buffer_size=tf.data.experimental.AUTOTUNE,
shuffle_buffer_size=5,
repeat=True,
num_mics=1,
sample_rate=16000,
use_relative_path=True,
meeting_length_type='maximum',
num_meeting_subdivisions=1,
sensor_noise_range=(0.0, 0.0)):
r"""Fetches features from a dictionary and source .wav files.
Args:
json_file: A json file containing meeting information.
batch_size: The number of examples to read.
parallel_readers: Number of dataset.map operations that should happen in
parallel.
randomize_order: Whether to randomly shuffle features.
num_examples: Limit number of examples to this value. Unlimited if -1.
    prefetch_buffer_size: How many batches to prefetch.
shuffle_buffer_size: The size of the shuffle buffer.
repeat: If True, repeat the dataset.
num_mics: The expected number of mics in source wav files.
sample_rate: Sample rate of wav files read.
use_relative_path: If True, the path for .wav files is relative to the
json file, otherwise, the paths are absolute.
meeting_length_type: 'maximum', 'minimum' or 'average'. Can also specify
an integer value which is the length in samples, which will be used.
num_meeting_subdivisions: If > 1, chop the meeting in time into this
many chunks.
sensor_noise_range: Range of standard deviation for sensor noise. If
sensor_noise_range[1] <= 0.0, then no sensor noise is added. Otherwise,
white Gaussian sensor noise with uniformly random standard deviation
from the provided range is added as the first reference signal.
Returns:
A batch_size number of features constructed from wav files.
Raises:
    ValueError: if meeting_length_type is not one of the recognized values.
"""
tf.logging.info('Reading %s.', json_file)
with open(json_file, 'r') as f:
meeting_list = json.load(f)
(num_meetings, max_num_spk, max_num_utt_per_spk, max_dia_seg_per_utt,
max_utt_length, samples, speaker_id_list) = _read_meeting_list(
meeting_list, meeting_length_type)
tf.logging.info('Maximum number of speakers per meeting = %s', max_num_spk)
tf.logging.info('Maximum number of utterances per speaker = %s',
max_num_utt_per_spk)
tf.logging.info('Maximum diarization segments per utterance = %s',
max_dia_seg_per_utt)
tf.logging.info('Maximum utterance length in seconds = %s',
max_utt_length/sample_rate)
tf.logging.info('Used meeting length in seconds = %s', samples/sample_rate)
tf.logging.info('Number of speakers seen in all meetings = %s',
len(speaker_id_list))
tf.logging.info('Using %s parallel readers.', parallel_readers)
tf.logging.info('shuffle_buffer=%s, prefetch_buffer=%s, num_mics=%s, '
'randomize=%s.', shuffle_buffer_size, prefetch_buffer_size,
num_mics, randomize_order)
if use_relative_path:
base_path = os.path.dirname(json_file)
spkid2idx = {key: idx for idx, key in enumerate(speaker_id_list)}
def utterance_info_generator():
"""Yields utterance informations from each meeting.
Utterance info is in the form of a 6-tuple:
wav_path, diarization, spkidx, meeting_scale, start, gain.
"""
default_diarization = np.zeros((max_dia_seg_per_utt, 2), dtype=np.int32)
default_utt = ('0', default_diarization, -1, 0.0, 0, 0.0)
for one_meeting in meeting_list:
meeting_info = collections.defaultdict(list)
sources_start_end = one_meeting['utterance_start_end']
num_utt_in_meeting = len(sources_start_end)
spk_num_in_meeting = {}
new_spknum = 0
spkids_in_meeting = []
spk_utt_idx = collections.defaultdict(int)
meeting_scale = float(one_meeting['meeting_scale'])
for utt_idx in range(num_utt_in_meeting):
start, end, spkid, wav_path = sources_start_end[utt_idx]
spkidx = spkid2idx[spkid]
if start >= samples:
continue
if end >= samples:
end = samples
if spkidx in spk_num_in_meeting:
spknum = spk_num_in_meeting[spkidx]
else:
spknum = new_spknum
if spknum > max_num_spk:
continue
spkids_in_meeting.append(spkidx)
spk_num_in_meeting[spkidx] = spknum
new_spknum += 1
if use_relative_path:
wav_path = os.path.join(base_path, wav_path)
gain = one_meeting['utterance_gain'][utt_idx]
# Make diarization_labels array.
diarization = np.zeros((max_dia_seg_per_utt, 2), dtype=np.int32)
spk_utt_idx[spknum] += 1
diarization_info = \
one_meeting['diarization_label'][spkid][spk_utt_idx[spknum] - 1]
# Go over diarization segments in utterance.
for i, segment_st_end in enumerate(diarization_info):
segment_start, segment_end = segment_st_end
if segment_start >= samples:
continue
if segment_end > samples:
segment_end = samples
adjusted_start = segment_start - start
adjusted_end = segment_end - start
diarization[i, 0] = adjusted_start
diarization[i, 1] = adjusted_end
meeting_info[spknum].append((wav_path, diarization, spkidx,
meeting_scale, start, gain))
for spknum in range(max_num_spk):
if spknum in meeting_info:
for utt in range(max_num_utt_per_spk):
if utt < len(meeting_info[spknum]):
yield meeting_info[spknum][utt]
else:
yield default_utt
else:
for utt in range(max_num_utt_per_spk):
yield default_utt
utterance_info_list = list(utterance_info_generator())
# No need for the original meeting_list from now on.
del meeting_list
num_utterances = len(utterance_info_list)
tensor_shape = [(num_utterances, 1),
(num_utterances, max_dia_seg_per_utt, 2),
(num_utterances, 1),
(num_utterances, 1),
(num_utterances, 1),
(num_utterances, 1)]
tensor_type = [np.string_, np.int32, np.int32, np.float32,
np.int32, np.float32]
(wav_paths, diarizations, spkindices, meeting_scales, start_samples,
utterance_gains) = [np.reshape(
tensor, tensor_shape[i]).astype(tensor_type[i]) for i, tensor in
enumerate(list(zip(*utterance_info_list)))]
dataset = tf.data.Dataset.from_tensor_slices(
(wav_paths, diarizations, spkindices, meeting_scales, start_samples,
utterance_gains))
if repeat:
dataset = dataset.repeat()
if randomize_order:
# Randomize meeting order for each epoch through the dataset.
dataset = dataset.batch(max_num_spk * max_num_utt_per_spk)
dataset = dataset.shuffle(num_meetings)
dataset = dataset.flat_map(
lambda w, d, s, m, t, u: tf.data.Dataset.from_tensor_slices(
(w, d, s, m, t, u)))
# Read in wav files.
def decode_wav(wav):
audio_bytes = tf.read_file(wav)
waveform, _ = tf.audio.decode_wav(audio_bytes,
desired_samples=max_utt_length)
waveform = tf.transpose(waveform)
num_read_mics = tf.shape(waveform)[0]
waveform = tf.cond(num_read_mics >= num_mics,
lambda: waveform[:num_mics, :],
lambda: _pad_mics_tf(waveform, num_mics - num_read_mics))
waveform = tf.reshape(waveform, (num_mics, max_utt_length))
return waveform
def decode_wav_or_return_zeros(wav, gain=1.0):
return tf.cond(
tf.equal(wav, '0'),
lambda: tf.zeros((num_mics, max_utt_length), dtype=tf.float32),
lambda: gain * decode_wav(wav))
def utterance_reader(wav_path, diarization, spkidx, meet_scale, start, gain):
"""Reads wave file for utterance and scale it."""
utt_tensor = decode_wav_or_return_zeros(wav_path[0], gain=gain)
return utt_tensor, diarization, spkidx, meet_scale, start
  # Sandwich the heavy IO part between prefetch calls.
dataset = dataset.prefetch(parallel_readers)
dataset = dataset.map(utterance_reader,
num_parallel_calls=parallel_readers)
dataset = dataset.prefetch(parallel_readers)
def pad_utterance(utt_tensor, diarization, spkidx, meeting_scale, start):
"""Pads utterance to meeting length.
Args:
utt_tensor: Utterance with shape (num_mics, max_utt_length).
diarization: Diarization with shape (max_dia_seg_per_utt, 2).
spkidx: Speaker index (global) for the utterance.
meeting_scale: Target meeting scale.
start: Start index of utterance in the meeting.
Returns:
utt_tensor_padded: Padded utt tensor (num_mics, samples + max_utt_length)
diarization_padded: Diarization updated using the start index.
spkidx: Speaker index passed unchanged.
meeting_scale: Target meeting scale passed unchanged.
"""
start = start[0]
end_paddings = samples - start
utt_tensor_padded = tf.pad(utt_tensor, ((0, 0), (start, end_paddings)))
diarization_padded = start + diarization
return utt_tensor_padded, diarization_padded, spkidx, meeting_scale
dataset = dataset.map(pad_utterance,
num_parallel_calls=parallel_readers)
dataset = dataset.batch(max_num_utt_per_spk)
def make_reference(utt_tensor, diarization, spkidx, meeting_scale):
"""Makes a reference from fixed length utterance tensors.
Args:
utt_tensor: Utterances with shape
(max_num_utt_per_spk, num_mics, samples + max_utt_len)
diarization: Diarization ranges with shape
(max_num_utt_per_spk, max_dia_seg_per_utt, 2).
spkidx: Speaker indices (repeated) with shape (max_num_utt_per_spk)
meeting_scale: Target meeting scale (repeated).
Returns:
reference: Meeting audio with shape (num_mics, samples)
diarization_labels: tf.bool with shape (samples)
spkidx: Scalar speaker index.
meeting_scale: Target meeting scale.
"""
reference_waveform = tf.reduce_sum(utt_tensor, axis=0)
reference_waveform = reference_waveform[:, :samples]
diarization = tf.reshape(diarization,
(max_num_utt_per_spk * max_dia_seg_per_utt, 2))
active_samples_list = [
tf.range(diarization[i, 0], diarization[i, 1]) for i in
range(max_num_utt_per_spk * max_dia_seg_per_utt)]
active_samples = tf.reshape(
tf.concat(active_samples_list, axis=0), (-1, 1))
dia_full_init = tf.zeros((samples + max_utt_length, 1), dtype=tf.int32)
dia_full = tf.tensor_scatter_add(
dia_full_init, active_samples, tf.ones(tf.shape(active_samples),
dtype=tf.int32))
dia_full = tf.cast(dia_full[:samples, 0], dtype=tf.bool)
spkidx = spkidx[0]
meeting_scale = meeting_scale[0]
return reference_waveform, dia_full, spkidx, meeting_scale
dataset = dataset.map(make_reference,
num_parallel_calls=parallel_readers)
dataset = dataset.batch(max_num_spk)
# If num_meeting_subdivisions > 1, split time-dependent meeting data in time
# into num_meeting_subdivisions equal chunks. Note that speaker ids and
# meeting_scale are repeated for each chunk.
if num_meeting_subdivisions > 1:
def chop_meeting_data(reference_waveforms, diarization_labels, speaker_ids,
meeting_scale, nsplit=num_meeting_subdivisions):
samples = tf.shape(reference_waveforms)[-1]
new_samples = nsplit * (samples // nsplit)
reference_waveforms = tf.stack(
tf.split(reference_waveforms[..., :new_samples],
nsplit, axis=-1), axis=0)
diarization_labels = tf.stack(
tf.split(diarization_labels[..., :new_samples],
nsplit, axis=-1), axis=0)
speaker_ids = tf.reshape(speaker_ids, (1, max_num_spk))
speaker_ids = tf.broadcast_to(speaker_ids, (nsplit, max_num_spk))
meeting_scale = meeting_scale[0] * tf.ones((nsplit, max_num_spk))
return tf.data.Dataset.from_tensor_slices((reference_waveforms,
diarization_labels,
speaker_ids,
meeting_scale))
dataset = dataset.flat_map(chop_meeting_data)
samples = (samples // num_meeting_subdivisions)
# Build mixture and sources waveforms.
def combine_mixture_and_sources(reference_waveforms, diarization_labels,
speaker_ids, meeting_scale):
# waveforms has shape (num_sources, num_mics, num_samples).
speaker_ids = tf.reshape(speaker_ids, (max_num_spk,))
meeting_scale = meeting_scale[0]
mixture_waveform = tf.reduce_sum(reference_waveforms, axis=0)
current_mixture_scale = tf.reduce_max(tf.abs(mixture_waveform))
# Note that when meetings are chopped, we cannot apply a meeting level
# scale. Instead, we apply the scale in the chunk level so that each
# chunk has a maximum scale equal to the meeting_scale. However, we should
# not apply any gain to an all noise chunk to avoid amplifying the noise,
# so we try not to scale those chunks by checking the current_mixture_scale
# value.
scale_refs = tf.cond(current_mixture_scale > 0.005,
lambda: meeting_scale / current_mixture_scale,
lambda: 1.0)
reference_waveforms *= scale_refs
num_sources = max_num_spk
if sensor_noise_range[1] > 0.0:
num_sources += 1
sensor_noise_gain = tf.random.uniform((), minval=sensor_noise_range[0],
maxval=sensor_noise_range[1])
sensor_noise = sensor_noise_gain * tf.random.normal(
(1, num_mics, samples))
reference_waveforms = tf.concat(
(sensor_noise, reference_waveforms), axis=0)
mixture_waveform = tf.reduce_sum(reference_waveforms, axis=0)
reference_waveforms.set_shape((num_sources, num_mics, samples))
mixture_waveform.set_shape((num_mics, samples))
diarization_labels.set_shape((max_num_spk, samples))
speaker_ids.set_shape((max_num_spk,))
return {'receiver_audio': mixture_waveform,
'source_images': reference_waveforms,
'diarization_labels': diarization_labels,
'speaker_indices': speaker_ids,
}
dataset = dataset.map(combine_mixture_and_sources,
num_parallel_calls=parallel_readers)
if randomize_order and num_meeting_subdivisions > 1:
# It would be good to shuffle examples to avoid having all examples
# coming from a single meeting when we split a meeting.
dataset = dataset.shuffle(shuffle_buffer_size * num_meeting_subdivisions)
dataset = dataset.prefetch(prefetch_buffer_size)
dataset = dataset.take(num_examples)
dataset = dataset.batch(batch_size, drop_remainder=True)
iterator = dataset.make_one_shot_iterator()
return iterator.get_next()
def input_fn(params):
"""An input function that uses params['feature_spec'].
Args:
params: A dictionary of experiment params.
Returns:
Features specified by params['feature_spec']. If 'inference' exists and is
True in params, then placeholders will be returned based on the spec in
params['inference_spec'], otherwise a dataset of examples read from
params['input_data'] will be returned.
"""
if params.get('inference', False):
feature_spec = params['inference_spec']
with tf.variable_scope('input_audio'):
return placeholders_from_spec(feature_spec)
else:
json_file = params.get('input_data', None)
io_params = params.get('io_params', {})
batch_size = params.get('batch_size', None)
randomize_order = params.get('randomize_order', False)
io_params['randomize_order'] = randomize_order
return json_to_dataset(json_file,
batch_size,
**io_params)
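# A minimal usage sketch (hypothetical values; 'input_data', 'io_params',
# 'batch_size', and 'randomize_order' are the params read above):
#
#   params = {
#       'input_data': '/path/to/meetings.json',
#       'batch_size': 2,
#       'randomize_order': True,
#       'io_params': {'num_meeting_subdivisions': 4},
#   }
#   features = input_fn(params)
#   # features['receiver_audio'] is the mixture; features['source_images'],
#   # features['diarization_labels'] and features['speaker_indices'] follow
#   # the shapes built in combine_mixture_and_sources above.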
|
google-research/sound-separation
|
models/train/data_meeting_io.py
|
Python
|
apache-2.0
| 23,738
|
[
"Gaussian"
] |
d1b8a627651bb6c0f3d2b36e72c496cf3ef04518095b011e9c8dc27c30bc97e5
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import collections
import numbers
import string
from itertools import combinations_with_replacement, product
import os
import re
from collections import defaultdict
from monty.serialization import loadfn
from functools import total_ordering
from monty.fractions import gcd, gcd_float
from pymatgen.core.periodic_table import get_el_sp, Element, Specie, DummySpecie
from pymatgen.util.string import formula_double_format
from monty.json import MSONable
from pymatgen.core.units import unitized
"""
This module implements a Composition class to represent compositions,
and a ChemicalPotential class to represent potentials.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Nov 10, 2012"
@total_ordering
class Composition(collections.abc.Hashable, collections.abc.Mapping, MSONable):
"""
Represents a Composition, which is essentially a {element:amount} mapping
type. Composition is written to be immutable and hashable,
unlike a standard Python dict.
    Note that the key can be either an Element or a Specie. Elements and Species
    are treated differently; e.g., a Fe2+ Specie is not the same as a Fe3+ Specie and
would be put in separate keys. This differentiation is deliberate to
support using Composition to determine the fraction of a particular Specie.
Works almost completely like a standard python dictionary, except that
__getitem__ is overridden to return 0 when an element is not found.
(somewhat like a defaultdict, except it is immutable).
Also adds more convenience methods relevant to compositions, e.g.,
get_fraction.
It should also be noted that many Composition related functionality takes
in a standard string as a convenient input. For example,
even though the internal representation of a Fe2O3 composition is
{Element("Fe"): 2, Element("O"): 3}, you can obtain the amount of Fe
simply by comp["Fe"] instead of the more verbose comp[Element("Fe")].
>>> comp = Composition("LiFePO4")
>>> comp.get_atomic_fraction(Element("Li"))
0.14285714285714285
>>> comp.num_atoms
7.0
>>> comp.reduced_formula
'LiFePO4'
>>> comp.formula
'Li1 Fe1 P1 O4'
>>> comp.get_wt_fraction(Element("Li"))
0.04399794666951898
>>> comp.num_atoms
7.0
"""
"""
Tolerance in distinguishing different composition amounts.
1e-8 is fairly tight, but should cut out most floating point arithmetic
errors.
"""
amount_tolerance = 1e-8
"""
Special formula handling for peroxides and certain elements. This is so
that formula output does not write LiO instead of Li2O2 for example.
"""
special_formulas = {"LiO": "Li2O2", "NaO": "Na2O2", "KO": "K2O2",
"HO": "H2O2", "CsO": "Cs2O2", "RbO": "Rb2O2",
"O": "O2", "N": "N2", "F": "F2", "Cl": "Cl2",
"H": "H2"}
oxi_prob = None # prior probability of oxidation used by oxi_state_guesses
def __init__(self, *args, strict=False, **kwargs): # allow_negative=False
"""
Very flexible Composition construction, similar to the built-in Python
dict(). Also extended to allow simple string init.
Args:
Any form supported by the Python built-in dict() function.
1. A dict of either {Element/Specie: amount},
{string symbol:amount}, or {atomic number:amount} or any mixture
of these. E.g., {Element("Li"):2 ,Element("O"):1},
{"Li":2, "O":1}, {3:2, 8:1} all result in a Li2O composition.
2. Keyword arg initialization, similar to a dict, e.g.,
Composition(Li = 2, O = 1)
In addition, the Composition constructor also allows a single
string as an input formula. E.g., Composition("Li2O").
strict: Only allow valid Elements and Species in the Composition.
allow_negative: Whether to allow negative compositions. This
argument must be popped from the \\*\\*kwargs due to \\*args
ambiguity.
"""
self.allow_negative = kwargs.pop('allow_negative', False)
# it's much faster to recognize a composition and use the elmap than
# to pass the composition to dict()
if len(args) == 1 and isinstance(args[0], Composition):
elmap = args[0]
elif len(args) == 1 and isinstance(args[0], str):
elmap = self._parse_formula(args[0])
else:
elmap = dict(*args, **kwargs)
elamt = {}
self._natoms = 0
for k, v in elmap.items():
if v < -Composition.amount_tolerance and not self.allow_negative:
raise CompositionError("Amounts in Composition cannot be "
"negative!")
if abs(v) >= Composition.amount_tolerance:
elamt[get_el_sp(k)] = v
self._natoms += abs(v)
self._data = elamt
if strict and not self.valid:
raise ValueError("Composition is not valid, contains: {}"
.format(", ".join(map(str, self.elements))))
def __getitem__(self, item):
try:
sp = get_el_sp(item)
return self._data.get(sp, 0)
except ValueError as ex:
raise TypeError("Invalid key {}, {} for Composition\n"
"ValueError exception:\n{}".format(item,
type(item), ex))
def __len__(self):
return len(self._data)
def __iter__(self):
return self._data.keys().__iter__()
def __contains__(self, item):
try:
sp = get_el_sp(item)
return sp in self._data
except ValueError as ex:
raise TypeError("Invalid key {}, {} for Composition\n"
"ValueError exception:\n{}".format(item,
type(item), ex))
def __eq__(self, other):
# elements with amounts < Composition.amount_tolerance don't show up
# in the elmap, so checking len enables us to only check one
        # composition's elements
if len(self) != len(other):
return False
for el, v in self.items():
if abs(v - other[el]) > Composition.amount_tolerance:
return False
return True
def __ge__(self, other):
"""
Defines >= for Compositions. Should ONLY be used for defining a sort
order (the behavior is probably not what you'd expect)
"""
for el in sorted(set(self.elements + other.elements)):
if other[el] - self[el] >= Composition.amount_tolerance:
return False
elif self[el] - other[el] >= Composition.amount_tolerance:
return True
return True
def __ne__(self, other):
return not self.__eq__(other)
def __add__(self, other):
"""
Adds two compositions. For example, an Fe2O3 composition + an FeO
composition gives a Fe3O4 composition.
"""
new_el_map = collections.defaultdict(float)
new_el_map.update(self)
for k, v in other.items():
new_el_map[get_el_sp(k)] += v
return Composition(new_el_map, allow_negative=self.allow_negative)
def __sub__(self, other):
"""
Subtracts two compositions. For example, an Fe2O3 composition - an FeO
composition gives an FeO2 composition.
Raises:
CompositionError if the subtracted composition is greater than the
original composition in any of its elements, unless allow_negative
is True
"""
new_el_map = collections.defaultdict(float)
new_el_map.update(self)
for k, v in other.items():
new_el_map[get_el_sp(k)] -= v
return Composition(new_el_map, allow_negative=self.allow_negative)
def __mul__(self, other):
"""
Multiply a Composition by an integer or a float.
Fe2O3 * 4 -> Fe8O12
"""
if not isinstance(other, numbers.Number):
return NotImplemented
return Composition({el: self[el] * other for el in self},
allow_negative=self.allow_negative)
__rmul__ = __mul__
def __truediv__(self, other):
if not isinstance(other, numbers.Number):
return NotImplemented
return Composition({el: self[el] / other for el in self},
allow_negative=self.allow_negative)
__div__ = __truediv__
def __hash__(self):
"""
Minimally effective hash function that just distinguishes between
Compositions with different elements.
"""
hashcode = 0
for el, amt in self.items():
if abs(amt) > Composition.amount_tolerance:
hashcode += el.Z
return hashcode
@property
def average_electroneg(self):
return sum((el.X * abs(amt) for el, amt in self.items())) / \
self.num_atoms
@property
def total_electrons(self):
return sum((el.Z * abs(amt) for el, amt in self.items()))
def almost_equals(self, other, rtol=0.1, atol=1e-8):
"""
Returns true if compositions are equal within a tolerance.
Args:
other (Composition): Other composition to check
rtol (float): Relative tolerance
atol (float): Absolute tolerance
"""
sps = set(self.elements + other.elements)
for sp in sps:
a = self[sp]
b = other[sp]
tol = atol + rtol * (abs(a) + abs(b)) / 2
if abs(b - a) > tol:
return False
return True
@property
def is_element(self):
"""
True if composition is for an element.
"""
return len(self) == 1
def copy(self):
return Composition(self, allow_negative=self.allow_negative)
@property
def formula(self):
"""
Returns a formula string, with elements sorted by electronegativity,
e.g., Li4 Fe4 P4 O16.
"""
sym_amt = self.get_el_amt_dict()
syms = sorted(sym_amt.keys(), key=lambda sym: get_el_sp(sym).X)
formula = [s + formula_double_format(sym_amt[s], False) for s in syms]
return " ".join(formula)
@property
def alphabetical_formula(self):
"""
        Returns a formula string, with elements sorted alphabetically,
e.g., Fe4 Li4 O16 P4.
"""
sym_amt = self.get_el_amt_dict()
syms = sorted(sym_amt.keys())
formula = [s + formula_double_format(sym_amt[s], False) for s in syms]
return " ".join(formula)
@property
def iupac_formula(self):
"""
Returns a formula string, with elements sorted by the iupac
electronegativity ordering defined in Table VI of "Nomenclature of
Inorganic Chemistry (IUPAC Recommendations 2005)". This ordering
effectively follows the groups and rows of the periodic table, except
        the Lanthanides, Actinides and hydrogen. Polyanions are still determined
based on the true electronegativity of the elements.
e.g. CH2(SO4)2
"""
sym_amt = self.get_el_amt_dict()
syms = sorted(sym_amt.keys(),
key=lambda s: get_el_sp(s).iupac_ordering)
formula = [s + formula_double_format(sym_amt[s], False) for s in syms]
return " ".join(formula)
@property
def element_composition(self):
"""
Returns the composition replacing any species by the corresponding
element.
"""
return Composition(self.get_el_amt_dict(),
allow_negative=self.allow_negative)
@property
def fractional_composition(self):
"""
        Returns the normalized composition in which the amounts of all
        species sum to 1.
        Returns:
            Normalized composition in which the amounts of all species sum to 1.
"""
return self / self._natoms
@property
def reduced_composition(self):
"""
        Returns the reduced composition, i.e., amounts normalized by the greatest
common denominator. e.g., Composition("FePO4") for
Composition("Fe4P4O16").
"""
return self.get_reduced_composition_and_factor()[0]
def get_reduced_composition_and_factor(self):
"""
Calculates a reduced composition and factor.
Returns:
A normalized composition and a multiplicative factor, i.e.,
Li4Fe4P4O16 returns (Composition("LiFePO4"), 4).
"""
factor = self.get_reduced_formula_and_factor()[1]
return self / factor, factor
def get_reduced_formula_and_factor(self, iupac_ordering=False):
"""
Calculates a reduced formula and factor.
Args:
iupac_ordering (bool, optional): Whether to order the
formula by the iupac "electronegativity" series, defined in
Table VI of "Nomenclature of Inorganic Chemistry (IUPAC
Recommendations 2005)". This ordering effectively follows
the groups and rows of the periodic table, except the
                Lanthanides, Actinides and hydrogen. Note that polyanions
will still be determined based on the true electronegativity of
the elements.
Returns:
A pretty normalized formula and a multiplicative factor, i.e.,
Li4Fe4P4O16 returns (LiFePO4, 4).
"""
all_int = all(abs(x - round(x)) < Composition.amount_tolerance
for x in self.values())
if not all_int:
return self.formula.replace(" ", ""), 1
d = {k: int(round(v)) for k, v in self.get_el_amt_dict().items()}
(formula, factor) = reduce_formula(
d, iupac_ordering=iupac_ordering)
if formula in Composition.special_formulas:
formula = Composition.special_formulas[formula]
factor /= 2
return formula, factor
def get_integer_formula_and_factor(self, max_denominator=10000,
iupac_ordering=False):
"""
Calculates an integer formula and factor.
Args:
max_denominator (int): all amounts in the el:amt dict are
first converted to a Fraction with this maximum denominator
iupac_ordering (bool, optional): Whether to order the
formula by the iupac "electronegativity" series, defined in
Table VI of "Nomenclature of Inorganic Chemistry (IUPAC
Recommendations 2005)". This ordering effectively follows
the groups and rows of the periodic table, except the
                Lanthanides, Actinides and hydrogen. Note that polyanions
will still be determined based on the true electronegativity of
the elements.
Returns:
A pretty normalized formula and a multiplicative factor, i.e.,
Li0.5O0.25 returns (Li2O, 0.25). O0.25 returns (O2, 0.125)
"""
el_amt = self.get_el_amt_dict()
g = gcd_float(list(el_amt.values()), 1 / max_denominator)
d = {k: round(v / g) for k, v in el_amt.items()}
(formula, factor) = reduce_formula(
d, iupac_ordering=iupac_ordering)
if formula in Composition.special_formulas:
formula = Composition.special_formulas[formula]
factor /= 2
return formula, factor * g
@property
def reduced_formula(self):
"""
Returns a pretty normalized formula, i.e., LiFePO4 instead of
Li4Fe4P4O16.
"""
return self.get_reduced_formula_and_factor()[0]
@property
def hill_formula(self):
c = self.element_composition
elements = sorted([el.symbol for el in c.keys()])
if "C" in elements:
elements = ["C"] + [el for el in elements if el != "C"]
formula = ["%s%s" % (el, formula_double_format(c[el]) if c[el] != 1 else "")
for el in elements]
return " ".join(formula)
@property
def elements(self):
"""
Returns view of elements in Composition.
"""
return list(self.keys())
def __str__(self):
return " ".join([
"{}{}".format(k, formula_double_format(v, ignore_ones=False))
for k, v in self.as_dict().items()])
@property
def num_atoms(self):
"""
Total number of atoms in Composition. For negative amounts, sum
of absolute values
"""
return self._natoms
@property
@unitized("amu")
def weight(self):
"""
Total molecular weight of Composition
"""
return sum([amount * el.atomic_mass
for el, amount in self.items()])
def get_atomic_fraction(self, el):
"""
Calculate atomic fraction of an Element or Specie.
Args:
el (Element/Specie): Element or Specie to get fraction for.
Returns:
Atomic fraction for element el in Composition
"""
return abs(self[el]) / self._natoms
def get_wt_fraction(self, el):
"""
Calculate weight fraction of an Element or Specie.
Args:
el (Element/Specie): Element or Specie to get fraction for.
Returns:
Weight fraction for element el in Composition
"""
return get_el_sp(el).atomic_mass * abs(self[el]) / self.weight
def contains_element_type(self, category):
"""
Check if Composition contains any elements matching a given category.
Args:
category (str): one of "noble_gas", "transition_metal",
"post_transition_metal", "rare_earth_metal", "metal", "metalloid",
"alkali", "alkaline", "halogen", "chalcogen", "lanthanoid",
"actinoid", "quadrupolar", "s-block", "p-block", "d-block", "f-block"
Returns:
True if any elements in Composition match category, otherwise False
"""
allowed_categories = ("noble_gas", "transition_metal", "post_transition_metal",
"rare_earth_metal", "metal", "metalloid", "alkali",
"alkaline", "halogen", "chalcogen", "lanthanoid",
"actinoid", "quadrupolar", "s-block", "p-block",
"d-block", "f-block")
if category not in allowed_categories:
raise ValueError("Please pick a category from: {}".format(", ".join(allowed_categories)))
if "block" in category:
return any([category[0] in el.block for el in self.elements])
else:
return any([getattr(el, "is_{}".format(category)) for el in self.elements])
def _parse_formula(self, formula):
"""
Args:
formula (str): A string formula, e.g. Fe2O3, Li3Fe2(PO4)3
Returns:
Composition with that formula.
Notes:
In the case of Metallofullerene formula (e.g. Y3N@C80),
the @ mark will be dropped and passed to parser.
"""
# for Metallofullerene like "Y3N@C80"
formula = formula.replace("@", "")
def get_sym_dict(f, factor):
sym_dict = collections.defaultdict(float)
for m in re.finditer(r"([A-Z][a-z]*)\s*([-*\.\d]*)", f):
el = m.group(1)
amt = 1
if m.group(2).strip() != "":
amt = float(m.group(2))
sym_dict[el] += amt * factor
f = f.replace(m.group(), "", 1)
if f.strip():
raise CompositionError("{} is an invalid formula!".format(f))
return sym_dict
m = re.search(r"\(([^\(\)]+)\)\s*([\.\d]*)", formula)
if m:
factor = 1
if m.group(2) != "":
factor = float(m.group(2))
unit_sym_dict = get_sym_dict(m.group(1), factor)
expanded_sym = "".join(["{}{}".format(el, amt)
for el, amt in unit_sym_dict.items()])
expanded_formula = formula.replace(m.group(), expanded_sym)
return self._parse_formula(expanded_formula)
return get_sym_dict(formula, 1)
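    # A worked sketch of the recursive parse above (not an executable test):
    #   _parse_formula("Li3Fe2(PO4)3")
    #   expands "(PO4)3" to "P3.0O12.0" and returns
    #   {"Li": 3.0, "Fe": 2.0, "P": 3.0, "O": 12.0}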
@property
def anonymized_formula(self):
"""
        An anonymized formula. Unique species are arranged in order of
        increasing amount and assigned ascending letters. Useful for
prototyping formulas. For example, all stoichiometric perovskites have
anonymized_formula ABC3.
"""
reduced = self.element_composition
if all(x == int(x) for x in self.values()):
reduced /= gcd(*(int(i) for i in self.values()))
anon = ""
for e, amt in zip(string.ascii_uppercase, sorted(reduced.values())):
if amt == 1:
amt_str = ""
elif abs(amt % 1) < 1e-8:
amt_str = str(int(amt))
else:
amt_str = str(amt)
anon += ("{}{}".format(e, amt_str))
return anon
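    # Sketch: Composition("LiFePO4").anonymized_formula sorts the reduced
    # amounts [1, 1, 1, 4], labels them A, B, C, D and yields "ABCD4";
    # any stoichiometric perovskite similarly yields "ABC3".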
@property
def chemical_system(self):
"""
Get the chemical system of a Composition, for example "O-Si" for
SiO2. Chemical system is a string of a list of elements
sorted alphabetically and joined by dashes, by convention for use
in database keys.
"""
return "-".join(sorted([str(el) for el in self.elements]))
@property
def valid(self):
"""
Returns True if Composition contains valid elements or species and
False if the Composition contains any dummy species.
"""
return not any([isinstance(el, DummySpecie) for el in self.elements])
def __repr__(self):
return "Comp: " + self.formula
@classmethod
def from_dict(cls, d):
"""
Creates a composition from a dict generated by as_dict(). Strictly not
necessary given that the standard constructor already takes in such an
input, but this method preserves the standard pymatgen API of having
from_dict methods to reconstitute objects generated by as_dict(). Allows
for easier introspection.
Args:
d (dict): {symbol: amount} dict.
"""
return cls(d)
def get_el_amt_dict(self):
"""
Returns:
Dict with element symbol and (unreduced) amount e.g.,
{"Fe": 4.0, "O":6.0} or {"Fe3+": 4.0, "O2-":6.0}
"""
d = collections.defaultdict(float)
for e, a in self.items():
d[e.symbol] += a
return d
def as_dict(self):
"""
Returns:
dict with species symbol and (unreduced) amount e.g.,
{"Fe": 4.0, "O":6.0} or {"Fe3+": 4.0, "O2-":6.0}
"""
d = collections.defaultdict(float)
for e, a in self.items():
d[str(e)] += a
return d
@property
def to_reduced_dict(self):
"""
Returns:
Dict with element symbol and reduced amount e.g.,
{"Fe": 2.0, "O":3.0}
"""
c = Composition(self.reduced_formula)
return c.as_dict()
@property
def to_data_dict(self):
"""
Returns:
A dict with many keys and values relating to Composition/Formula,
including reduced_cell_composition, unit_cell_composition,
reduced_cell_formula, elements and nelements.
"""
return {"reduced_cell_composition": self.to_reduced_dict,
"unit_cell_composition": self.as_dict(),
"reduced_cell_formula": self.reduced_formula,
"elements": list(self.as_dict().keys()),
"nelements": len(self.as_dict().keys())}
def oxi_state_guesses(self, oxi_states_override=None, target_charge=0,
all_oxi_states=False, max_sites=None):
"""
Checks if the composition is charge-balanced and returns back all
charge-balanced oxidation state combinations. Composition must have
        integer values. Note that more atoms in the composition give
more degrees of freedom. e.g., if possible oxidation states of
element X are [2,4] and Y are [-3], then XY is not charge balanced
but X2Y2 is. Results are returned from most to least probable based
on ICSD statistics. Use max_sites to improve performance if needed.
Args:
oxi_states_override (dict): dict of str->list to override an
element's common oxidation states, e.g. {"V": [2,3,4,5]}
target_charge (int): the desired total charge on the structure.
Default is 0 signifying charge balance.
all_oxi_states (bool): if True, an element defaults to
all oxidation states in pymatgen Element.icsd_oxidation_states.
Otherwise, default is Element.common_oxidation_states. Note
that the full oxidation state list is *very* inclusive and
can produce nonsensical results.
max_sites (int): if possible, will reduce Compositions to at most
this many sites to speed up oxidation state guesses. If the
composition cannot be reduced to this many sites a ValueError
will be raised. Set to -1 to just reduce fully. If set to a
number less than -1, the formula will be fully reduced but a
ValueError will be thrown if the number of atoms in the reduced
formula is greater than abs(max_sites).
Returns:
A list of dicts - each dict reports an element symbol and average
oxidation state across all sites in that composition. If the
composition is not charge balanced, an empty list is returned.
"""
return self._get_oxid_state_guesses(all_oxi_states, max_sites, oxi_states_override, target_charge)[0]
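    # Sketch (the exact ranking depends on the bundled ICSD statistics):
    #   Composition("Fe3O4").oxi_state_guesses()
    # could return [{"Fe": 2.667, "O": -2.0}], i.e. the average Fe state of
    # a mixed Fe2+/Fe3+ solution balancing four O2- anions.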
def add_charges_from_oxi_state_guesses(self,
oxi_states_override=None,
target_charge=0,
all_oxi_states=False,
max_sites=None):
"""
        Assign oxidation states based on guessed oxidation states.
See `oxi_state_guesses` for an explanation of how oxidation states are
guessed. This operation uses the set of oxidation states for each site
        that were determined to be most likely by the oxidation state guessing
routine.
Args:
oxi_states_override (dict): dict of str->list to override an
element's common oxidation states, e.g. {"V": [2,3,4,5]}
target_charge (int): the desired total charge on the structure.
Default is 0 signifying charge balance.
all_oxi_states (bool): if True, an element defaults to
all oxidation states in pymatgen Element.icsd_oxidation_states.
Otherwise, default is Element.common_oxidation_states. Note
that the full oxidation state list is *very* inclusive and
can produce nonsensical results.
max_sites (int): if possible, will reduce Compositions to at most
this many sites to speed up oxidation state guesses. If the
composition cannot be reduced to this many sites a ValueError
will be raised. Set to -1 to just reduce fully. If set to a
number less than -1, the formula will be fully reduced but a
ValueError will be thrown if the number of atoms in the reduced
formula is greater than abs(max_sites).
Returns:
Composition, where the elements are assigned oxidation states based
            on the results from guessing oxidation states. If no oxidation state
is possible, returns a Composition where all oxidation states are 0.
"""
_, oxidation_states = self._get_oxid_state_guesses(
all_oxi_states, max_sites, oxi_states_override, target_charge)
# Special case: No charged compound is possible
if len(oxidation_states) == 0:
return Composition(dict((Specie(e, 0), f) for e, f in self.items()))
# Generate the species
species = []
for el, charges in oxidation_states[0].items():
species.extend([Specie(el, c) for c in charges])
# Return the new object
return Composition(collections.Counter(species))
def _get_oxid_state_guesses(self, all_oxi_states, max_sites,
oxi_states_override, target_charge):
"""
Utility operation for guessing oxidation states.
See `oxi_state_guesses` for full details. This operation does the
calculation of the most likely oxidation states
Args:
oxi_states_override (dict): dict of str->list to override an
element's common oxidation states, e.g. {"V": [2,3,4,5]}
target_charge (int): the desired total charge on the structure.
Default is 0 signifying charge balance.
all_oxi_states (bool): if True, an element defaults to
all oxidation states in pymatgen Element.icsd_oxidation_states.
Otherwise, default is Element.common_oxidation_states. Note
that the full oxidation state list is *very* inclusive and
can produce nonsensical results.
max_sites (int): if possible, will reduce Compositions to at most
this many sites to speed up oxidation state guesses. If the
composition cannot be reduced to this many sites a ValueError
will be raised. Set to -1 to just reduce fully. If set to a
number less than -1, the formula will be fully reduced but a
ValueError will be thrown if the number of atoms in the reduced
formula is greater than abs(max_sites).
Returns:
A list of dicts - each dict reports an element symbol and average
oxidation state across all sites in that composition. If the
composition is not charge balanced, an empty list is returned.
            A list of dicts - each dict maps the element symbol to a list of
            oxidation states for each site of that element. For example, Fe3O4
            could return a list of [2, 2, 2, 3, 3, 3] for the oxidation states
            of the Fe sites. If the composition is not charge balanced, an
            empty list is returned.
"""
comp = self.copy()
# reduce Composition if necessary
if max_sites and max_sites < 0:
comp = self.reduced_composition
if max_sites < -1 and comp.num_atoms > abs(max_sites):
raise ValueError(
"Composition {} cannot accommodate max_sites "
"setting!".format(comp))
elif max_sites and comp.num_atoms > max_sites:
reduced_comp, reduced_factor = self. \
get_reduced_composition_and_factor()
if reduced_factor > 1:
reduced_comp *= max(1, int(max_sites / reduced_comp.num_atoms))
comp = reduced_comp # as close to max_sites as possible
if comp.num_atoms > max_sites:
raise ValueError("Composition {} cannot accommodate max_sites "
"setting!".format(comp))
# Load prior probabilities of oxidation states, used to rank solutions
if not Composition.oxi_prob:
module_dir = os.path.join(os.path.
dirname(os.path.abspath(__file__)))
all_data = loadfn(os.path.join(module_dir, "..",
"analysis", "icsd_bv.yaml"))
Composition.oxi_prob = {Specie.from_string(sp): data
for sp, data in
all_data["occurrence"].items()}
oxi_states_override = oxi_states_override or {}
# assert: Composition only has integer amounts
if not all(amt == int(amt) for amt in comp.values()):
raise ValueError("Charge balance analysis requires integer "
"values in Composition!")
# for each element, determine all possible sum of oxidations
# (taking into account nsites for that particular element)
el_amt = comp.get_el_amt_dict()
els = el_amt.keys()
el_sums = [] # matrix: dim1= el_idx, dim2=possible sums
el_sum_scores = defaultdict(set) # dict of el_idx, sum -> score
el_best_oxid_combo = {} # dict of el_idx, sum -> oxid combo with best score
for idx, el in enumerate(els):
el_sum_scores[idx] = {}
el_best_oxid_combo[idx] = {}
el_sums.append([])
if oxi_states_override.get(el):
oxids = oxi_states_override[el]
elif all_oxi_states:
oxids = Element(el).oxidation_states
else:
oxids = Element(el).icsd_oxidation_states or \
Element(el).oxidation_states
# get all possible combinations of oxidation states
# and sum each combination
for oxid_combo in combinations_with_replacement(oxids,
int(el_amt[el])):
# List this sum as a possible option
oxid_sum = sum(oxid_combo)
if oxid_sum not in el_sums[idx]:
el_sums[idx].append(oxid_sum)
                # Determine how probable this combo is
score = sum([Composition.oxi_prob.get(Specie(el, o), 0) for
o in oxid_combo])
# If it is the most probable combo for a certain sum,
# store the combination
if oxid_sum not in el_sum_scores[idx] or score > el_sum_scores[idx].get(oxid_sum, 0):
el_sum_scores[idx][oxid_sum] = score
el_best_oxid_combo[idx][oxid_sum] = oxid_combo
# Determine which combination of oxidation states for each element
# is the most probable
all_sols = [] # will contain all solutions
all_oxid_combo = [] # will contain the best combination of oxidation states for each site
all_scores = [] # will contain a score for each solution
for x in product(*el_sums):
# each x is a trial of one possible oxidation sum for each element
if sum(x) == target_charge: # charge balance condition
el_sum_sol = dict(zip(els, x)) # element->oxid_sum
# normalize oxid_sum by amount to get avg oxid state
sol = {el: v / el_amt[el] for el, v in el_sum_sol.items()}
all_sols.append(sol) # add the solution to the list of solutions
# determine the score for this solution
score = 0
for idx, v in enumerate(x):
score += el_sum_scores[idx][v]
all_scores.append(score)
# collect the combination of oxidation states for each site
                all_oxid_combo.append(
                    dict((e, el_best_oxid_combo[idx][v])
                         for idx, (e, v) in enumerate(zip(els, x))))
# sort the solutions by highest to lowest score
if len(all_scores) > 0:
all_sols, all_oxid_combo = zip(*[(y, x) for (z, y, x) in sorted(zip(all_scores, all_sols, all_oxid_combo),
key=lambda pair: pair[0],
reverse=True)])
return all_sols, all_oxid_combo
@staticmethod
def ranked_compositions_from_indeterminate_formula(fuzzy_formula,
lock_if_strict=True):
"""
        Takes in a formula where capitalization might not be correctly entered,
and suggests a ranked list of potential Composition matches.
Author: Anubhav Jain
Args:
fuzzy_formula (str): A formula string, such as "co2o3" or "MN",
that may or may not have multiple interpretations
lock_if_strict (bool): If true, a properly entered formula will
only return the one correct interpretation. For example,
"Co1" will only return "Co1" if true, but will return both
"Co1" and "C1 O1" if false.
Returns:
A ranked list of potential Composition matches
"""
#if we have an exact match and the user specifies lock_if_strict, just
#return the exact match!
if lock_if_strict:
#the strict composition parsing might throw an error, we can ignore
#it and just get on with fuzzy matching
try:
comp = Composition(fuzzy_formula)
return [comp]
except (CompositionError, ValueError):
pass
all_matches = Composition._comps_from_fuzzy_formula(fuzzy_formula)
#remove duplicates
all_matches = list(set(all_matches))
#sort matches by rank descending
all_matches = sorted(all_matches,
key=lambda match: match[1], reverse=True)
all_matches = [m[0] for m in all_matches]
return all_matches
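    # Sketch: ranked_compositions_from_indeterminate_formula("co2o3")
    # should rank the Co2O3 interpretation above carbon-oxygen readings,
    # since correctly capitalized element symbols earn extra points below.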
@staticmethod
def _comps_from_fuzzy_formula(fuzzy_formula, m_dict={}, m_points=0,
factor=1):
"""
A recursive helper method for formula parsing that helps in
interpreting and ranking indeterminate formulas.
Author: Anubhav Jain
Args:
fuzzy_formula (str): A formula string, such as "co2o3" or "MN",
that may or may not have multiple interpretations.
m_dict (dict): A symbol:amt dictionary from the previously parsed
formula.
m_points: Number of points gained from the previously parsed
formula.
factor: Coefficient for this parse, e.g. (PO4)2 will feed in PO4
as the fuzzy_formula with a coefficient of 2.
Returns:
A list of tuples, with the first element being a Composition and
            the second element being the number of points awarded to that
            Composition interpretation.
"""
def _parse_chomp_and_rank(m, f, m_dict, m_points):
"""
A helper method for formula parsing that helps in interpreting and
ranking indeterminate formulas
Author: Anubhav Jain
Args:
m: A regex match, with the first group being the element and
the second group being the amount
f: The formula part containing the match
m_dict: A symbol:amt dictionary from the previously parsed
formula
m_points: Number of points gained from the previously parsed
formula
Returns:
A tuple of (f, m_dict, points) where m_dict now contains data
from the match and the match has been removed (chomped) from
the formula f. The "goodness" of the match determines the
number of points returned for chomping. Returns
(None, None, None) if no element could be found...
"""
points = 0
            # Points awarded if the first letter of the element is correctly
            # specified as a capital
points_first_capital = 100
# Points awarded if the second letter of the element is correctly
# specified as lowercase
points_second_lowercase = 100
#get element and amount from regex match
el = m.group(1)
if len(el) > 2 or len(el) < 1:
raise CompositionError("Invalid element symbol entered!")
amt = float(m.group(2)) if m.group(2).strip() != "" else 1
#convert the element string to proper [uppercase,lowercase] format
#and award points if it is already in that format
char1 = el[0]
char2 = el[1] if len(el) > 1 else ""
if char1 == char1.upper():
points += points_first_capital
if char2 and char2 == char2.lower():
points += points_second_lowercase
el = char1.upper() + char2.lower()
#if it's a valid element, chomp and add to the points
if Element.is_valid_symbol(el):
if el in m_dict:
m_dict[el] += amt * factor
else:
m_dict[el] = amt * factor
return f.replace(m.group(), "", 1), m_dict, m_points + points
#else return None
return None, None, None
fuzzy_formula = fuzzy_formula.strip()
if len(fuzzy_formula) == 0:
# The entire formula has been parsed into m_dict. Return the
# corresponding Composition and number of points
if m_dict:
yield (Composition.from_dict(m_dict), m_points)
else:
# if there is a parenthesis, remove it and match the remaining stuff
# with the appropriate factor
for mp in re.finditer(r"\(([^\(\)]+)\)([\.\d]*)", fuzzy_formula):
mp_points = m_points
mp_form = fuzzy_formula.replace(mp.group(), " ", 1)
mp_dict = dict(m_dict)
mp_factor = 1 if mp.group(2) == "" else float(mp.group(2))
# Match the stuff inside the parenthesis with the appropriate
# factor
for match in \
Composition._comps_from_fuzzy_formula(mp.group(1),
mp_dict,
mp_points,
factor=mp_factor):
only_me = True
# Match the stuff outside the parentheses and return the
# sum.
for match2 in \
Composition._comps_from_fuzzy_formula(mp_form,
mp_dict,
mp_points,
factor=1):
only_me = False
yield (match[0] + match2[0], match[1] + match2[1])
                    # if nothing was matched outside the parentheses, just
                    # yield the match from inside them
if only_me:
yield match
return
# try to match the single-letter elements
        m1 = re.match(r"([A-Za-z])([\.\d]*)", fuzzy_formula)
if m1:
m_points1 = m_points
m_form1 = fuzzy_formula
m_dict1 = dict(m_dict)
(m_form1, m_dict1, m_points1) = \
_parse_chomp_and_rank(m1, m_form1, m_dict1, m_points1)
if m_dict1:
#there was a real match
for match in \
Composition._comps_from_fuzzy_formula(m_form1,
m_dict1,
m_points1,
factor):
yield match
#try to match two-letter elements
        m2 = re.match(r"([A-Za-z]{2})([\.\d]*)", fuzzy_formula)
if m2:
m_points2 = m_points
m_form2 = fuzzy_formula
m_dict2 = dict(m_dict)
(m_form2, m_dict2, m_points2) = \
_parse_chomp_and_rank(m2, m_form2, m_dict2, m_points2)
if m_dict2:
#there was a real match
for match in \
Composition._comps_from_fuzzy_formula(m_form2, m_dict2,
m_points2,
factor):
yield match
def reduce_formula(sym_amt, iupac_ordering=False):
"""
Helper method to reduce a sym_amt dict to a reduced formula and factor.
Args:
sym_amt (dict): {symbol: amount}.
iupac_ordering (bool, optional): Whether to order the
formula by the iupac "electronegativity" series, defined in
Table VI of "Nomenclature of Inorganic Chemistry (IUPAC
Recommendations 2005)". This ordering effectively follows
the groups and rows of the periodic table, except the
            Lanthanides, Actinides and hydrogen. Note that polyanions
will still be determined based on the true electronegativity of
the elements.
Returns:
(reduced_formula, factor).
"""
syms = sorted(sym_amt.keys(), key=lambda x: [get_el_sp(x).X, x])
syms = list(filter(
lambda x: abs(sym_amt[x]) > Composition.amount_tolerance, syms))
factor = 1
# Enforce integers for doing gcd.
if all((int(i) == i for i in sym_amt.values())):
factor = abs(gcd(*(int(i) for i in sym_amt.values())))
polyanion = []
# if the composition contains a poly anion
if len(syms) >= 3 and get_el_sp(syms[-1]).X - get_el_sp(syms[-2]).X < 1.65:
poly_sym_amt = {syms[i]: sym_amt[syms[i]] / factor
for i in [-2, -1]}
(poly_form, poly_factor) = reduce_formula(
poly_sym_amt, iupac_ordering=iupac_ordering)
if poly_factor != 1:
polyanion.append("({}){}".format(poly_form, int(poly_factor)))
syms = syms[:len(syms) - 2 if polyanion else len(syms)]
if iupac_ordering:
syms = sorted(syms,
key=lambda x: [get_el_sp(x).iupac_ordering, x])
reduced_form = []
for s in syms:
normamt = sym_amt[s] * 1.0 / factor
reduced_form.append(s)
reduced_form.append(formula_double_format(normamt))
reduced_form = "".join(reduced_form + polyanion)
return reduced_form, factor
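# Worked sketch of reduce_formula:
#   reduce_formula({"Fe": 4, "O": 6})
# divides by the gcd factor 2 and returns ("Fe2O3", 2).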
class CompositionError(Exception):
"""Exception class for composition errors"""
pass
class ChemicalPotential(dict, MSONable):
"""
Class to represent set of chemical potentials. Can be:
multiplied/divided by a Number
multiplied by a Composition (returns an energy)
added/subtracted with other ChemicalPotentials.
"""
def __init__(self, *args, **kwargs):
"""
Args:
*args, **kwargs: any valid dict init arguments
"""
d = dict(*args, **kwargs)
super().__init__((get_el_sp(k), v)
for k, v in d.items())
if len(d) != len(self):
raise ValueError("Duplicate potential specified")
def __mul__(self, other):
if isinstance(other, numbers.Number):
return ChemicalPotential({k: v * other for k, v in self.items()})
else:
return NotImplemented
__rmul__ = __mul__
def __truediv__(self, other):
if isinstance(other, numbers.Number):
return ChemicalPotential({k: v / other for k, v in self.items()})
else:
return NotImplemented
__div__ = __truediv__
def __sub__(self, other):
if isinstance(other, ChemicalPotential):
els = set(self.keys()).union(other.keys())
return ChemicalPotential({e: self.get(e, 0) - other.get(e, 0)
for e in els})
else:
return NotImplemented
def __add__(self, other):
if isinstance(other, ChemicalPotential):
els = set(self.keys()).union(other.keys())
return ChemicalPotential({e: self.get(e, 0) + other.get(e, 0)
for e in els})
else:
return NotImplemented
def get_energy(self, composition, strict=True):
"""
Calculates the energy of a composition.
Args:
composition (Composition): input composition
strict (bool): Whether all potentials must be specified
"""
if strict and set(composition.keys()) > set(self.keys()):
s = set(composition.keys()) - set(self.keys())
raise ValueError("Potentials not specified for {}".format(s))
return sum(self.get(k, 0) * v for k, v in composition.items())
def __repr__(self):
return "ChemPots: " + super().__repr__()
if __name__ == "__main__":
import doctest
doctest.testmod()
|
blondegeek/pymatgen
|
pymatgen/core/composition.py
|
Python
|
mit
| 49,756
|
[
"pymatgen"
] |
b04866790d9bbb7f21fdec978da0d5ae2bfe7c55287303952480da76472477ce
|
# (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import os
import sys
import time
import threading
from itertools import chain
from ansible import constants as C
from ansible.plugins.cache.base import BaseCacheModule
try:
import memcache
except ImportError:
print('python-memcached is required for the memcached fact cache')
sys.exit(1)
class ProxyClientPool(object):
"""
Memcached connection pooling for thread/fork safety. Inspired by py-redis
connection pool.
Available connections are maintained in a deque and released in a FIFO manner.
"""
def __init__(self, *args, **kwargs):
self.max_connections = kwargs.pop('max_connections', 1024)
self.connection_args = args
self.connection_kwargs = kwargs
self.reset()
def reset(self):
self.pid = os.getpid()
self._num_connections = 0
self._available_connections = collections.deque(maxlen=self.max_connections)
self._locked_connections = set()
self._lock = threading.Lock()
def _check_safe(self):
if self.pid != os.getpid():
with self._lock:
if self.pid == os.getpid():
                    # bail out - another thread already reset the pool
return
self.disconnect_all()
self.reset()
def get_connection(self):
self._check_safe()
try:
connection = self._available_connections.popleft()
except IndexError:
connection = self.create_connection()
self._locked_connections.add(connection)
return connection
def create_connection(self):
if self._num_connections >= self.max_connections:
raise RuntimeError("Too many memcached connections")
self._num_connections += 1
return memcache.Client(*self.connection_args, **self.connection_kwargs)
def release_connection(self, connection):
self._check_safe()
self._locked_connections.remove(connection)
self._available_connections.append(connection)
def disconnect_all(self):
for conn in chain(self._available_connections, self._locked_connections):
conn.disconnect_all()
def __getattr__(self, name):
def wrapped(*args, **kwargs):
return self._proxy_client(name, *args, **kwargs)
return wrapped
def _proxy_client(self, name, *args, **kwargs):
conn = self.get_connection()
try:
return getattr(conn, name)(*args, **kwargs)
finally:
self.release_connection(conn)
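# Usage sketch: any memcache.Client method can be called on the pool itself
# thanks to the __getattr__ proxying above (hypothetical address):
#   pool = ProxyClientPool(['127.0.0.1:11211'], debug=0)
#   pool.set('key', 'value')  # borrows a connection, then releases it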
class CacheModuleKeys(collections.MutableSet):
"""
A set subclass that keeps track of insertion time and persists
the set in memcached.
"""
PREFIX = 'ansible_cache_keys'
def __init__(self, cache, *args, **kwargs):
self._cache = cache
self._keyset = dict(*args, **kwargs)
def __contains__(self, key):
return key in self._keyset
def __iter__(self):
return iter(self._keyset)
def __len__(self):
return len(self._keyset)
def add(self, key):
self._keyset[key] = time.time()
self._cache.set(self.PREFIX, self._keyset)
def discard(self, key):
del self._keyset[key]
self._cache.set(self.PREFIX, self._keyset)
def remove_by_timerange(self, s_min, s_max):
for k in self._keyset.keys():
t = self._keyset[k]
if s_min < t < s_max:
del self._keyset[k]
self._cache.set(self.PREFIX, self._keyset)
class CacheModule(BaseCacheModule):
def __init__(self, *args, **kwargs):
if C.CACHE_PLUGIN_CONNECTION:
connection = C.CACHE_PLUGIN_CONNECTION.split(',')
else:
connection = ['127.0.0.1:11211']
self._timeout = C.CACHE_PLUGIN_TIMEOUT
self._prefix = C.CACHE_PLUGIN_PREFIX
self._cache = ProxyClientPool(connection, debug=0)
self._keys = CacheModuleKeys(self._cache, self._cache.get(CacheModuleKeys.PREFIX) or [])
def _make_key(self, key):
return "{0}{1}".format(self._prefix, key)
def _expire_keys(self):
if self._timeout > 0:
expiry_age = time.time() - self._timeout
self._keys.remove_by_timerange(0, expiry_age)
def get(self, key):
value = self._cache.get(self._make_key(key))
# guard against the key not being removed from the keyset;
# this could happen in cases where the timeout value is changed
# between invocations
if value is None:
self.delete(key)
raise KeyError
return value
def set(self, key, value):
self._cache.set(self._make_key(key), value, time=self._timeout, min_compress_len=1)
self._keys.add(key)
def keys(self):
self._expire_keys()
return list(iter(self._keys))
def contains(self, key):
self._expire_keys()
return key in self._keys
def delete(self, key):
self._cache.delete(self._make_key(key))
self._keys.discard(key)
def flush(self):
for key in self.keys():
self.delete(key)
def copy(self):
return self._keys.copy()
|
majidaldo/ansible
|
v2/ansible/plugins/cache/memcached.py
|
Python
|
gpl-3.0
| 5,987
|
[
"Brian"
] |
62fba83cc21ce9f4ab954235d638870da8edaf6befc05c6667410a9f127789ec
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2014 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br> ##
import doctest
import os
import re
import sys
import nose
from nose.plugins.doctests import DocFileCase
from nose.plugins import Plugin
import stoqlib
#
# YANC nose plugin
# Copyright 2011-2013 Arthur Noel
#
_RESET = '\033[0m' # pylint: disable=W1401
_ATTRIBUTES = dict(
bold=1, dark=2, underline=4, blink=5, reverse=7, concealed=8)
_COLORS = dict(
grey=30, red=31, green=32, yellow=33, blue=34, magenta=35, cyan=36, white=37)
def _colored(text, color=None, attrs=None):
if os.getenv('ANSI_COLORS_DISABLED') is None:
fmt_str = '\033[%dm%s' # pylint: disable=W1401
if color is not None:
text = fmt_str % (_COLORS[color], text)
if attrs is not None:
for attr in attrs:
text = fmt_str % (_ATTRIBUTES[attr], text)
text += _RESET
return text
class ColorStream(object):
_colors = {
"green": ("OK", "ok", "."),
"red": ("ERROR", "FAILED", "errors", "E"),
"yellow": ("FAILURE", "FAIL", "failures", "F"),
"magenta": ("SKIP", "S"),
"blue": ("-" * 70, "=" * 70),
}
def __init__(self, stream):
self._stream = stream
self._color_map = {}
self._patten_map = {}
for color, labels in self._colors.items():
for label in labels:
self._color_map[label] = color
if len(label) > 1:
self._patten_map[label] = re.compile(r"%s=\d+" % label)
def __getattr__(self, key):
return getattr(self._stream, key)
def _colorize(self, string, color=None):
if not string or color is not None:
return string
color = self._color_map.get(string)
if color is None:
for key in self._color_map:
# looking for a test failure as LABEL: str(test)
if string.startswith(key + ":"):
segments = string.split(":")
label = self._colorize(segments[0] + ":",
self._color_map[key])
desc = ":".join(segments[1:])
if desc.startswith(" Failure: "):
desc = _colored(desc, self._color_map[key])
return label + desc
for key, key_color in self._color_map.items():
# looking for label=number in the summary
pattern = self._patten_map.get(key)
if pattern is not None:
for match in pattern.findall(string):
string = string.replace(
match, self._colorize(match, key_color))
if color is not None:
string = _colored(string, color, attrs=("bold",))
return string
def write(self, string):
self._stream.write(self._colorize(string))
def writeln(self, string=""):
self._stream.writeln(self._colorize(string))
class YANC(Plugin):
"""Yet another nose colorer"""
name = "yanc"
previous_path = None
previous_klass = None
def options(self, parser, env):
super(YANC, self).options(parser, env)
def configure(self, options, conf):
super(YANC, self).configure(options, conf)
self.color = (
hasattr(self.conf, "stream") and
hasattr(self.conf.stream, "isatty") and
self.conf.stream.isatty())
def startContext(self, context):
self.should_format = True
def stopContext(self, context):
self.should_format = False
def describeTest(self, test):
path = test.id()
parts = path.split('.')
method = parts.pop()
try:
klass = parts.pop()
except IndexError:
return test.test._dt_test.filename[len(os.getcwd()) + 1:]
path = '.'.join(parts)
return '%s:%s.%s' % (path, klass, method)
def begin(self):
if self.color:
self.conf.stream = ColorStream(self.conf.stream)
def finalize(self, result):
if self.color:
self.conf.stream = self.conf.stream._stream
#
# Stoq nose plugin
#
class Stoq(Plugin):
"""Stoq plugin for nose tests
    This plugin is responsible for setting up the environment so Stoq
    and its plugins can be tested properly.
"""
name = "stoq"
def begin(self):
# The tests require that the environment is currently set to C, to avoid
# translated strings and use the default date/number/currency formatting
os.environ['LC_ALL'] = 'C'
os.environ['LANG'] = 'C'
os.environ['LANGUAGE'] = 'C'
if 'STOQ_USE_GI' in os.environ:
from stoq.lib import gicompat
gicompat.enable()
# If we import tests.base before Cover.setup() in the coverage plugin
# is called the statistics will skip the modules imported by tests.base
from stoqlib.database.testsuite import bootstrap_suite
hostname = os.environ.get('STOQLIB_TEST_HOSTNAME')
dbname = os.environ.get('STOQLIB_TEST_DBNAME')
username = os.environ.get('STOQLIB_TEST_USERNAME')
password = os.environ.get('STOQLIB_TEST_PASSWORD')
port = int(os.environ.get('STOQLIB_TEST_PORT') or 0)
quick = os.environ.get('STOQLIB_TEST_QUICK', None) is not None
config = os.path.join(
os.path.dirname(stoqlib.__file__), 'tests', 'config.py')
if os.path.exists(config):
execfile(config, globals(), locals())
bootstrap_suite(address=hostname, dbname=dbname, port=port,
username=username, password=password, quick=quick)
# The doctests plugin in nosetests 1.1.2 doesn't have --doctest-options,
# which we need to set to ELLIPSIS, so monkeypatch that support in.
# We can remove this monkeypatch as soon as we migrate to trusty
def _init(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None, obj=None, result_var='_'):
self._result_var = result_var
self._nose_obj = obj
super(DocFileCase, self).__init__(
test, optionflags=doctest.ELLIPSIS | doctest.REPORT_ONLY_FIRST_FAILURE,
setUp=setUp, tearDown=tearDown,
checker=checker)
DocFileCase.__init__ = _init
def _collect_coverage_modules(filenames):
# Collects a list of coverage modules given a set of filenames
# stoqlib/domain -> stoqlib.domain
# stoqlib/domain/test -> stoqlib.domain
# stoqlib/domain/test/test_account -> stoqlib.domain.account
# stoqlib/domain/test/test_account.py -> stoqlib.domain.account
# (for instance via __tests__ global attribute in test_account.py)
for filename in filenames:
if os.path.isdir(filename):
filename = filename.rstrip('/')
if filename.endswith('/test'):
filename = filename[:-5]
yield filename.replace('/', '.')
continue
try:
fd = open(filename)
except IOError:
continue
for line in fd.readlines():
if not line.startswith('__tests__'):
continue
line = line[:-1]
test_filename = line.split(' = ', 1)[1][1:-1]
if test_filename.endswith('.py'):
test_filename = test_filename[:-3]
test_filename = test_filename.replace('/', '.')
yield test_filename
break
# FIXME: This is mimicking what is done on the module containing the IPlugin
# implemented class. Different from stoq that will always import that module,
# nosetests will try to look for tests in each .py, producing ImportErrors.
# This can be removed when the plugins import situation is solved.
plugins_topdir = os.path.join(
os.path.dirname(os.path.dirname(stoqlib.__file__)), 'plugins')
for plugin_dir in os.listdir(plugins_topdir):
sys.path.append(os.path.join(plugins_topdir, plugin_dir))
def main(args, extra_plugins=None):
if '--sql' in args:
args.remove('--sql')
from stoqlib.database.debug import enable
enable()
if '--coverage' in args:
args.remove('--coverage')
modules = _collect_coverage_modules(args)
if modules:
args.append('--with-coverage')
args.append('--cover-package=%s' % (','.join(modules), ))
for extra_option in [
# Disable capturing of stdout, we often use this for print debugging
'--nocapture',
# Disable logging capture, kiwi is quite verbose and doesn't give
# a lot of useful information
'--nologcapture',
# Be verbose, one line per test instead of just a dot (like trial)
'--verbose',
# Detailed errors, useful for tracking down assertEquals
'--detailed-errors',
# Enable doctests
'--with-doctest',
# Our doctests ends with .txt, eg sellable.txt
'--doctest-extension=txt']:
        if extra_option not in args:
args.append(extra_option)
plugins = [Stoq(), YANC()]
if extra_plugins is not None:
plugins.extend(extra_plugins)
# --with-plugin must be the last args
for p in plugins:
args.append('--with-%s' % p.name)
return nose.main(argv=args, addplugins=plugins)
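# Usage sketch (hypothetical entry point):
#   import sys
#   sys.exit(main(sys.argv))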
|
tiagocardosos/stoq
|
stoqlib/test/tests_runner.py
|
Python
|
gpl-2.0
| 10,136
|
[
"VisIt"
] |
6b4dc73d64cb4e3724029754a70cb19592769acbe3769aa910af69ec70f84e3e
|
# $HeadURL$
__RCSID__ = "$Id$"
"""This Backend sends the Log Messages to a Log Server
It will only report to the server ERROR, EXCEPTION, FATAL
and ALWAYS messages.
"""
import threading
import Queue
from DIRAC.Core.Utilities import Time, Network
from DIRAC.FrameworkSystem.private.logging.backends.BaseBackend import BaseBackend
from DIRAC.FrameworkSystem.private.logging.LogLevels import LogLevels
class RemoteBackend( BaseBackend, threading.Thread ):
def __init__( self, optionsDictionary ):
threading.Thread.__init__( self )
self.__interactive = optionsDictionary[ 'Interactive' ]
self.__sleep = optionsDictionary[ 'SleepTime' ]
self._messageQueue = Queue.Queue()
self._Transactions = []
self._alive = True
self._site = optionsDictionary[ 'Site' ]
self._hostname = Network.getFQDN()
self._logLevels = LogLevels()
self._negativeLevel = self._logLevels.getLevelValue( 'ERROR' )
self._positiveLevel = self._logLevels.getLevelValue( 'ALWAYS' )
self._maxBundledMessages = 20
self.setDaemon(1)
self.start()
def doMessage( self, messageObject ):
self._messageQueue.put( messageObject )
def run( self ):
import time
while self._alive:
self._bundleMessages()
time.sleep( self.__sleep )
def _bundleMessages( self ):
while not self._messageQueue.empty():
bundle = []
while ( len( bundle ) < self._maxBundledMessages ) and \
( not self._messageQueue.empty() ):
message = self._messageQueue.get()
if self._testLevel( message.getLevel() ):
bundle.append( message.toTuple() )
if len( bundle ):
self._sendMessageToServer( bundle )
if len( self._Transactions ):
self._sendMessageToServer()
def _sendMessageToServer( self, messageBundle=None ):
from DIRAC.Core.DISET.RPCClient import RPCClient
if messageBundle:
self._Transactions.append( messageBundle )
TransactionsLength = len( self._Transactions )
if TransactionsLength > 100:
del self._Transactions[:TransactionsLength-100]
TransactionsLength = 100
try:
oSock = RPCClient( "Framework/SystemLogging" )
    except Exception:
return False
while TransactionsLength:
result = oSock.addMessages( self._Transactions[0],
self._site, self._hostname )
if result['OK']:
TransactionsLength = TransactionsLength - 1
self._Transactions.pop(0)
else:
return False
return True
def _testLevel( self, sLevel ):
messageLevel = self._logLevels.getLevelValue( sLevel )
return messageLevel <= self._negativeLevel or \
messageLevel >= self._positiveLevel
def flush( self ):
self._alive = False
    if not self.__interactive and self._sendMessageToServer():
while not self._messageQueue.empty():
self._bundleMessages()
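# Sketch of the level filter above: with the default thresholds, messages at
# or below ERROR (e.g. ERROR, EXCEPTION, FATAL) and at or above ALWAYS pass
# _testLevel() and are forwarded; intermediate levels such as INFO are dropped.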
|
avedaee/DIRAC
|
FrameworkSystem/private/logging/backends/RemoteBackend.py
|
Python
|
gpl-3.0
| 2,903
|
[
"DIRAC"
] |
5f1da7aa092689618507cf59b51908104204547ce352643dc4c6c8be93cf95da
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines the BorgQueen class, which manages drones to assimilate
data using Python's multiprocessing.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 18, 2012"
import os
import json
import logging
from monty.io import zopen
from monty.json import MontyEncoder, MontyDecoder
from multiprocessing import Manager, Pool
logger = logging.getLogger("BorgQueen")
class BorgQueen:
"""
The Borg Queen controls the drones to assimilate data in an entire
directory tree. Uses multiprocessing to speed up things considerably. It
also contains convenience methods to save and load data between sessions.
Args:
drone (Drone): An implementation of
:class:`pymatgen.apps.borg.hive.AbstractDrone` to use for
assimilation.
rootpath (str): The root directory to start assimilation. Leave it
as None if you want to do assimilation later, or is using the
BorgQueen to load previously assimilated data.
        number_of_drones (int): Number of drones to parallelize over.
Typical machines today have up to four processors. Note that you
won't see a 100% improvement with two drones over one, but you
will definitely see a significant speedup of at least 50% or so.
If you are running this over a server with far more processors,
the speedup will be even greater.
"""
def __init__(self, drone, rootpath=None, number_of_drones=1):
self._drone = drone
self._num_drones = number_of_drones
self._data = []
if rootpath:
if number_of_drones > 1:
self.parallel_assimilate(rootpath)
else:
self.serial_assimilate(rootpath)
def parallel_assimilate(self, rootpath):
"""
Assimilate the entire subdirectory structure in rootpath.
"""
logger.info('Scanning for valid paths...')
valid_paths = []
for (parent, subdirs, files) in os.walk(rootpath):
valid_paths.extend(self._drone.get_valid_paths((parent, subdirs,
files)))
manager = Manager()
data = manager.list()
status = manager.dict()
status['count'] = 0
status['total'] = len(valid_paths)
logger.info('{} valid paths found.'.format(len(valid_paths)))
p = Pool(self._num_drones)
p.map(order_assimilation, ((path, self._drone, data, status)
for path in valid_paths))
for d in data:
self._data.append(json.loads(d, cls=MontyDecoder))
def serial_assimilate(self, rootpath):
"""
Assimilate the entire subdirectory structure in rootpath serially.
"""
valid_paths = []
for (parent, subdirs, files) in os.walk(rootpath):
valid_paths.extend(self._drone.get_valid_paths((parent, subdirs,
files)))
count = 0
total = len(valid_paths)
for path in valid_paths:
newdata = self._drone.assimilate(path)
self._data.append(newdata)
count += 1
logger.info('{}/{} ({:.2f}%) done'.format(count, total,
count / total * 100))
def get_data(self):
"""
Returns an list of assimilated objects
"""
return self._data
def save_data(self, filename):
"""
Save the assimilated data to a file.
Args:
filename (str): filename to save the assimilated data to. Note
that if the filename ends with gz or bz2, the relevant gzip
or bz2 compression will be applied.
"""
with zopen(filename, "wt") as f:
json.dump(list(self._data), f, cls=MontyEncoder)
def load_data(self, filename):
"""
Load assimilated data from a file
"""
with zopen(filename, "rt") as f:
self._data = json.load(f, cls=MontyDecoder)
def order_assimilation(args):
"""
Internal helper method for BorgQueen to process assimilation
"""
(path, drone, data, status) = args
newdata = drone.assimilate(path)
if newdata:
data.append(json.dumps(newdata, cls=MontyEncoder))
status['count'] += 1
count = status['count']
total = status['total']
logger.info('{}/{} ({:.2f}%) done'.format(count, total,
count / total * 100))
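# --- Illustrative usage sketch (not part of the original module) ---
# A minimal driver assuming a tree of VASP runs under "calcs/"; the drone
# choice and the directory name are assumptions for illustration only.
if __name__ == "__main__":
    from pymatgen.apps.borg.hive import VaspToComputedEntryDrone

    queen = BorgQueen(VaspToComputedEntryDrone(), rootpath="calcs",
                      number_of_drones=2)
    queen.save_data("entries.json.gz")  # .gz suffix selects gzip via zopen
    for entry in queen.get_data():
        print(entry)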
|
dongsenfo/pymatgen
|
pymatgen/apps/borg/queen.py
|
Python
|
mit
| 4,940
|
[
"pymatgen"
] |
817df79eaed5c31096b1f46c7185a024a0d48f85155effbad8289bb2bb556c5a
|
# coding: utf-8
from vale import ValeCodegen
from vale import ValeParser
from vale import construct_model
from sympy import S
from sympy.core.sympify import sympify
import numpy as np
# ...
def run(filename):
# ...
from caid.cad_geometry import cube
geometry = cube()
from clapp.spl.mapping import Mapping
mapping = Mapping(geometry=geometry)
# ...
# ... creates discretization parameters
from clapp.disco.parameters.bspline import BSpline
bspline_params = BSpline([8,8,8], [2,2,2], \
bc_min=[0,0,0], \
bc_max=[0,0,0])
# ...
# ... create a context from discretization
from clapp.fema.context import Context
context = Context(dirname="input", \
discretization_params=bspline_params)
# ...
# ...
pde = construct_model(filename, backend="clapp", \
context=context, mapping=mapping)
# ...
# ... accessing the pde declarations
V = pde["V"]
phi = pde["phi"]
form_a = pde["a"]
form_b = pde["b"]
f = pde["f"]
# ...
# ...
assembler_a = form_a.assembler
matrix = form_a.matrix
assembler_b = form_b.assembler
rhs = form_b.vector
# ...
# ... define the constants
constants = {"r": 0.5}
# ...
# ... set expression for the function f
f.set("2*x*(1-x)*y*(1-y) + 2*y*(1-y)*z*(1-z) + 2*z*(1-z)*x*(1-x) + 0.5*x*(1-x)*y*(1-y)*z*(1-z) ", \
constants=constants)
# ...
# ...
assembler_a.set_constants(constants, verbose=False)
# ...
# ...
assembler_a.assemble()
assembler_b.assemble()
# ...
# ...
from clapp.plaf.parameters.linear_solver import LAPACK_LU
from clapp.plaf.parameters.linear_solver import DRIVER
from clapp.plaf.linear_solver import Linear_solver
params = DRIVER(solver=LAPACK_LU())
linsol = Linear_solver(matrix=matrix, dirname="input", parameters=params)
# ...
# ...
y = linsol.solve(rhs)
# ...
# ... exports the field
phi.set(y)
# ...
    # ... exports phi to vtk file. Can be used in VisIt and ParaView
filename_out = "uh_3d_"+filename.split('/')[-1].split('.')[0] + ".vtk"
phi.to_vtk(filename_out, mapping=mapping, n_pts=20)
# ...
# ... define the analytical solution for phi
from clapp.vale.expressions.function import Function
phi_analytic = Function("phi_analytic", "x*(1-x)*y*(1-y)*z*(1-z)", args=["x", "y", "z"])
# ...
# ... compute L2 error
x = phi.compute_l2_error(mapping=mapping, function=phi_analytic)[0,0]
print((" L2-error norm : " + str(x)))
# ...
# ...
cmd = "rm -rf input"
os.system(cmd)
# ...
print(("> run using ", filename, " passed."))
# ...
# ...
import clapp.common.utils as clapp_utils
# ... initializing Clapp
clapp_utils.initialize()
# ...
import os
cmd = "rm -rf input"
os.system(cmd)
run(filename="inputs/3d/laplace.vl")
cmd = "rm -rf input"
os.system(cmd)
# ... Finalizing Clapp
clapp_utils.finalize()
# ...
|
ratnania/vale
|
tests/test_laplace_3d.py
|
Python
|
mit
| 3,147
|
[
"ParaView",
"VTK",
"VisIt"
] |
fb590ec6b0b554bbaf72f13dab717f303449f305d79436e595ecb6422d7f3b20
|
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2015 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,
Brett G. Olivier (bgoli@users.sourceforge.net)
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Brett G. Olivier
"""
from version import __version__
# Structural Analysis module
from pysces.PyscesStoich import Stoich
from PyscesCore2 import StructMatrix
class PyscesEnhancedStoich(Stoich):
"""PySCeS stoichiometry class for use with core2"""
N = None
Nr = None
K = None
K0 = None
L = None
L0 = None
Gamma = None
def __init__(self, core):
Stoich.__init__(self, core.stoichiometric_matrix.array)
self.species = core.stoichiometric_matrix.row
self.reactions = core.stoichiometric_matrix.col
def getNullSpaces(self):
self.AnalyseK()
self.AnalyseL()
def testNullSpaces(self):
#TODO: build in nullspace validity checks from PyscesModel
pass
def setStructMatrices(self):
self.N = StructMatrix(self.nmatrix, self.nmatrix_row, self.nmatrix_col)
self.N.setRow(self.species)
self.N.setCol(self.reactions)
self.Nr = StructMatrix(self.nrmatrix, self.nrmatrix_row, self.nrmatrix_col)
self.Nr.setRow(self.species)
self.Nr.setCol(self.reactions)
self.K = StructMatrix(self.kmatrix, self.kmatrix_row, self.kmatrix_col)
self.K.setRow(self.reactions)
self.K.setCol(self.reactions)
self.K0 = StructMatrix(self.kzeromatrix, self.kzeromatrix_row, self.kzeromatrix_col)
self.K0.setRow(self.reactions)
self.K0.setCol(self.reactions)
self.L = StructMatrix(self.lmatrix, self.lmatrix_row, self.lmatrix_col)
self.L.setRow(self.species)
self.L.setCol(self.species)
self.L0 = StructMatrix(self.lzeromatrix, self.lzeromatrix_row, self.lzeromatrix_col)
self.L0.setRow(self.species)
self.L0.setCol(self.species)
if self.info_moiety_conserve:
self.Gamma = StructMatrix(self.conservation_matrix, self.conservation_matrix_row, self.conservation_matrix_col)
self.Gamma.setRow(self.species)
self.Gamma.setCol(self.species)
class StructuralModule(object):
core = None
struct = None
def setCore(self, core):
self.core = core
self.struct = None
        if self.core.stoichiometric_matrix is None:
print "StructuralModule building stoichiometric matrix ..."
self.core.setStoichiometricMatrix()
def getCore(self):
self.core.struct = self.struct
return self.core
def analyseStoichiometry(self):
self.struct = PyscesEnhancedStoich(self.core)
self.struct.getNullSpaces()
self.struct.setStructMatrices()
# Integration Module
import numpy
## class StateDataObj(object):
## flux = None
## flux_labels = None
## species = None
## species_labels = None
## valid = True
## _suffix = None
## _prefix = None
## def __init__(self):
## self.species_labels = []
## self.flux_labels = []
## def setSpecies(self, name, value, suffix=None):
## if suffix != None:
## name = name + suffix
## if name not in self.species_labels:
## self.species_labels.append(name)
## self._suffix = suffix
## setattr(self, name, value)
## def setFlux(self, name, value, prefix=None):
## if prefix != None:
## name = prefix + name
## if name not in self.flux_labels:
## self.flux_labels.append(name)
## self._prefix = prefix
## setattr(self, name, value)
## def setAllSpecies(self, species_labels, species, suffix=None):
## assert len(species_labels) == len(species), '\nThis aint gonna work1'
## self.species_labels = []
## # self.species_labels = tuple(species_labels)
## self.species = species.copy()
## for S in range(len(species_labels)):
## self.setSpecies(species_labels[S], species[S], suffix)
## def setAllFluxes(self, flux_labels, flux, prefix=None):
## assert len(flux_labels) == len(flux), '\nThis aint gonna work2'
## self.flux_labels = []
## # self.flux_labels = tuple(flux_labels)
## self.flux = flux.copy()
## for J in range(len(flux_labels)):
## self.setFlux(flux_labels[J], flux[J], prefix)
## def getFlux(self, name):
## if prefix != None:
## name = prefix + name
## return getattr(self, name)
## def getSpecies(self, name):
## if suffix != None:
## name = name + suffix
## return getattr(self, name)
class StateDataObj(object):
"""
New class used to store steady-state data.
"""
fluxes = None
species = None
rules = None
xdata = None
flux_labels = None
species_labels = None
rules_labels = None
xdata_labels = None
HAS_FLUXES = False
HAS_SPECIES = False
HAS_RULES = False
HAS_XDATA = False
IS_VALID = True
## def setLabels(self, species=None, fluxes=None, rules=None):
## """set the species, rate and rule label lists"""
## if species != None:
## self.species_labels = species
## if fluxes != None:
## self.flux_labels = fluxes
## if rules != None:
## self.rules_labels = rules
def setSpecies(self, species, lbls=None):
"""Set the species array"""
self.species = species
self.HAS_SPECIES = True
if lbls != None:
self.species_labels = lbls
for s in range(len(self.species_labels)):
setattr(self, self.species_labels[s], self.species[s])
def setFluxes(self, fluxes, lbls=None):
"""set the flux array"""
self.fluxes = fluxes
self.HAS_FLUXES = True
if lbls != None:
self.flux_labels = lbls
for f in range(len(self.flux_labels)):
setattr(self, self.flux_labels[f], self.fluxes[f])
def setRules(self, rules, lbls=None):
"""Set the results of rate rules"""
self.rules = rules
self.HAS_RULES = True
if lbls != None:
self.rules_labels = lbls
for r in range(len(self.rules_labels)):
setattr(self, self.rules_labels[r], self.rules[r])
def setXData(self, xdata, lbls=None):
"""Sets extra simulation data"""
self.xdata = xdata
self.HAS_XDATA = True
if lbls != None:
self.xdata_labels = lbls
for x in range(len(self.xdata_labels)):
setattr(self, self.xdata_labels[x], self.xdata[x])
def getSpecies(self, lbls=False):
"""return species array"""
output = None
if self.HAS_SPECIES:
output = self.species
if not lbls:
return output
else:
return output, self.species_labels
def getFluxes(self, lbls=False):
"""return flux array"""
output = None
if self.HAS_FLUXES:
output = self.fluxes
if not lbls:
return output
else:
return output, self.flux_labels
def getRules(self, lbls=False):
"""Return rule array"""
output = None
if self.HAS_RULES:
output = self.rules
if not lbls:
return output
else:
return output, self.rules_labels
def getXData(self, lbls=False):
"""Return xdata array"""
output = None
if self.HAS_XDATA:
output = self.xdata
if not lbls:
return output
else:
return output, self.xdata_labels
def getAllStateData(self, lbls=False):
"""
Return all available data as species+fluxes+rules
if lbls=True returns (array,labels) else just array
"""
labels = []
output = None
if self.HAS_SPECIES:
output = self.species
labels += self.species_labels
if self.HAS_FLUXES:
            if output is None:  # '== None' misbehaves once output is a numpy array
output = self.fluxes
else:
output = numpy.hstack((output, self.fluxes))
labels += self.flux_labels
if self.HAS_RULES:
            if output is None:
output = self.rules
else:
output = numpy.hstack((output, self.rules))
labels += self.rules_labels
if self.HAS_XDATA:
            if output is None:
output = self.xdata
else:
output = numpy.hstack((output, self.xdata))
labels += self.xdata_labels
if not lbls:
return output
else:
return output, labels
def getStateData(self, *args, **kwargs):
"""getSimData(\*args) feed this method species/rate labels and it
will return an array of [time, sp1, r1, ....]
"""
if kwargs.has_key('lbls'):
lbls = kwargs['lbls']
else:
lbls = False
lout = []
output = []
for roc in args:
if self.HAS_SPECIES and roc in self.species_labels:
lout.append(roc)
output.append(self.species[self.species_labels.index(roc)])
elif self.HAS_FLUXES and roc in self.flux_labels:
lout.append(roc)
output.append(self.fluxes[self.flux_labels.index(roc)])
elif self.HAS_RULES and roc in self.rules_labels:
lout.append(roc)
output.append(self.rules[self.rules_labels.index(roc)])
elif self.HAS_XDATA and roc in self.xdata_labels:
lout.append(roc)
output.append(self.xdata[self.xdata_labels.index(roc)])
else:
print 'I don\'t have an attribute %s ... ignoring.' % roc
if not lbls:
return output
else:
return numpy.array(output), lout
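# --- Illustrative usage sketch (not part of the original module) ---
def _statedata_example():
    """Populate a StateDataObj by hand; the labels and values below are
    made up. In PySCeS these arrays come from a steady-state solver."""
    ss = StateDataObj()
    ss.setSpecies(numpy.array([1.0, 2.0]), lbls=['s1', 's2'])
    ss.setFluxes(numpy.array([0.5]), lbls=['J_R1'])
    print ss.getStateData('s1', 'J_R1')        # -> [1.0, 0.5]
    print ss.getAllStateData(lbls=True)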
## class IntegrationDataObj(object):
## """
## This class is specifically designed to store the results of a time simulation
## It has methods for setting the Time, Labels, Species and Rate data and
## getting Time, Species and Rate (including time) arrays. However, of more use:
## - getOutput(*arg) feed this method species/rate labels and it will return
## an array of [time, sp1, r1, ....]
## - getDataAtTime(time) the data generated at time point "time".
## - getDataInTimeInterval(time, bounds=None) more intelligent version of the above
## returns an array of all data points where: time-bounds <= time <= time+bounds
## where bounds defaults to stepsize.
## """
## time = None
## rates = None
## species = None
## rate_labels = None
## species_labels = None
## def setLabels(self, species, rates):
## """set the species and rate label lists"""
## self.species_labels = species
## self.rate_labels = rates
## def setTime(self, time):
## """Set the time vector"""
## self.time = time.reshape(len(time), 1)
## def setSpecies(self, species):
## """Set the species array"""
## self.species = species
## def setRates(self, rates):
## """set the rate array"""
## self.rates = rates
## def getTime(self):
## """return the time vector"""
## assert self.time != None, "\nNo time"
## return self.time.reshape(len(self.time),)
## def getSpecies(self):
## """return time+species array"""
## assert self.species != None, "\nNo species"
## return numpy.hstack((self.time, self.species))
## def getRates(self):
## """return time+rate array"""
## assert self.rates != None, "\nNo rates"
## return numpy.hstack((self.time, self.rates))
## def getDataAtTime(self, time):
## """Return all data generated at "time" """
## t = None
## sp = None
## ra = None
## temp_t = self.time.reshape(len(self.time),)
## for tt in range(len(temp_t)):
## if temp_t[tt] == time:
## t = tt
## if self.species is not None:
## sp = self.species.take([tt], axis=0)
## if self.rates is not None:
## ra = self.rates.take([tt], axis=0)
## break
## output = None
## if t is not None:
## output = numpy.array([[temp_t[t]]])
## if sp is not None:
## output = numpy.hstack((output,sp))
## if ra is not None:
## output = numpy.hstack((output,ra))
## return output
## def getDataInTimeInterval(self, time, bounds=None):
## """
## getDataInTimeInterval(time, bounds=None) returns an array of all
## data points where: time-bounds <= time <= time+bounds
## where bound defaults to stepsize
## """
## temp_t = self.time.reshape(len(self.time),)
## if bounds == None:
## bounds = temp_t[1] - temp_t[0]
## c1 = (temp_t >= time-bounds)
## c2 = (temp_t <= time+bounds)
## print 'Searching (%s:%s:%s)' % (time-bounds, time, time+bounds)
## t = []
## sp = None
## ra = None
## for tt in range(len(c1)):
## if c1[tt] and c2[tt]:
## t.append(tt)
## output = None
## if len(t) > 0:
## output = self.time.take(t)
## output = output.reshape(len(output),1)
## if self.species is not None:
## output = numpy.hstack((output, self.species.take(t, axis=0)))
## if self.rates is not None:
## output = numpy.hstack((output, self.rates.take(t, axis=0)))
## return output
## def getOutput(self, *args):
## """getOutput(*arg) feed this method species/rate labels and it
## will return an array of [time, sp1, r1, ....]
## """
## output = self.time
## for roc in args:
## if roc in self.species_labels:
## assert self.species != None, "\nNo species"
## output = numpy.hstack((output, self.species.take([self.species_labels.index(roc)], axis=-1)))
## if roc in self.rate_labels:
## assert self.rates != None, "\nNo rates"
## output = numpy.hstack((output, self.rates.take([self.rate_labels.index(roc)], axis=-1)))
## return output
class IntegrationDataObj(object):
"""
This class is specifically designed to store the results of a time simulation
It has methods for setting the Time, Labels, Species and Rate data and
getting Time, Species and Rate (including time) arrays. However, of more use:
- getOutput(\*args) feed this method species/rate labels and it will return
an array of [time, sp1, r1, ....]
- getDataAtTime(time) the data generated at time point "time".
- getDataInTimeInterval(time, bounds=None) more intelligent version of the above
returns an array of all data points where: time-bounds <= time <= time+bounds
"""
time = None
rates = None
species = None
rules = None
xdata = None
time_label = 'Time'
rate_labels = None
species_labels = None
rules_labels = None
xdata_labels = None
HAS_SPECIES = False
HAS_RATES = False
HAS_RULES = False
HAS_TIME = False
HAS_XDATA = False
IS_VALID = True
TYPE_INFO = 'Deterministic'
def setLabels(self, species=None, rates=None, rules=None):
"""set the species, rate and rule label lists"""
if species != None:
self.species_labels = species
if rates != None:
self.rate_labels = rates
if rules != None:
self.rules_labels = rules
def setTime(self, time, lbl=None):
"""Set the time vector"""
self.time = time.reshape(len(time), 1)
self.HAS_TIME = True
if lbl != None:
self.time_label = lbl
def setSpecies(self, species, lbls=None):
"""Set the species array"""
self.species = species
self.HAS_SPECIES = True
if lbls != None:
self.species_labels = lbls
def setRates(self, rates, lbls=None):
"""set the rate array"""
self.rates = rates
self.HAS_RATES = True
if lbls != None:
self.rate_labels = lbls
def setRules(self, rules, lbls=None):
"""Set the results of rate rules"""
self.rules = rules
self.HAS_RULES = True
if lbls != None:
self.rules_labels = lbls
def setXData(self, xdata, lbls=None):
"""Sets extra simulation data"""
self.xdata = xdata
self.HAS_XDATA = True
if lbls != None:
self.xdata_labels = lbls
def getTime(self, lbls=False):
"""return the time vector"""
output = None
if self.HAS_TIME:
output = self.time.reshape(len(self.time),)
if not lbls:
return output
else:
return output, [self.time_label]
def getSpecies(self, lbls=False):
"""return time+species array"""
output = None
if self.HAS_SPECIES:
output = numpy.hstack((self.time, self.species))
labels = [self.time_label]+self.species_labels
else:
output = self.time
labels = [self.time_label]
if not lbls:
return output
else:
return output, labels
def getRates(self, lbls=False):
"""return time+rate array"""
output = None
if self.HAS_RATES:
output = numpy.hstack((self.time, self.rates))
labels = [self.time_label]+self.rate_labels
else:
output = self.time
labels = [self.time_label]
if not lbls:
return output
else:
return output, labels
def getRules(self, lbls=False):
"""Return time+rule array"""
## assert self.rules != None, "\nNo rules"
output = None
if self.HAS_RULES:
output = numpy.hstack((self.time, self.rules))
labels = [self.time_label]+self.rules_labels
else:
output = self.time
labels = [self.time_label]
if not lbls:
return output
else:
return output, labels
def getXData(self, lbls=False):
"""Return time+xdata array"""
## assert self.rules != None, "\nNo rules"
output = None
if self.HAS_XDATA:
output = numpy.hstack((self.time, self.xdata))
labels = [self.time_label]+self.xdata_labels
else:
output = self.time
labels = [self.time_label]
if not lbls:
return output
else:
return output, labels
def getDataAtTime(self, time):
"""Return all data generated at "time" """
#TODO add rate rule data
t = None
sp = None
ra = None
ru = None
xd = None
temp_t = self.time.reshape(len(self.time),)
for tt in range(len(temp_t)):
if temp_t[tt] == time:
t = tt
if self.HAS_SPECIES:
sp = self.species.take([tt], axis=0)
if self.HAS_RATES:
ra = self.rates.take([tt], axis=0)
if self.HAS_RULES:
ru = self.rules.take([tt], axis=0)
if self.HAS_XDATA:
xd = self.xdata.take([tt], axis=0)
break
output = None
if t is not None:
output = numpy.array([[temp_t[t]]])
if sp is not None:
output = numpy.hstack((output,sp))
if ra is not None:
output = numpy.hstack((output,ra))
if ru is not None:
output = numpy.hstack((output,ru))
if xd is not None:
output = numpy.hstack((output,xd))
return output
def getDataInTimeInterval(self, time, bounds=None):
"""
getDataInTimeInterval(time, bounds=None) returns an array of all
data points where: time-bounds <= time <= time+bounds
where bound defaults to stepsize
"""
#TODO add rate rule data
temp_t = self.time.reshape(len(self.time),)
        if bounds is None:
bounds = temp_t[1] - temp_t[0]
c1 = (temp_t >= time-bounds)
c2 = (temp_t <= time+bounds)
print 'Searching (%s:%s:%s)' % (time-bounds, time, time+bounds)
t = []
sp = None
ra = None
for tt in range(len(c1)):
if c1[tt] and c2[tt]:
t.append(tt)
output = None
if len(t) > 0:
output = self.time.take(t)
output = output.reshape(len(output),1)
if self.HAS_SPECIES and self.HAS_TIME:
output = numpy.hstack((output, self.species.take(t, axis=0)))
if self.HAS_RATES:
output = numpy.hstack((output, self.rates.take(t, axis=0)))
if self.HAS_RULES:
output = numpy.hstack((output, self.rules.take(t, axis=0)))
if self.HAS_XDATA:
output = numpy.hstack((output, self.xdata.take(t, axis=0)))
return output
def getOutput(self, *args):
"""
Old alias for getSimData()
getOutput(\*args) feed this method species/rate labels and it
will return an array of [time, sp1, r1, ....]
"""
return self.getSimData(*args)
def getAllSimData(self,lbls=False):
"""
Return all available data as time+species+rates+rules
        if lbls=True returns (array, labels) else just array
"""
labels = [self.time_label]
if self.HAS_SPECIES and self.HAS_TIME:
output = numpy.hstack((self.time, self.species))
labels += self.species_labels
if self.HAS_RATES:
output = numpy.hstack((output, self.rates))
labels +=self.rate_labels
if self.HAS_RULES:
output = numpy.hstack((output, self.rules))
labels += self.rules_labels
if self.HAS_XDATA:
output = numpy.hstack((output, self.xdata))
labels += self.xdata_labels
if not lbls:
return output
else:
return output, labels
def getSimData(self, *args, **kwargs):
"""getSimData(\*args) feed this method species/rate labels and it
will return an array of [time, sp1, r1, ....]
"""
output = self.time
        ## print args
if kwargs.has_key('lbls'):
lbls = kwargs['lbls']
else:
lbls = False
lout = [self.time_label]
for roc in args:
if self.HAS_SPECIES and roc in self.species_labels:
lout.append(roc)
output = numpy.hstack((output, self.species.take([self.species_labels.index(roc)], axis=-1)))
if self.HAS_RATES and roc in self.rate_labels:
lout.append(roc)
output = numpy.hstack((output, self.rates.take([self.rate_labels.index(roc)], axis=-1)))
if self.HAS_RULES and roc in self.rules_labels:
lout.append(roc)
output = numpy.hstack((output, self.rules.take([self.rules_labels.index(roc)], axis=-1)))
if self.HAS_XDATA and roc in self.xdata_labels:
lout.append(roc)
output = numpy.hstack((output, self.xdata.take([self.xdata_labels.index(roc)], axis=-1)))
if not lbls:
return output
else:
return output, lout
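# --- Illustrative usage sketch (not part of the original module) ---
def _integrationdata_example():
    """Build a toy two-point time course; labels and values are made up.
    In PySCeS these arrays come from the integrator."""
    idata = IntegrationDataObj()
    idata.setTime(numpy.array([0.0, 1.0]))
    idata.setSpecies(numpy.array([[1.0, 2.0], [0.9, 2.1]]), lbls=['s1', 's2'])
    idata.setRates(numpy.array([[0.5], [0.4]]), lbls=['R1'])
    print idata.getSimData('s1', 'R1', lbls=True)
    print idata.getDataAtTime(1.0)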
#TODO:
class IntegrationBase(object):
name = None
core = None
data = None
sim_start = None
sim_end = None
sim_point = None
initial_value_vector = None
def setName(self, name):
self.name = name
def getName(self):
return self.name
def setCore(self, core):
self.core = core
self.data = IntegrationData()
self.data.setLabels(self.core.hasVariableSpecies(), self.core.hasReactions())
def getCore(self):
return self.core
def getData(self):
return self.data
|
asttra/pysces
|
pysces/core2/PyscesCore2Modules.py
|
Python
|
bsd-3-clause
| 25,783
|
[
"PySCeS"
] |
c1907ea193a2fef143c4b61fea93b6a57fbf03a70d11710766e3645b839fe9e2
|
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generate pins.cpp for a specified target, using target definitions from the
mbed OS source tree.
It's expecting to be run from the targets/mbedos5 directory.
"""
from __future__ import print_function
import argparse
import ast
import sys
import os
from pycparserext.ext_c_parser import GnuCParser
from pycparser import parse_file, c_ast
# import mbed tools
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'mbed-os'))
from tools.targets import Target
LICENSE = '''/* Copyright JS Foundation and other contributors, http://js.foundation
*
* Licensed under the Apache License, Version 2.0 (the \"License\");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an \"AS IS\" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is generated by generate_pins.py. Please do not modify.
*/
'''
def find_file(root_dir, directories, name):
"""
Find the first instance of file with name 'name' in the directory tree
starting with 'root_dir'.
Filter out directories that are not in directories, or do not start with
TARGET_.
    Since this looks in (essentially) the same directories as the compiler would
when compiling mbed OS, we should only find one PinNames.h.
"""
for root, dirs, files in os.walk(root_dir, topdown=True):
# modify dirs in place
dirs[:] = [directory for directory in dirs if directory in directories or not directory.startswith('TARGET_')]
if name in files:
return os.path.join(root, name)
def enumerate_includes(root_dir, directories):
"""
Walk through the directory tree, starting at root_dir, and enumerate all
valid include directories.
"""
for root, dirs, _ in os.walk(root_dir, topdown=True):
# modify dirs in place
dirs[:] = [dir_label for dir_label in dirs
if dir_label in directories
or (not dir_label.startswith('TARGET_')
and not dir_label.startswith('TOOLCHAIN_'))]
yield root
class TypeDeclVisitor(c_ast.NodeVisitor):
"""
A TypeDecl visitor class that walks the ast and calls a visitor function for every node found.
"""
def __init__(self, filter_names=None):
self.names = filter_names or []
def visit(self, node):
value = None
if node.__class__.__name__ == "TypeDecl":
value = self.visit_typedecl(node)
if value is None:
for _, child_node in node.children():
value = value or self.visit(child_node)
return value
def visit_typedecl(self, node):
"""
Visit a node.
"""
if node.declname in self.names:
return [pin.name for pin in node.type.values.enumerators]
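def _visitor_example():
    """
    Illustrative sketch (not part of the original tool): exercise the visitor
    on a tiny enum parsed with plain pycparser, no cross-compiler needed.
    """
    from pycparser import c_parser
    tiny_ast = c_parser.CParser().parse("typedef enum {PA_0, PA_1} PinName;")
    return TypeDeclVisitor(['PinName']).visit(tiny_ast)  # -> ['PA_0', 'PA_1']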
def enumerate_pins(c_source_file, include_dirs, definitions):
"""
Enumerate pins specified in PinNames.h, by looking for a PinName enum
typedef somewhere in the file.
"""
definitions += ['__attribute(x)__=', '__extension__(x)=', 'register=', '__IO=', 'uint32_t=unsigned int']
gcc_args = ['-E', '-fmerge-all-constants']
gcc_args += ['-I' + directory for directory in include_dirs]
gcc_args += ['-D' + definition for definition in definitions]
parsed_ast = parse_file(c_source_file,
use_cpp=True,
cpp_path='arm-none-eabi-gcc',
cpp_args=gcc_args,
parser=GnuCParser())
# now, walk the AST
visitor = TypeDeclVisitor(['PinName'])
return visitor.visit(parsed_ast)
def write_pins_to_file(pins, pins_file, out_cpp_file):
"""
Write the generated pins for a specified mbed board into the output C++ file.
"""
include = '\n#include "../{}"'.format(pins_file)
count = '''
unsigned int jsmbed_js_magic_string_count = {};
'''.format(len(pins))
lengths = ',\n '.join(str(len(pin)) for pin in pins)
    lengths_source = '''
unsigned int jsmbed_js_magic_string_lengths[] = {
%s
};
''' % lengths
magic_values = ',\n '.join(pins)
magic_source = '''
int jsmbed_js_magic_string_values[] = {
%s
};
''' % magic_values
magic_strings = ',\n '.join('"' + pin + '"' for pin in pins)
magic_string_source = '''
const char * jsmbed_js_magic_strings[] = {
%s
};
''' % magic_strings
    out_cpp_file.write(LICENSE + include + count + lengths_source + magic_source + magic_string_source)
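def _write_pins_example():
    """
    Illustrative sketch (not part of the original tool): render the generated
    C++ into an in-memory buffer (under Python 3) instead of source/pins.cpp.
    The pin names passed in are made up.
    """
    import io
    buf = io.StringIO()
    write_pins_to_file(['PA_0', 'PA_11'], 'PinNames.h', buf)
    return buf.getvalue()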
def main():
"""
Perform the main function of this program
"""
if not os.path.exists('./mbed-os'):
print("Fatal: mbed-os directory does not exist.")
print("Try running 'make getlibs'")
sys.exit(1)
description = """
Generate pins.cpp for a specified mbed board, using target definitions from the
mbed OS source tree.
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument('board', help='mbed board name')
parser.add_argument('-c',
help='Output C++ file (default: %(default)s)',
default='source/pins.cpp',
type=argparse.FileType('w'))
args = parser.parse_args()
board_name = args.board.upper()
target = Target.get_target(board_name)
directory_labels = ['TARGET_' + label for label in target.labels] + target.macros
targets_dir = os.path.join('.', 'mbed-os', 'targets')
pins_file = find_file(targets_dir, directory_labels, 'PinNames.h')
includes = enumerate_includes(targets_dir, directory_labels)
defines = list(directory_labels)
# enumerate pins from PinNames.h
pins = enumerate_pins(pins_file, ['./tools'] + list(includes), defines)
    # sort primarily by length, breaking ties alphabetically (equivalent to an
    # alphabetical sort followed by a stable sort on length)
pins = sorted(pins, key=lambda x: (len(x), x.lower()))
write_pins_to_file(pins, pins_file, args.c)
if __name__ == "__main__":
main()
|
zherczeg/jerryscript
|
targets/mbedos5/tools/generate_pins.py
|
Python
|
apache-2.0
| 6,927
|
[
"VisIt"
] |
e5a8f0598943629996173a35697582c4e9a24a3663aacef7535aa9940b66604f
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import matplotlib as mpl
from pymatgen.analysis.diffraction.neutron import NDCalculator
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.util.testing import PymatgenTest
"""
These calculated values were verified with VESTA and FullProf.
"""
__author__ = "Yuta Suzuki"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Yuta Suzuki"
__email__ = "resnant@outlook.jp"
__date__ = "4/19/18"
class NDCalculatorTest(PymatgenTest):
def test_get_pattern(self):
s = self.get_structure("CsCl")
c = NDCalculator(wavelength=1.54184) # CuKa radiation
nd = c.get_pattern(s, two_theta_range=(0, 90))
# Check the first two peaks
self.assertAlmostEqual(nd.x[0], 21.107738329639844)
self.assertEqual(nd.hkls[0], [{"hkl": (1, 0, 0), "multiplicity": 6}])
self.assertAlmostEqual(nd.d_hkls[0], 4.2089999999999996)
self.assertAlmostEqual(nd.x[1], 30.024695921112777)
self.assertEqual(nd.hkls[1], [{"hkl": (1, 1, 0), "multiplicity": 12}])
self.assertAlmostEqual(nd.d_hkls[1], 2.976212442014178)
s = self.get_structure("LiFePO4")
nd = c.get_pattern(s, two_theta_range=(0, 90))
self.assertAlmostEqual(nd.x[1], 17.03504233621785)
self.assertAlmostEqual(nd.y[1], 46.2985965)
s = self.get_structure("Li10GeP2S12")
nd = c.get_pattern(s, two_theta_range=(0, 90))
self.assertAlmostEqual(nd.x[1], 14.058274883353876)
self.assertAlmostEqual(nd.y[1], 3.60588013)
# Test a hexagonal structure.
s = self.get_structure("Graphite")
nd = c.get_pattern(s, two_theta_range=(0, 90))
self.assertAlmostEqual(nd.x[0], 26.21057350859598)
self.assertAlmostEqual(nd.y[0], 100)
self.assertAlmostEqual(nd.x[2], 44.39599754)
self.assertAlmostEqual(nd.y[2], 42.62382267)
self.assertAlmostEqual(len(nd.hkls[0][0].keys()), 2)
# Test an exception in case of the input element is
# not in scattering length table.
# This curium structure is just for test, not the actual structure.
something = Structure(Lattice.cubic(a=1), ["Cm"], [[0, 0, 0]])
with self.assertRaises(ValueError):
nd = c.get_pattern(something, two_theta_range=(0, 90))
# Test with Debye-Waller factor
s = self.get_structure("Graphite")
c = NDCalculator(wavelength=1.54184, debye_waller_factors={"C": 1})
nd = c.get_pattern(s, two_theta_range=(0, 90))
self.assertAlmostEqual(nd.x[0], 26.21057350859598)
self.assertAlmostEqual(nd.y[0], 100)
self.assertAlmostEqual(nd.x[2], 44.39599754)
self.assertAlmostEqual(nd.y[2], 39.471514740)
def test_get_plot(self):
s = self.get_structure("Graphite")
c = NDCalculator(wavelength=1.54184, debye_waller_factors={"C": 1})
c.get_plot(s, two_theta_range=(0, 90))
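def _nd_example():
    """Illustrative sketch (not part of the test suite): build a CsCl-type
    structure by hand and compute its neutron pattern; a=4.209 matches the
    d-spacing asserted above."""
    cscl = Structure(Lattice.cubic(4.209), ["Cs", "Cl"],
                     [[0, 0, 0], [0.5, 0.5, 0.5]])
    calc = NDCalculator(wavelength=1.54184)
    pattern = calc.get_pattern(cscl, two_theta_range=(0, 90))
    return pattern.x[0], pattern.hkls[0]  # first peak position and its hkl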
if __name__ == "__main__":
unittest.main()
|
vorwerkc/pymatgen
|
pymatgen/analysis/diffraction/tests/test_neutron.py
|
Python
|
mit
| 3,130
|
[
"pymatgen"
] |
919753020b480cf530635610dbbc113584f8a5694b5fb1f04bcf3bb1a5fbf29c
|
from ase import *
from ase.lattice import bulk
from ase.dft.kpoints import monkhorst_pack
from gpaw import *
from gpaw.mpi import serial_comm
from gpaw.test import equal
from gpaw.xc.rpa import RPACorrelation
import numpy as np
a0 = 5.43
cell = bulk('Si', 'fcc', a=a0).get_cell()
Si = Atoms('Si2', cell=cell, pbc=True,
scaled_positions=((0,0,0), (0.25,0.25,0.25)))
kpts = monkhorst_pack((2,2,2))
kpts += np.array([1/4., 1/4., 1/4.])
calc = GPAW(mode='pw',
kpts=kpts,
occupations=FermiDirac(0.001),
communicator=serial_comm)
Si.set_calculator(calc)
E = Si.get_potential_energy()
calc.diagonalize_full_hamiltonian(nbands=50)
ecut = 50
rpa = RPACorrelation(calc, qsym=False, nfrequencies=8)
E_rpa_noqsym = rpa.calculate(ecut=[ecut])
rpa = RPACorrelation(calc, qsym=True, nfrequencies=8)
E_rpa_qsym = rpa.calculate(ecut=[ecut])
equal(E_rpa_qsym, E_rpa_noqsym, 0.001)
equal(E_rpa_qsym, -12.61, 0.01)
|
robwarm/gpaw-symm
|
gpaw/test/rpa_energy_Si.py
|
Python
|
gpl-3.0
| 948
|
[
"ASE",
"GPAW"
] |
b7577e89552b587adf42a8ac9cdc58e5335baca30a92150ba6bc158f3e87770d
|
# this script starts a new AIMS calculation. Ethylene, SA2-CASSCF(2/2).
import numpy as np
import pyspawn
import pyspawn.general
# TeraChem server port
port = 54322
# random number seed
seed=87061
# Velocity Verlet classical propagator
clas_prop = "vv"
# adaptive 2nd-order Runge-Kutta quantum propagator
qm_prop = "rk2"
# adiabatic NPI quantum Hamiltonian
qm_ham = "adiabatic"
# use TeraChem CASSCF or CASCI to compute potentials
potential = "terachem_cas"
# initial time
t0 = 0.0
# time step
ts = 10.0
# final simulation time
tfinal = 8000.0
# number of dimensions
numdims = 18
# number of electronic states
numstates = 2
# TeraChem job options
tc_options = {
"method": 'hf',
"basis": '6-31g',
"atoms": ["C", "C", "H", "H", "H", "H"],
"charge": 0,
"spinmult": 1,
"closed_shell": True,
"restricted": True,
"precision": "double",
"threall": 1.0e-20,
"casci": "yes",
"fon": "yes",
"closed": 7,
"active": 2,
"cassinglets": 2,
"castargetmult": 1,
"cas_energy_states": [0, 1],
"cas_energy_mults": [1, 1],
}
# trajectory parameters
traj_params = {
# terachem port
"tc_port": port,
# initial time
"time": t0,
# time step
"timestep": ts,
# final simulation time
"maxtime": tfinal,
    # coupling threshold
"spawnthresh": (0.5 * np.pi) / ts / 20.0,
# initial electronic state (indexed such that 0 is the ground state)
"istate": 1,
# Gaussian widths
"widths": np.asarray([30.0, 30.0, 30.0,
30.0, 30.0, 30.0,
6.0, 6.0, 6.0,
6.0, 6.0, 6.0,
6.0, 6.0, 6.0,
6.0, 6.0, 6.0]),
# atom labels
"atoms": tc_options["atoms"],
# nuclear masses (in a.u)
"masses": np.asarray([21864.0, 21864.0, 21864.0,
21864.0, 21864.0, 21864.0,
1822.0, 1822.0, 1822.0,
1822.0, 1822.0, 1822.0,
1822.0, 1822.0, 1822.0,
1822.0, 1822.0, 1822.0]),
# terachem options (above)
"tc_options": tc_options
}
sim_params = {
# initial time
"quantum_time": traj_params["time"],
# time step
"timestep": traj_params["timestep"],
# final simulation time
"max_quantum_time": traj_params["maxtime"],
# initial qm amplitudes
"qm_amplitudes": np.ones(1,dtype=np.complex128),
# energy shift used in quantum propagation
"qm_energy_shift": 77.6,
}
# import routines needed for propagation
exec("pyspawn.import_methods.into_simulation(pyspawn.qm_integrator." + qm_prop + ")")
exec("pyspawn.import_methods.into_simulation(pyspawn.qm_hamiltonian." + qm_ham + ")")
exec("pyspawn.import_methods.into_traj(pyspawn.potential." + potential + ")")
exec("pyspawn.import_methods.into_traj(pyspawn.classical_integrator." + clas_prop + ")")
# check for the existence of files from a past run
pyspawn.general.check_files()
# set up first trajectory
traj1 = pyspawn.traj(numdims, numstates)
traj1.set_numstates(numstates)
traj1.set_numdims(numdims)
traj1.set_parameters(traj_params)
# sample initial position and momentum from Wigner distribution (requires hessian.hdf5)
traj1.initial_wigner(seed)
# set up simulation
sim = pyspawn.simulation()
sim.add_traj(traj1)
sim.set_parameters(sim_params)
# begin propagation
sim.propagate()
|
blevine37/pySpawn17
|
examples/ethylene_fomocasci/start_c2h4.py
|
Python
|
mit
| 3,778
|
[
"Gaussian",
"TeraChem"
] |
58f781f192ddb8d99f87449bdb257d7cf0d718fb01c3a6f29cb240cdfae5f075
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkXYZMolReader(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkXYZMolReader(), 'Reading vtkXYZMol.',
(), ('vtkXYZMol',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
chrisidefix/devide
|
modules/vtk_basic/vtkXYZMolReader.py
|
Python
|
bsd-3-clause
| 476
|
[
"VTK"
] |
d0662bc5a751109b90968060f25f15c49f37f0028aaaca42db1cc05f36516ab5
|
"""
Unit page in Studio
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise, Promise
from . import BASE_URL
from .container import ContainerPage
class UnitPage(PageObject):
"""
Unit page in Studio
"""
NAME_SELECTOR = '#unit-display-name-input'
def __init__(self, browser, unit_locator):
super(UnitPage, self).__init__(browser)
self.unit_locator = unit_locator
@property
def url(self):
"""URL to the pages UI in a course."""
return "{}/unit/{}".format(BASE_URL, self.unit_locator)
def is_browser_on_page(self):
def _is_finished_loading():
# Wait until all components have been loaded
number_of_leaf_xblocks = len(self.q(css='{} .xblock-author_view,.xblock-student_view'.format(Component.BODY_SELECTOR)).results)
is_done = len(self.q(css=Component.BODY_SELECTOR).results) == number_of_leaf_xblocks
return (is_done, is_done)
# First make sure that an element with the view-unit class is present on the page,
# and then wait to make sure that the xblocks are all there
return (
self.q(css='body.view-unit').present and
Promise(_is_finished_loading, 'Finished rendering the xblocks in the unit.').fulfill()
)
@property
def name(self):
return self.q(css=self.NAME_SELECTOR).attrs('value')[0]
@property
def components(self):
"""
Return a list of components loaded on the unit page.
"""
return self.q(css=Component.BODY_SELECTOR).map(
lambda el: Component(self.browser, el.get_attribute('data-locator'))).results
def edit_draft(self):
"""
Started editing a draft of this unit.
"""
EmptyPromise(
lambda: self.q(css='.create-draft').present,
'Wait for edit draft link to be present'
).fulfill()
self.q(css='.create-draft').first.click()
EmptyPromise(
lambda: self.q(css='.editing-draft-alert').present,
'Wait for draft mode to be activated'
).fulfill()
def set_unit_visibility(self, visibility):
"""
Set unit visibility state
Arguments:
visibility (str): private or public
"""
self.q(css='select[name="visibility-select"] option[value="{}"]'.format(visibility)).first.click()
self.wait_for_ajax()
selector = '.edit-button'
if visibility == 'private':
check_func = lambda: self.q(css=selector).visible
elif visibility == 'public':
check_func = lambda: not self.q(css=selector).visible
EmptyPromise(check_func, 'Unit Visibility is {}'.format(visibility)).fulfill()
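def _unit_page_example(browser):
    """
    Illustrative sketch (not part of the page objects): a hypothetical
    bok_choy flow; the unit locator string below is made up.
    """
    unit = UnitPage(browser, 'i4x://Org/Course/vertical/0123').visit()
    unit.edit_draft()
    unit.set_unit_visibility('public')
    return unit.components[0].go_to_container()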
COMPONENT_BUTTONS = {
'advanced_tab': '.editor-tabs li.inner_tab_wrap:nth-child(2) > a',
'save_settings': '.action-save',
}
class Component(PageObject):
"""
A PageObject representing an XBlock child on the Studio UnitPage (including
the editing controls).
"""
url = None
BODY_SELECTOR = '.component'
NAME_SELECTOR = '.component-header'
def __init__(self, browser, locator):
super(Component, self).__init__(browser)
self.locator = locator
def is_browser_on_page(self):
return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present
def _bounded_selector(self, selector):
"""
Return `selector`, but limited to this particular `CourseOutlineChild` context
"""
return '{}[data-locator="{}"] {}'.format(
self.BODY_SELECTOR,
self.locator,
selector
)
@property
def name(self):
titles = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).text
if titles:
return titles[0]
else:
return None
@property
def preview_selector(self):
return self._bounded_selector('.xblock-author_view,.xblock-student_view')
def edit(self):
"""
Clicks the "edit" button for the first component on the page.
Same as the implementation in unit.py, unit and component pages will be merging.
"""
self.q(css=self._bounded_selector('.edit-button')).first.click()
EmptyPromise(
lambda: self.q(css='.xblock-studio_view').present,
'Wait for the Studio editor to be present'
).fulfill()
return self
@property
def editor_selector(self):
return '.xblock-studio_view'
def go_to_container(self):
"""
Open the container page linked to by this component, and return
an initialized :class:`.ContainerPage` for that xblock.
"""
return ContainerPage(self.browser, self.locator).visit()
def _click_button(self, button_name):
"""
Click on a button as specified by `button_name`
Arguments:
button_name (str): button name
"""
self.q(css=COMPONENT_BUTTONS[button_name]).first.click()
self.wait_for_ajax()
def open_advanced_tab(self):
"""
Click on Advanced Tab.
"""
self._click_button('advanced_tab')
def save_settings(self):
"""
Click on settings Save button.
"""
self._click_button('save_settings')
def go_to_group_configuration_page(self):
"""
Go to the Group Configuration used by the component.
"""
self.q(css=self._bounded_selector('span.message-text a')).first.click()
@property
def group_configuration_link_name(self):
"""
Get Group Configuration name from link.
"""
return self.q(css=self._bounded_selector('span.message-text a')).first.text[0]
|
carsongee/edx-platform
|
common/test/acceptance/pages/studio/unit.py
|
Python
|
agpl-3.0
| 5,826
|
[
"VisIt"
] |
34e826354453e415a64983d3bf978a0f2f4171bae83761b2fd6c51c75c7062e6
|
__author__ = 'abel'
import montemodes.functions.calculate as calc
method_function = {
1: calc.get_energy_from_tinker,
2: calc.get_energy_from_gaussian
}
class gaussian:
def __init__(self,
methodology='pm3',
internal=False,
memory=None,
processors=None,
binary='g09'):
self._methodology = methodology
self._memory = memory
self._processors = processors
self._internal = internal
self._binary = binary
def single_point(self, molecule):
return calc.get_energy_from_gaussian(molecule,
calculation=self._methodology,
internal=self._internal,
processors=self._processors,
binary=self._binary)
def vibrations(self, molecule):
modes, energy = calc.get_modes_from_gaussian(molecule,
calculation=self._methodology,
binary=self._binary)
return modes, energy
@property
def internal(self):
return self._internal
@internal.setter
def internal(self, internal):
self._internal = internal
class tinker:
def __init__(self,
parameter_set='mm3.prm',
num_modes=None):
self._parameter_set = parameter_set
self._num_modes = num_modes
def single_point(self, molecule):
return calc.get_energy_from_tinker(molecule, force_field=self._parameter_set)
def vibrations(self, molecule):
modes = calc.get_modes_from_tinker(molecule,
force_field=self._parameter_set,
num_modes=self._num_modes)
energy = None
return modes, energy
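# --- Illustrative usage sketch (not part of the original module) ---
# The molecule argument is hypothetical; montemodes builds molecule objects
# elsewhere in the package.
def _methods_example(molecule):
    calc_g = gaussian(methodology='pm3')
    e_gaussian = calc_g.single_point(molecule)
    modes, e_modes = calc_g.vibrations(molecule)
    calc_t = tinker(parameter_set='mm3.prm', num_modes=10)
    modes_t, _ = calc_t.vibrations(molecule)
    return e_gaussian, modes, modes_t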
|
abelcarreras/MonteModes
|
montemodes/functions/methods.py
|
Python
|
mit
| 1,986
|
[
"Gaussian",
"TINKER"
] |
a86795783b4bc545ba5072d92ba597a4488fd7d9dc01d3c62c5c2c53eba079f2
|
from django.test import SimpleTestCase
from unittest.mock import patch
from corehq.apps.app_manager.const import (
AUTO_SELECT_USERCASE,
WORKFLOW_CASE_LIST,
WORKFLOW_MODULE,
)
from corehq.apps.app_manager.models import (
AdvancedModule,
AdvancedOpenCaseAction,
Application,
AutoSelectCase,
ConditionalCaseUpdate,
DetailColumn,
LoadUpdateAction,
Module,
OpenCaseAction,
PreloadAction,
)
from corehq.apps.app_manager.tests.app_factory import AppFactory
from corehq.apps.app_manager.tests.util import patch_get_xform_resource_overrides, TestXmlMixin
from corehq.util.test_utils import flag_enabled
@patch('corehq.apps.app_manager.helpers.validators.domain_has_privilege', return_value=True)
@patch('corehq.apps.builds.models.BuildSpec.supports_j2me', return_value=False)
@patch_get_xform_resource_overrides()
class CaseListFormSuiteTests(SimpleTestCase, TestXmlMixin):
file_path = ('data', 'case_list_form')
def _prep_case_list_form_app(self):
return AppFactory.case_list_form_app_factory()
def test_case_list_registration_form(self, *args):
factory = self._prep_case_list_form_app()
app = factory.app
case_module = app.get_module(0)
case_module.case_list_form.set_icon('en', 'jr://file/commcare/image/new_case.png')
case_module.case_list_form.set_audio('en', 'jr://file/commcare/audio/new_case.mp3')
self.assertXmlEqual(self.get_xml('case-list-form-suite'), app.create_suite())
def test_case_list_registration_form_usercase(self, *args):
factory = self._prep_case_list_form_app()
app = factory.app
register_module = app.get_module(1)
register_form = register_module.get_form(0)
register_form.actions.usercase_preload = PreloadAction(preload={'/data/question1': 'question1'})
register_form.actions.usercase_preload.condition.type = 'always'
self.assertXmlEqual(self.get_xml('case-list-form-suite-usercase'), app.create_suite())
def test_case_list_registration_form_end_for_form_nav(self, *args):
factory = self._prep_case_list_form_app()
app = factory.app
registration_form = app.get_module(1).get_form(0)
registration_form.post_form_workflow = WORKFLOW_MODULE
self.assertXmlPartialEqual(
self.get_xml('case-list-form-suite-form-nav-entry'),
app.create_suite(),
"./entry[2]"
)
def test_case_list_registration_form_no_media(self, *args):
factory = self._prep_case_list_form_app()
self.assertXmlPartialEqual(
self.get_xml('case-list-form-suite-no-media-partial'),
factory.app.create_suite(),
"./detail[@id='m0_case_short']/action"
)
def test_case_list_form_multiple_modules(self, *args):
factory = self._prep_case_list_form_app()
case_module1 = factory.app.get_module(0)
case_module2, update2 = factory.new_basic_module('update case 2', case_module1.case_type)
factory.form_requires_case(update2)
case_module2.case_list_form.form_id = factory.get_form(1, 0).unique_id
case_module2.case_list_form.label = {
'en': 'New Case'
}
self.assertXmlEqual(
self.get_xml('case-list-form-suite-multiple-references'),
factory.app.create_suite(),
)
def test_case_list_registration_form_advanced(self, *args):
factory = AppFactory(build_version='2.9.0')
register_module, register_form = factory.new_advanced_module('register_dugong', 'dugong')
factory.form_opens_case(register_form)
case_module, update_form = factory.new_advanced_module('update_dugong', 'dugong')
factory.form_requires_case(update_form)
case_module.case_list_form.form_id = register_form.get_unique_id()
case_module.case_list_form.label = {
'en': 'Register another Dugong'
}
self.assertXmlEqual(self.get_xml('case-list-form-advanced'), factory.app.create_suite())
def test_case_list_registration_form_advanced_autoload(self, *args):
factory = AppFactory(build_version='2.9.0')
register_module, register_form = factory.new_advanced_module('register_dugong', 'dugong')
factory.form_opens_case(register_form)
register_form.actions.load_update_cases.append(LoadUpdateAction(
case_tag='usercase',
auto_select=AutoSelectCase(
mode=AUTO_SELECT_USERCASE,
)
))
case_module, update_form = factory.new_advanced_module('update_dugong', 'dugong')
factory.form_requires_case(update_form)
case_module.case_list_form.form_id = register_form.get_unique_id()
case_module.case_list_form.label = {
'en': 'Register another Dugong'
}
self.assertXmlEqual(self.get_xml('case-list-form-advanced-autoload'), factory.app.create_suite())
def test_case_list_registration_form_return_to_case_list(self, *args):
factory = self._prep_case_list_form_app()
app = factory.app
case_module = app.get_module(0)
case_module.case_list_form.post_form_workflow = WORKFLOW_CASE_LIST
self.assertXmlEqual(self.get_xml('case_list_form_end_of_form_case_list'), app.create_suite())
def test_case_list_registration_form_return_to_case_list_clmi_only(self, *args):
factory = self._prep_case_list_form_app()
app = factory.app
clmi_module = factory.new_basic_module('clmi_only', factory.app.get_module(0).case_type, with_form=False)
case_module = app.get_module(0)
case_module.case_list_form.post_form_workflow = WORKFLOW_CASE_LIST
clmi_module.case_list_form.form_id = case_module.case_list_form.form_id
clmi_module.case_list_form.post_form_workflow = WORKFLOW_CASE_LIST
clmi_module.case_list.show = True
#self.assertXmlEqual(self.get_xml('case_list_form_end_of_form_case_list_clmi_only'), app.create_suite())
self.assertXmlPartialEqual(self.get_xml('case_list_form_end_of_form_case_list_clmi_only'),
factory.app.create_suite(), './entry[2]')
def test_case_list_form_parent_child_advanced(self, *args):
# * Register house (case type = house, basic)
# * Register house form
# * Register person (case type = person, parent select = 'Register house', advanced)
# * Register person form
# * Manager person (case type = person, case list form = 'Register person form', basic)
# * Manage person form
factory = AppFactory(build_version='2.9.0')
register_house_module, register_house_form = factory.new_basic_module('register_house', 'house')
factory.form_opens_case(register_house_form)
register_person_module, register_person_form = factory.new_advanced_module('register_person', 'person')
factory.form_requires_case(register_person_form, 'house')
factory.form_opens_case(register_person_form, 'person', is_subcase=True)
person_module, update_person_form = factory.new_basic_module(
'update_person',
'person',
case_list_form=register_person_form
)
person_module.parent_select.active = True
person_module.parent_select.module_id = register_house_module.unique_id
factory.form_requires_case(update_person_form)
self.assertXmlEqual(self.get_xml('case-list-form-suite-parent-child-advanced'), factory.app.create_suite())
def test_case_list_form_followup_form(self, *args):
# * Register house (case type = house, basic)
# * Register house form
# * Register person (case type = person, parent select = 'Register house', advanced)
# * Register person form
# * Manager person (case type = person, case list form = 'Register person form', basic)
# * Manage person form
factory = AppFactory(build_version='2.9.0')
register_house_module, register_house_form = factory.new_basic_module('register_house', 'house')
factory.form_opens_case(register_house_form)
register_person_module, register_person_form = factory.new_advanced_module('register_person', 'person')
factory.form_requires_case(register_person_form, 'house')
factory.form_opens_case(register_person_form, 'person', is_subcase=True)
house_module, update_house_form = factory.new_advanced_module(
'update_house',
'house',
)
factory.form_requires_case(update_house_form)
person_module, update_person_form = factory.new_basic_module(
'update_person',
'person',
case_list_form=update_house_form
)
factory.form_requires_case(update_person_form)
def _assert_app_build_error(error):
errors = factory.app.validate_app()
self.assertIn(
(error, person_module.unique_id),
[(e["type"], e.get("module", {}).get("unique_id", {})) for e in errors]
)
self.assertXmlPartialEqual(
"<partial></partial>",
factory.app.create_suite(),
"./detail[@id='m3_case_short']/action"
)
# should fail since feature flag isn't enabled
_assert_app_build_error('case list form not registration')
with flag_enabled('FOLLOWUP_FORMS_AS_CASE_LIST_FORM'):
# should fail since module doesn't have active parent_select
_assert_app_build_error("invalid case list followup form")
person_module.parent_select.active = True
person_module.parent_select.module_id = register_house_module.unique_id
person_module.case_list_form.relevancy_expression = "count(instance('casedb')/casedb/case) != 0"
errors = factory.app.validate_app()
self.assertNotIn(
('case list form not registration', person_module.unique_id),
[(e["type"], e.get("module", {}).get("unique_id", {})) for e in errors]
)
xml = """
<partial>
<action relevant="count(instance('casedb')/casedb/case) != 0">
<display>
<text><locale id="case_list_form.m3"/></text>
</display>
<stack>
<push>
<command value="'m2-f0'"/>
<datum id="case_id_load_house_0" value="instance('commcaresession')/session/data/parent_id"/>
<datum id="return_to" value="'m3'"/>
</push>
</stack>
</action>
</partial>
"""
self.assertXmlPartialEqual(
xml,
factory.app.create_suite(),
"./detail[@id='m3_case_short']/action"
)
person_module.parent_select.active = False
def test_case_list_form_parent_child_basic(self, *args):
# * Register house (case type = house, basic)
# * Register house form
# * Register person (case type = person, parent select = 'Register house', basic)
# * Register person form
# * Manager person (case type = person, case list form = 'Register person form', basic)
# * Manage person form
factory = AppFactory(build_version='2.9.0')
register_house_module, register_house_form = factory.new_basic_module('register_house', 'house')
factory.form_opens_case(register_house_form)
register_person_module, register_person_form = factory.new_basic_module('register_person', 'house')
factory.form_requires_case(register_person_form)
factory.form_opens_case(register_person_form, case_type='person', is_subcase=True)
person_module, update_person_form = factory.new_basic_module(
'update_person',
'person',
case_list_form=register_person_form
)
factory.form_requires_case(update_person_form, parent_case_type='house')
register_person_module.case_details.short.columns.append(
DetailColumn(
header={'en': 'a'},
model='case',
field='parent/case_name', # Include a parent case property in the case list
format='plain',
)
)
self.assertXmlEqual(self.get_xml('case-list-form-suite-parent-child-basic'), factory.app.create_suite())
def test_case_list_form_reg_form_creates_child_case(self, *args):
factory = AppFactory(build_version='2.9.0')
register_person_module, register_person_form = factory.new_basic_module('reg_person_and_stub', 'person')
factory.form_opens_case(register_person_form)
factory.form_opens_case(register_person_form, case_type='stub', is_subcase=True)
person_module, update_person_form = factory.new_basic_module(
'update_person',
'person',
case_list_form=register_person_form
)
factory.form_requires_case(update_person_form)
self.assertXmlPartialEqual(
self.get_xml('case_list_form_reg_form_creates_child_case'), factory.app.create_suite(), './entry[1]'
)
def test_case_list_form_parent_child_submodule_basic(self, *args):
# * Register house (case type = house, basic)
# * Register house form
# * Register person (case type = person, parent select = 'Register house', basic)
# * Register person form
# * Update house (case type = house, case list form = 'Register house')
# * Update house form
# * Update person (case type = person, case list form = 'Register person form', basic, parent module = 'Update house')
# * Update person form
factory = AppFactory(build_version='2.9.0')
register_house_module, register_house_form = factory.new_basic_module('register_house', 'house')
factory.form_opens_case(register_house_form)
register_person_module, register_person_form = factory.new_basic_module('register_person', 'house')
factory.form_requires_case(register_person_form, 'house')
factory.form_opens_case(register_person_form, 'person', is_subcase=True)
house_module, update_house_form = factory.new_basic_module(
'update_house',
'house',
case_list_form=register_house_form
)
factory.form_requires_case(update_house_form)
person_module, update_person_form = factory.new_basic_module(
'update_person',
'person',
parent_module=house_module,
case_list_form=register_person_form
)
factory.form_requires_case(update_person_form, 'person', parent_case_type='house')
self.assertXmlEqual(self.get_xml('case-list-form-suite-parent-child-submodule-basic'), factory.app.create_suite())
def test_case_list_form_parent_child_submodule_advanced(self, *args):
# * Register house (case type = house, basic)
# * Register house form
# * Register person (case type = person, parent select = 'Register house', advanced)
# * Register person form
# * Update house (case type = house, case list form = 'Register house')
# * Update house form
# * Update person (case type = person, case list form = 'Register person form', advanced, parent module = 'Update house')
# * Update person form
factory = AppFactory(build_version='2.9.0')
register_house_module, register_house_form = factory.new_basic_module('register_house', 'house')
factory.form_opens_case(register_house_form)
register_person_module, register_person_form = factory.new_advanced_module('register_person', 'person')
factory.form_requires_case(register_person_form, 'house')
factory.form_opens_case(register_person_form, 'person', is_subcase=True)
house_module, update_house_form = factory.new_advanced_module(
'update_house',
'house',
case_list_form=register_house_form
)
factory.form_requires_case(update_house_form)
person_module, update_person_form = factory.new_advanced_module(
'update_person',
'person',
parent_module=house_module,
case_list_form=register_person_form
)
factory.form_requires_case(update_person_form, 'house')
factory.form_requires_case(update_person_form, 'person', parent_case_type='house')
self.assertXmlEqual(self.get_xml('case-list-form-suite-parent-child-submodule-advanced'), factory.app.create_suite())
def test_case_list_form_parent_child_submodule_advanced_rename_case_var(self, *args):
# Test that the session vars in the entries for the submodule get updated
# to match the parent (and to avoid naming conflicts).
# m3-f0: 'case_id_load_house' -> 'case_id_load_house_renamed'
# m3-f0: 'case_id_load_house_renamed' -> 'case_id_load_house_renamed_person'
factory = AppFactory(build_version='2.9.0')
register_house_module, register_house_form = factory.new_basic_module('register_house', 'house')
factory.form_opens_case(register_house_form)
register_person_module, register_person_form = factory.new_advanced_module('register_person', 'person')
factory.form_requires_case(register_person_form, 'house')
factory.form_opens_case(register_person_form, 'person', is_subcase=True)
house_module, update_house_form = factory.new_advanced_module(
'update_house',
'house',
case_list_form=register_house_form
)
factory.form_requires_case(update_house_form)
# changing this case tag should result in the session var in the submodule entry being updated to match it
update_house_form.actions.load_update_cases[0].case_tag = 'load_house_renamed'
person_module, update_person_form = factory.new_advanced_module(
'update_person',
'person',
parent_module=house_module,
case_list_form=register_person_form
)
factory.form_requires_case(update_person_form, 'house')
factory.form_requires_case(update_person_form, 'person', parent_case_type='house')
# making this case tag the same as the one in the parent module should mean that it will also
# get changed to avoid conflicts
update_person_form.actions.load_update_cases[1].case_tag = 'load_house_renamed'
self.assertXmlEqual(self.get_xml('case-list-form-suite-parent-child-submodule-advanced-rename-var'), factory.app.create_suite())
def test_case_list_form_parent_child_submodule_mixed(self, *args):
# * Register house (case type = house, basic)
# * Register house form
# * Register person (case type = person, parent select = 'Register house', advanced)
# * Register person form
# * Update house (case type = house, case list form = 'Register house')
# * Update house form
# * Update person (case type = person, case list form = 'Register person form', advanced, parent module = 'Update house')
# * Update person form
factory = AppFactory(build_version='2.9.0')
register_house_module, register_house_form = factory.new_basic_module('register_house', 'house')
factory.form_opens_case(register_house_form)
register_person_module, register_person_form = factory.new_basic_module('register_person', 'house')
factory.form_requires_case(register_person_form, 'house')
factory.form_opens_case(register_person_form, 'person', is_subcase=True)
house_module, update_house_form = factory.new_advanced_module(
'update_house',
'house',
case_list_form=register_house_form
)
factory.form_requires_case(update_house_form)
person_module, update_person_form = factory.new_basic_module(
'update_person',
'person',
parent_module=house_module,
case_list_form=register_person_form
)
factory.form_requires_case(update_person_form, 'person', parent_case_type='house')
self.assertXmlEqual(self.get_xml('case-list-form-suite-parent-child-submodule-mixed'), factory.app.create_suite())
def test_target_module_different_datums(self, *args):
# * Registration
# * Register patient form
# * open case (patient), update usercase
# * Visits (case type = patient, case list form = 'Register patient')
# * Visit form
# * update case, open child case (visit), load from usercase
# * Record notes
# * update case, open child case (visit)
# * Update patient
# * update case
factory = AppFactory(build_version='2.9.0')
registration_module, registration_form = factory.new_basic_module('registration', 'patient')
factory.form_opens_case(registration_form)
factory.form_uses_usercase(registration_form, preload={'username': '/data/username'})
visit_module, visit_form = factory.new_basic_module(
'visits',
'patient',
case_list_form=registration_form
)
factory.form_requires_case(visit_form)
factory.form_opens_case(visit_form, 'visit', is_subcase=True)
factory.form_uses_usercase(visit_form, preload={'username': '/data/username'})
notes_form = factory.new_form(visit_module)
factory.form_requires_case(notes_form)
factory.form_opens_case(notes_form, 'visit', is_subcase=True)
update_patient_form = factory.new_form(visit_module)
factory.form_requires_case(update_patient_form)
self.assertXmlPartialEqual(
self.get_xml('target_module_different_datums'),
factory.app.create_suite(),
'./entry')
def test_case_list_form_requires_parent_case_but_target_doesnt(self, *args):
factory = AppFactory(build_version='2.9.0')
register_household_module, register_household_form = factory.new_basic_module('new_household', 'household')
factory.form_opens_case(register_household_form)
households, edit_household_form = factory.new_basic_module('households', 'household',
case_list_form=register_household_form)
factory.form_requires_case(edit_household_form)
register_member_module, register_member_form = factory.new_advanced_module('new_member', 'member')
factory.form_requires_case(register_member_form, 'household')
factory.form_opens_case(register_member_form, 'member', is_subcase=True)
members, edit_member_form = factory.new_basic_module('members', 'member', case_list_form=register_member_form)
factory.form_requires_case(edit_member_form)
suite = factory.app.create_suite()
self.assertXmlEqual(
self.get_xml('source_requires_case_target_doesnt'),
suite
)
class CaseListFormFormTests(SimpleTestCase, TestXmlMixin):
file_path = 'data', 'case_list_form'
def setUp(self):
self.is_usercase_in_use_patch = patch('corehq.apps.app_manager.models.is_usercase_in_use')
self.is_usercase_in_use_patch.start()
self.app = Application.new_app('domain', 'New App')
self.app.version = 3
def tearDown(self):
self.is_usercase_in_use_patch.stop()
def _add_module_and_form(self, ModuleClass):
self.module = self.app.add_module(ModuleClass.new_module('New Module', lang='en'))
self.module.case_type = 'test_case_type'
self.form = self.module.new_form("Untitled Form", "en",
self.get_xml('original_form', override_path=('data',)).decode('utf-8'))
def test_case_list_form_basic(self):
self._add_module_and_form(Module)
self.form.actions.open_case = OpenCaseAction(
name_update=ConditionalCaseUpdate(question_path="/data/question1"),
external_id=None
)
self.form.actions.open_case.condition.type = 'always'
self.module.case_list_form.form_id = self.form.get_unique_id()
self.assertXmlEqual(self.get_xml('case_list_form_basic_form'), self.form.render_xform())
def test_case_list_form_advanced(self):
self._add_module_and_form(AdvancedModule)
self.form.actions.open_cases.append(AdvancedOpenCaseAction(
case_type=self.module.case_type,
case_tag='open_1',
name_update=ConditionalCaseUpdate(question_path="/data/question1"),
))
self.form.actions.open_cases[0].open_condition.type = 'always'
self.module.case_list_form.form_id = self.form.get_unique_id()
self.assertXmlEqual(self.get_xml('case_list_form_advanced_form'), self.form.render_xform())
|
dimagi/commcare-hq
|
corehq/apps/app_manager/tests/test_case_list_form.py
|
Python
|
bsd-3-clause
| 25,183
|
[
"VisIt"
] |
3c1c0ba460c998faafa5432a781942bf5abde86dfbc47bf6ceeceeee81654ade
|
#!/usr/bin/env python
# ---------------------------------------------------------------------------
# Licensing Information: You are free to use or extend these projects for
# education or research purposes provided that (1) you retain this notice
# and (2) you provide clear attribution to UC Berkeley, including a link
# to http://barc-project.com
#
# Attribution Information: The barc project ROS code-base was developed
# at UC Berkeley in the Model Predictive Control (MPC) lab by Jon Gonzales
# (jon.gonzales@berkeley.edu). The cloud services integration with ROS was developed
# by Kiet Lam (kiet.lam@berkeley.edu). The web-server app Dator was
# based on an open source project by Bruce Wootton
# ---------------------------------------------------------------------------
import rospy
import time
from numpy import sqrt
from numpy import pi, cos, sin, tan, arctan
from numpy import eye, array, zeros, unwrap, diag, copy, dot
from scipy.linalg import inv
from tf import transformations
class EncoderModel:
def __init__(self):
# encoder measurement variables
self.vhat_m1 = 0
self.vhat_m2 = 0
self.t0_m1 = 0
self.t0_m2 = 0
self.r_tire = 0.05 # radius of the tire
self.ang_km1 = 0
self.ang_km2 = 0
self.s_m1 = 0 # distance travelled
self.s_m2 = 0 # distance travelled
# This call takes the encoder tick counts as input and returns a filtered velocity estimate
# M1 = method 1
def estimateVelocityM1(self,data):
n_FL = data.FL
n_FR = data.FR
n_BL = data.BL
n_BR = data.BR
        # compute the average front-encoder measurement (float division keeps half-tick resolution)
        n_mean = (n_FL + n_FR)/2.0
        # convert the encoder measurement to angular displacement (8 ticks per revolution)
        ang_mean = n_mean*2*pi/8
# compute time elapsed
tf = time.time()
dt = tf - self.t0_m1
        # compute speed with a second-order backwards finite-difference estimate:
        #   d(ang)/dt ~= (3*ang_k - 4*ang_{k-1} + ang_{k-2}) / (2*dt)
        self.vhat_m1 = self.r_tire*(3*ang_mean - 4*self.ang_km1 + self.ang_km2)/(2*dt)
        # integrate speed to get distance travelled
        self.s_m1 += self.vhat_m1*dt
# update
self.ang_km2 = self.ang_km1
self.ang_km1 = ang_mean
self.t0_m1 = time.time()
# This callback takes velocity estimates directly as input from the arduino and filters them
# M2 = method 2
def estimateVelocityM2(self,data):
# compute time elapsed
tf = time.time()
dt = tf - self.t0_m2
# get velocity estimates per wheel
v_FL = data.FL
v_FR = data.FR
v_BL = data.BL
v_BR = data.BR
# compute speed and distance travelled
self.vhat_m2 = (v_BL + v_BR)/2
self.s_m2 += self.vhat_m2 * dt
self.t0_m2 = time.time()
class ImuModel:
def __init__(self):
# units: [rad] and [rad/s]
# orientation (roll, pitch, yaw), angular velocity, linear acceleration
self.r = 0
self.p = 0
self.y = 0
self.wx = 0
self.wy = 0
self.wz = 0
self.ax = 0
self.ay = 0
self.az = 0
# planar acceleration
self.a = 0
# history
self.y0 = None
self.y_prev = 0
# amount of angle deviation from first yaw measurement
self.dy = 0.0
self.dy_deg = 0.0
def updateEstimates(self, data):
# get orientation from quaternion data, and convert to roll, pitch, yaw
# extract angular velocity and linear acceleration data
ori = data.orientation
quaternion = (ori.x, ori.y, ori.z, ori.w)
(r, p, y) = transformations.euler_from_quaternion(quaternion)
# save initial measurements
        if self.y0 is None:
self.y0 = y
# unwrap measurement
self.y = unwrap(array([self.y_prev, y]), discont = pi)[1]
self.y_prev = self.y
self.dy = self.y - self.y0
self.dy_deg = self.dy*(180.0/pi)
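        # e.g. (illustrative): a raw yaw jumping from 3.1 to -3.1 rad across the
        # +/-pi branch cut is unwrapped to ~3.18 rad, so self.dy keeps tracking
        # continuous rotation instead of snapping by ~2*pi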
# save orientation, angular velocity, linear acceleration data
self.r = r
self.p = p
self.wx = data.angular_velocity.x
self.wy = data.angular_velocity.y
self.wz = data.angular_velocity.z
self.ax = data.linear_acceleration.x
self.ay = data.linear_acceleration.y
self.az = data.linear_acceleration.z
self.a = sqrt( self.ax**2 + self.ay**2 )
class GPS:
def __init__(self, dt=0.1):
self.x = 0
self.y = 0
self.t = 0
self.x_prev = None
self.y_prev = None
self.t_prev = None
self.vx = None
self.vy = None
def updateEstimates(self, data):
self.t = data.timestamp_ms / 1000.0
self.x = data.x_m
self.y = data.y_m
        if self.t_prev is not None:
self.vx = (self.x - self.x_prev) / (self.t - self.t_prev)
self.vy = (self.y - self.y_prev) / (self.t - self.t_prev)
self.x_prev = self.x
self.y_prev = self.y
self.t_prev = self.t
# state estimation node
class Observer():
def __init__(self, dt=0.1):
# state vector
self.zhat = array([0.0, 0.0, 0.0, 0.0]) # z = [x,y,psi,v]
self.xDim = self.zhat.size
# input vector
self.u = array([0.0, 0.0]) # u = [df, a]
# model parameters
self.La = 0.16 # meters
self.Lb = 0.16 # meters
self.dt = dt
# sensors models
self.imu = ImuModel()
self.enc = EncoderModel()
self.y = array([0.0, 0.0]) # y = [psi, v]
# ekf parameters
self.P = eye(self.xDim)
self.Q = diag([0.1,0.1,0.01,0.5])
self.R = diag([0.01,0.1])
def imuCallback(self, data):
self.imu.updateEstimates(data)
self.y[0] = self.imu.dy
self.u[1] = self.imu.a
def encCallback(self, data):
self.enc.estimateVelocityM1(data)
self.y[1] = self.enc.vhat_m1
def controllerCallback(self, data):
# get steering angle using steering map
df_pwm = data.servo
self.u[0] = -0.0012*df_pwm + 1.8962
def getStateEstimate(self):
self.ekf()
return self.zhat
def f_KinBkMdl(self, z, u):
# get states / inputs / parameters
x = z[0]
y = z[1]
psi = z[2]
v = z[3]
df = u[0]
a = u[1]
La = self.La
Lb = self.Lb
dt = self.dt
# compute slip angle
bta = arctan( La / (La + Lb) * tan(df) )
# compute prediction
x_next = x + dt*( v*cos(psi + bta) )
y_next = y + dt*( v*sin(psi + bta) )
psi_next = psi + dt*v/Lb*sin(bta)
v_next = v + dt*a
return array([x_next, y_next, psi_next, v_next])
def h_KinBkMdl(self, z, u):
C = array([[0,0,1,0],[0,0,0,1]])
return dot(C, z)
def ekf(self):
"""
EKF Extended Kalman Filter for nonlinear dynamic systems
ekf(f,mx,P,h,z,Q,R) returns state estimate, x and state covariance, P
for nonlinear dynamic system:
x_k+1 = f(x_k) + w_k
y_k = h(x_k) + v_k
where w ~ N(0,Q) meaning w is gaussian noise with covariance Q
v ~ N(0,R) meaning v is gaussian noise with covariance R
Inputs: f: function handle for f(x)
mx_k: "a priori" state estimate
P_k: "a priori" estimated state covariance
                h: function handle for h(x)
y_kp1: current measurement
Q: process noise covariance
R: measurement noise covariance
args: additional arguments to f(x, *args)
Output: mx_kp1: "a posteriori" state estimate
P_kp1: "a posteriori" state covariance
Notation: mx_k = E[x_k] and my_k = E[y_k], where m stands for "mean of"
"""
f = self.f_KinBkMdl
h = self.h_KinBkMdl
mx_k = self.zhat
u_k = self.u
y_kp1 = self.y
P_k = self.P
Q = self.Q
R = self.R
xDim = mx_k.size # dimension of the state
mx_kp1 = f(mx_k, u_k) # predict next state
A = self.numerical_jac(f, mx_k, u_k) # linearize process model about current state
        P_kp1 = dot(dot(A,P_k),A.T) + Q                 # propagate variance
my_kp1 = h(mx_kp1, u_k) # predict future output
H = self.numerical_jac(h, mx_kp1, u_k) # linearize measurement model about predicted next state
P12 = dot(P_kp1, H.T) # cross covariance
K = dot(P12, inv( dot(H,P12) + R)) # Kalman filter gain
mx_kp1 = mx_kp1 + dot(K,(y_kp1 - my_kp1)) # state estimate
P_kp1 = dot(dot(K,R),K.T) + dot( dot( (eye(xDim) - dot(K,H)) , P_kp1) , (eye(xDim) - dot(K,H)).T )
self.zhat = mx_kp1
self.P = P_kp1
def numerical_jac(self, f, x, u):
        # numerical Jacobian via central finite differences
y = f(x,u)
jac = zeros( (y.size,x.size) )
eps = 1e-5
xp = copy(x)
for i in range(x.size):
xp[i] = x[i] + eps/2.0
yhi = f(xp, u)
xp[i] = x[i] - eps/2.0
ylo = f(xp, u)
xp[i] = x[i]
jac[:,i] = (yhi - ylo) / eps
return jac
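
# --- Illustrative, hypothetical usage (not part of the original ROS node) ---
# A minimal offline sketch showing how the EKF observer could be stepped
# without ROS: no sensor callbacks fire, so the measurement vector stays at
# zero and the filter simply propagates the kinematic bicycle model.
if __name__ == '__main__':
    observer = Observer(dt=0.1)
    observer.u = array([0.0, 0.5])   # assumed inputs: steering [rad], accel [m/s^2]
    for k in range(5):
        zhat = observer.getStateEstimate()   # one EKF predict/update cycle
        print('step %d: x=%.3f y=%.3f psi=%.3f v=%.3f' % ((k,) + tuple(zhat)))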
|
BARCproject/barc
|
workspace/src/labs/src/lab8/observer.py
|
Python
|
mit
| 10,153
|
[
"Gaussian"
] |
9b4bcffc396031887296c5b8fba78752f499f721208418df26b8016f47486dff
|
#!python
# ----------------------------------------
# scikit-ribo
# ----------------------------------------
# pre-processing module
# ----------------------------------------
# author: Han Fang
# contact: hanfang.cshl@gmail.com
# website: hanfang.github.io
# date: 3/28/2017
# ----------------------------------------
from __future__ import print_function, division
import os
import sys
import argparse
import pybedtools as pbt
import pysam
import pandas as pd
import numpy as np
import csv
import errno
from datetime import datetime
import scikit_ribo
from gtf_preprocess import GtfPreProcess
from process_rnafold import ProcessRnafold
from merge_df import MergeDF
def log_status(gtf_fn, ref_fn, prefix, rnafold_fn, tpm_fn, out_dir):
"""
Logging the status
:param gtf_fn: str, gtf file
:param ref_fn: str, ref fasta file
:param prefix: str, prefix
:param rnafold_fn: str, rnafold file path
:param tpm_fn: str, tpm file path
:param out_dir: str, output directory
:return: None
"""
# create output folder
cmd = 'mkdir -p ' + out_dir
os.system(cmd)
print("[status]\tStarted the pre-processing module", file=sys.stderr)
print("[status]\tImport the gtf file: " + gtf_fn, file=sys.stderr)
print("[status]\tImport the ref genome fasta file: " + ref_fn, file=sys.stderr)
print("[status]\tImport RNAfold file: " + rnafold_fn, file=sys.stderr)
print("[status]\tImport TPM file of RNAseq sample: " + tpm_fn, file=sys.stderr)
print("[setting]\tPrefix to use: " + prefix, file=sys.stderr)
print("[setting]\tOutput path: " + out_dir, file=sys.stderr)
sys.stderr.flush()
def module_gtf(gtf_fn, ref_fn, prefix, out_dir):
"""
Module for processing gtf and ref
:param gtf_fn: str
:param ref_fn: str
:param prefix: str
:param out_dir: str
:return: None
"""
worker = GtfPreProcess(gtf_fn, ref_fn, prefix, out_dir)
print("[execute]\tLoading the the gtf file in to sql db", file=sys.stderr)
worker.convertGtf()
print("[execute]\tCalculating the length of each chromosome", file=sys.stderr)
worker.getChrLen()
print("[execute]\tExtracting the start codons' positions from the gtf db", file=sys.stderr)
worker.getStartCodon()
print("[execute]\tExtracting the sequences for each gene", file=sys.stderr)
worker.getSeq()
print("[execute]\tBuilding the index for each position at the codon level", file=sys.stderr)
worker.getCodons()
worker.getNts()
print("[execute]\tCreating the codon table for the coding region", file=sys.stderr)
worker.createCodonTable()
print("[status]\tGtf processing module finished", file=sys.stderr)
sys.stderr.flush()
def module_merge(prefix, tpm_fn, rnafold_fn, out_dir):
"""
merge data
:param prefix: prefix for the files
    :param tpm_fn: path to the TPM file
    :param rnafold_fn: rnafold file path
    :param out_dir: output directory
    :return: None
    """
"""
bed = out_dir + "/" + prefix + ".codons.bed"
# execute
dat = MergeDF(bed, rnafold_fn, tpm_fn, out_dir)
print("[execute]\tTransforming the dataframe of RNA secondary structure pairing probabilities", file=sys.stderr)
dat.transformPairProb()
print("[execute]\tLoading tpm", file=sys.stderr)
dat.loadTpm()
print("[execute]\tMerging all the df together", file=sys.stderr)
dat.mergeDf()
sys.stderr.flush()
def scikit_ribo_build(gtf_fn, ref_fn, prefix, rnafold_fn, tpm_fn, out):
"""
    :param gtf_fn: str, gtf file
    :param ref_fn: str, ref fasta file
    :param prefix: str, prefix
    :param rnafold_fn: str, rnafold file path
    :param tpm_fn: str, tpm file path
    :param out: str, output directory
    :return: None
    """
"""
log_status(gtf_fn, ref_fn, prefix, rnafold_fn, tpm_fn, out)
module_gtf(gtf_fn, ref_fn, prefix, out)
module_merge(prefix, tpm_fn, rnafold_fn, out)
print("[status]\tPre-processing module finished", file=sys.stderr)
sys.stderr.flush()
# ----------------------------------------
# parse input arguments
# ----------------------------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-g", help="Gtf file, required")
parser.add_argument("-f", help="Fasta file, required")
parser.add_argument("-p", help="Prefix to use, required")
parser.add_argument("-r", help="Path to the Rnafold file, required")
parser.add_argument("-t", help="TPM of RNAseq sample, required")
parser.add_argument("-o", help="Output path of the built indexes, required")
# check if there is any argument
if len(sys.argv[1:]) == 0:
parser.print_help()
parser.exit()
    args = parser.parse_args()
# process the file if the input files exist
if args.g != None and args.f != None and args.p != None and args.r != None and args.t != None and args.o != None:
sys.stderr.write("[status]\tReading the input file: " + args.g + "\n")
gtf = args.g
fasta = args.f
pre = args.p
rnafold = args.r
tpm = args.t
output = args.o
scikit_ribo_build(gtf, fasta, pre, rnafold, tpm, output)
else:
print("[error]\tmissing argument", file=sys.stderr)
parser.print_usage()
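# Illustrative invocation (file names below are placeholders, not shipped data):
#   python scikit-ribo-build.py -g genes.gtf -f genome.fa -p sample \
#       -r rnafold_pairing.txt -t rnaseq_tpm.txt -o ./scikit_ribo_index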
|
hanfang/scikit-ribo
|
build/scripts-3.4/scikit-ribo-build.py
|
Python
|
gpl-2.0
| 5,237
|
[
"pysam"
] |
031db0d76b32b41812b12f9d0d5cbd6be269af1ddd7d5e9fb42a07969cf225cc
|
from __future__ import unicode_literals
import json
import logging
import numbers
import phonenumbers
import pytz
import regex
import time
import urllib2
import xlwt
import re
from collections import OrderedDict, defaultdict
from datetime import timedelta
from decimal import Decimal
from django.conf import settings
from django.core.files import File
from django.core.files.storage import default_storage
from django.core.files.temp import NamedTemporaryFile
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, Group
from django.db import models, connection
from django.db.models import Q, Count, QuerySet, Sum
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _, ungettext_lazy as _n
from django.utils.html import escape
from enum import Enum
from redis_cache import get_redis_connection
from smartmin.models import SmartModel
from temba.airtime.models import AirtimeTransfer
from temba.contacts.models import Contact, ContactGroup, ContactField, ContactURN, URN, TEL_SCHEME, NEW_CONTACT_VARIABLE
from temba.channels.models import Channel
from temba.locations.models import AdminBoundary, STATE_LEVEL, DISTRICT_LEVEL, WARD_LEVEL
from temba.msgs.models import Broadcast, Msg, FLOW, INBOX, INCOMING, QUEUED, INITIALIZING, HANDLED, SENT, Label, PENDING
from temba.msgs.models import OUTGOING, UnreachableException
from temba.orgs.models import Org, Language, UNREAD_FLOW_MSGS, CURRENT_EXPORT_VERSION
from temba.utils import get_datetime_format, str_to_datetime, datetime_to_str, analytics, json_date_to_datetime, chunk_list
from temba.utils.email import send_template_email, is_valid_address
from temba.utils.models import TembaModel, ChunkIterator, generate_uuid
from temba.utils.profiler import SegmentProfiler
from temba.utils.queues import push_task
from temba.values.models import Value
from twilio import twiml
from uuid import uuid4
logger = logging.getLogger(__name__)
FLOW_DEFAULT_EXPIRES_AFTER = 60 * 12
START_FLOW_BATCH_SIZE = 500
class FlowException(Exception):
def __init__(self, *args, **kwargs):
super(FlowException, self).__init__(*args, **kwargs)
FLOW_LOCK_TTL = 60 # 1 minute
FLOW_LOCK_KEY = 'org:%d:lock:flow:%d:%s'
FLOW_PROP_CACHE_KEY = 'org:%d:cache:flow:%d:%s'
FLOW_PROP_CACHE_TTL = 24 * 60 * 60 * 7 # 1 week
FLOW_STAT_CACHE_KEY = 'org:%d:cache:flow:%d:%s'
UNREAD_FLOW_RESPONSES = 'unread_flow_responses'
# the most frequently we will check if our cache needs rebuilding
FLOW_STAT_CACHE_FREQUENCY = 24 * 60 * 60 # 1 day
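# e.g. (illustrative) rendered keys for org 12, flow 345:
#   FLOW_LOCK_KEY       -> 'org:12:lock:flow:345:activity'
#   FLOW_STAT_CACHE_KEY -> 'org:12:cache:flow:345:visit_count_map'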
class FlowLock(Enum):
"""
Locks that are flow specific
"""
participation = 1
activity = 2
definition = 3
class FlowPropsCache(Enum):
"""
Properties of a flow that we cache
"""
terminal_nodes = 1
category_nodes = 2
class FlowStatsCache(Enum):
"""
Stats we calculate and cache for flows
"""
runs_started_count = 1 # deprecated, no longer used
runs_completed_count = 2 # deprecated, no longer used
contacts_started_set = 3 # deprecated, no longer used
visit_count_map = 4
step_active_set = 5
cache_check = 6
def edit_distance(s1, s2): # pragma: no cover
"""
Compute the Damerau-Levenshtein distance between two given
strings (s1 and s2)
"""
    # if first letters are different, treat the distance as effectively infinite
if s1 and s2 and s1[0] != s2[0]:
return 100
d = {}
lenstr1 = len(s1)
lenstr2 = len(s2)
for i in xrange(-1, lenstr1 + 1):
d[(i, -1)] = i + 1
for j in xrange(-1, lenstr2 + 1):
d[(-1, j)] = j + 1
for i in xrange(0, lenstr1):
for j in xrange(0, lenstr2):
if s1[i] == s2[j]:
cost = 0
else:
cost = 1
d[(i, j)] = min(
d[(i - 1, j)] + 1, # deletion
d[(i, j - 1)] + 1, # insertion
d[(i - 1, j - 1)] + cost, # substitution
)
if i > 1 and j > 1 and s1[i] == s2[j - 1] and s1[i - 1] == s2[j]:
d[(i, j)] = min(d[(i, j)], d[i - 2, j - 2] + cost) # transposition
return d[lenstr1 - 1, lenstr2 - 1]
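# Illustrative values for the function above (hypothetical inputs, for orientation only):
#   edit_distance("color", "color")  == 0    # identical strings
#   edit_distance("color", "colour") == 1    # one insertion
#   edit_distance("yes", "yse")      == 1    # adjacent transposition
#   edit_distance("cat", "bat")      == 100  # different first letters short-circuit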
class Flow(TembaModel):
UUID = 'uuid'
ENTRY = 'entry'
RULE_SETS = 'rule_sets'
ACTION_SETS = 'action_sets'
RULES = 'rules'
CONFIG = 'config'
ACTIONS = 'actions'
DESTINATION = 'destination'
LABEL = 'label'
WEBHOOK_URL = 'webhook'
WEBHOOK_ACTION = 'webhook_action'
FINISHED_KEY = 'finished_key'
RULESET_TYPE = 'ruleset_type'
OPERAND = 'operand'
METADATA = 'metadata'
BASE_LANGUAGE = 'base_language'
SAVED_BY = 'saved_by'
VERSION = 'version'
CONTACT_CREATION = 'contact_creation'
CONTACT_PER_RUN = 'run'
CONTACT_PER_LOGIN = 'login'
SAVED_ON = 'saved_on'
NAME = 'name'
REVISION = 'revision'
VERSION = 'version'
FLOW_TYPE = 'flow_type'
ID = 'id'
EXPIRES = 'expires'
X = 'x'
Y = 'y'
FLOW = 'F'
MESSAGE = 'M'
VOICE = 'V'
SURVEY = 'S'
RULES_ENTRY = 'R'
ACTIONS_ENTRY = 'A'
FLOW_TYPES = ((FLOW, _("Message flow")),
(MESSAGE, _("Single Message Flow")),
(VOICE, _("Phone call flow")),
(SURVEY, _("Android Survey")))
ENTRY_TYPES = ((RULES_ENTRY, "Rules"),
(ACTIONS_ENTRY, "Actions"))
name = models.CharField(max_length=64,
help_text=_("The name for this flow"))
labels = models.ManyToManyField('FlowLabel', related_name='flows', verbose_name=_("Labels"), blank=True,
help_text=_("Any labels on this flow"))
org = models.ForeignKey(Org, related_name='flows')
entry_uuid = models.CharField(null=True, max_length=36, unique=True)
entry_type = models.CharField(max_length=1, null=True, choices=ENTRY_TYPES,
help_text=_("The type of node this flow starts with"))
is_archived = models.BooleanField(default=False,
help_text=_("Whether this flow is archived"))
flow_type = models.CharField(max_length=1, choices=FLOW_TYPES, default=FLOW,
help_text=_("The type of this flow"))
metadata = models.TextField(null=True, blank=True,
help_text=_("Any extra metadata attached to this flow, strictly used by the user interface."))
expires_after_minutes = models.IntegerField(default=FLOW_DEFAULT_EXPIRES_AFTER,
help_text=_("Minutes of inactivity that will cause expiration from flow"))
ignore_triggers = models.BooleanField(default=False,
help_text=_("Ignore keyword triggers while in this flow"))
saved_on = models.DateTimeField(auto_now_add=True,
help_text=_("When this item was saved"))
saved_by = models.ForeignKey(User, related_name="flow_saves",
help_text=_("The user which last saved this flow"))
base_language = models.CharField(max_length=4, null=True, blank=True,
help_text=_('The primary language for editing this flow'),
default='base')
version_number = models.IntegerField(default=CURRENT_EXPORT_VERSION,
help_text=_("The flow version this definition is in"))
@classmethod
def create(cls, org, user, name, flow_type=FLOW, expires_after_minutes=FLOW_DEFAULT_EXPIRES_AFTER, base_language=None):
flow = Flow.objects.create(org=org, name=name, flow_type=flow_type,
expires_after_minutes=expires_after_minutes, base_language=base_language,
saved_by=user, created_by=user, modified_by=user)
analytics.track(user.username, 'nyaruka.flow_created', dict(name=name))
return flow
@classmethod
def create_single_message(cls, org, user, message):
"""
Creates a special 'single message' flow
"""
name = 'Single Message (%s)' % unicode(uuid4())
flow = Flow.create(org, user, name, flow_type=Flow.MESSAGE)
flow.update_single_message_flow(message)
return flow
@classmethod
def label_to_slug(cls, label):
return regex.sub(r'[^a-z0-9]+', '_', label.lower(), regex.V0)
@classmethod
def create_join_group(cls, org, user, group, response=None, start_flow=None):
"""
Creates a special 'join group' flow
"""
base_language = org.primary_language.iso_code if org.primary_language else 'base'
name = Flow.get_unique_name(org, 'Join %s' % group.name)
flow = Flow.create(org, user, name, base_language=base_language)
uuid = unicode(uuid4())
actions = [dict(type='add_group', group=dict(uuid=group.uuid, name=group.name)),
dict(type='save', field='name', label='Contact Name', value='@(PROPER(REMOVE_FIRST_WORD(step.value)))')]
if response:
actions += [dict(type='reply', msg={base_language: response})]
if start_flow:
actions += [dict(type='flow', flow=dict(uuid=start_flow.uuid, name=start_flow.name))]
action_sets = [dict(x=100, y=0, uuid=uuid, actions=actions)]
flow.update(dict(entry=uuid, base_language=base_language,
rule_sets=[], action_sets=action_sets))
return flow
@classmethod
def import_flows(cls, exported_json, org, user, same_site=False):
"""
Import flows from our flow export file
"""
created_flows = []
flow_uuid_map = dict()
# create all the flow containers first
for flow_spec in exported_json['flows']:
FlowRevision.validate_flow_definition(flow_spec)
flow_type = flow_spec.get('flow_type', Flow.FLOW)
name = flow_spec['metadata']['name'][:64].strip()
flow = None
# Don't create our campaign message flows, we'll do that later
# this check is only needed up to version 3 of exports
if flow_type != Flow.MESSAGE:
# check if we can find that flow by id first
if same_site:
flow = Flow.objects.filter(org=org, is_active=True, uuid=flow_spec['metadata']['uuid']).first()
if flow:
flow.expires_after_minutes = flow_spec['metadata'].get('expires', FLOW_DEFAULT_EXPIRES_AFTER)
flow.name = Flow.get_unique_name(org, name, ignore=flow)
flow.save(update_fields=['name', 'expires_after_minutes'])
# if it's not of our world, let's try by name
if not flow:
flow = Flow.objects.filter(org=org, is_active=True, name=name).first()
# if there isn't one already, create a new flow
if not flow:
flow = Flow.create(org, user, Flow.get_unique_name(org, name), flow_type=flow_type,
expires_after_minutes=flow_spec['metadata'].get('expires', FLOW_DEFAULT_EXPIRES_AFTER))
created_flows.append(dict(flow=flow, flow_spec=flow_spec))
if 'uuid' in flow_spec['metadata']:
flow_uuid_map[flow_spec['metadata']['uuid']] = flow.uuid
# now let's update our flow definitions with any referenced flows
def remap_flow(element):
# first map our id accordingly
if element['uuid'] in flow_uuid_map:
element['uuid'] = flow_uuid_map[element['uuid']]
existing_flow = Flow.objects.filter(uuid=element['uuid'], org=org, is_active=True).first()
if not existing_flow:
existing_flow = Flow.objects.filter(org=org, name=element['name'], is_active=True).first()
if existing_flow:
element['uuid'] = existing_flow.uuid
for created in created_flows:
for ruleset in created['flow_spec'][Flow.RULE_SETS]:
if ruleset['ruleset_type'] == RuleSet.TYPE_SUBFLOW:
remap_flow(ruleset['config']['flow'])
for actionset in created['flow_spec'][Flow.ACTION_SETS]:
for action in actionset['actions']:
if action['type'] in ['flow', 'trigger-flow']:
remap_flow(action['flow'])
remap_flow(created['flow_spec']['metadata'])
created['flow'].import_definition(created['flow_spec'])
# remap our flow ids according to how they were resolved
if 'campaigns' in exported_json:
for campaign in exported_json['campaigns']:
for event in campaign['events']:
if 'flow' in event:
flow_uuid = event['flow']['uuid']
if flow_uuid in flow_uuid_map:
event['flow']['uuid'] = flow_uuid_map[flow_uuid]
if 'triggers' in exported_json:
for trigger in exported_json['triggers']:
if 'flow' in trigger:
flow_uuid = trigger['flow']['uuid']
if flow_uuid in flow_uuid_map:
trigger['flow']['uuid'] = flow_uuid_map[flow_uuid]
return exported_json
@classmethod
def copy(cls, flow, user):
copy = Flow.create(flow.org, user, "Copy of %s" % flow.name[:55], flow_type=flow.flow_type)
# grab the json of our original
flow_json = flow.as_json()
copy.import_definition(flow_json)
# copy our expiration as well
copy.expires_after_minutes = flow.expires_after_minutes
copy.save()
return copy
@classmethod
def get_node(cls, flow, uuid, destination_type):
if not uuid or not destination_type:
return None
if destination_type == FlowStep.TYPE_RULE_SET:
return RuleSet.get(flow, uuid)
else:
return ActionSet.get(flow, uuid)
@classmethod
def handle_call(cls, call, user_response=None, hangup=False):
if not user_response:
user_response = {}
flow = call.flow
run = FlowRun.objects.filter(call=call).first()
# make sure we have the latest version
flow.ensure_current_version()
# what we will send back
voice_response = twiml.Response()
run.voice_response = voice_response
# make sure our test contact is handled by simulation
if call.contact.is_test:
Contact.set_simulation(True)
# parse the user response
text = user_response.get('Digits', None)
media_url = user_response.get('RecordingUrl', None)
# if we've been sent a recording, go grab it
if media_url:
media_url = call.channel.get_ivr_client().download_media(media_url)
# create a message to hold our inbound message
from temba.msgs.models import IVR
if text is not None or media_url:
            # we don't have text for media, so let's use the media value there too
if media_url and ':' in media_url:
text = media_url.partition(':')[2]
msg = Msg.create_incoming(call.channel, call.contact_urn.urn,
text, status=PENDING, msg_type=IVR, media=media_url)
else:
msg = Msg(org=call.org, contact=call.contact, text='', id=0)
# find out where we last left off
step = run.steps.all().order_by('-arrived_on').first()
# if we are just starting the flow, create our first step
if not step:
# lookup our entry node
destination = ActionSet.objects.filter(flow=run.flow, uuid=flow.entry_uuid).first()
if not destination:
destination = RuleSet.objects.filter(flow=run.flow, uuid=flow.entry_uuid).first()
# and add our first step for our run
if destination:
step = flow.add_step(run, destination, [], call=call)
# go and actually handle wherever we are in the flow
destination = Flow.get_node(run.flow, step.step_uuid, step.step_type)
(handled, msgs) = Flow.handle_destination(destination, step, run, msg, user_input=text is not None)
# if we stopped needing user input (likely), then wrap our response accordingly
voice_response = Flow.wrap_voice_response_with_input(call, run, voice_response)
# if we handled it, increment our unread count
if handled and not call.contact.is_test:
run.flow.increment_unread_responses()
if msg.id:
Msg.mark_handled(msg)
# if we didn't handle it, this is a good time to hangup
if not handled or hangup:
voice_response.hangup()
run.set_completed(final_step=step)
return voice_response
@classmethod
def wrap_voice_response_with_input(cls, call, run, voice_response):
""" Finds where we are in the flow and wraps our voice_response with whatever comes next """
step = run.steps.all().order_by('-pk').first()
destination = Flow.get_node(run.flow, step.step_uuid, step.step_type)
if isinstance(destination, RuleSet):
response = twiml.Response()
callback = 'https://%s%s' % (settings.TEMBA_HOST, reverse('ivr.ivrcall_handle', args=[call.pk]))
gather = destination.get_voice_input(response, action=callback)
# recordings have to be tacked on last
if destination.ruleset_type == RuleSet.TYPE_WAIT_RECORDING:
voice_response.record(action=callback)
elif gather:
# nest all of our previous verbs in our gather
for verb in voice_response.verbs:
gather.append(verb)
voice_response = response
# append a redirect at the end in case the user sends #
voice_response.append(twiml.Redirect(url=callback + "?empty=1"))
return voice_response
@classmethod
def get_unique_name(cls, org, base_name, ignore=None):
"""
Generates a unique flow name based on the given base name
"""
name = base_name[:64].strip()
count = 2
while True:
flows = Flow.objects.filter(name=name, org=org, is_active=True)
if ignore:
flows = flows.exclude(pk=ignore.pk)
if not flows.exists():
break
name = '%s %d' % (base_name[:59].strip(), count)
count += 1
return name
@classmethod
def find_and_handle(cls, msg, started_flows=None, voice_response=None,
triggered_start=False, resume_parent_run=False, resume_after_timeout=False):
if started_flows is None:
started_flows = []
steps = FlowStep.get_active_steps_for_contact(msg.contact, step_type=FlowStep.TYPE_RULE_SET)
for step in steps:
flow = step.run.flow
flow.ensure_current_version()
destination = Flow.get_node(flow, step.step_uuid, step.step_type)
# this node doesn't exist anymore, mark it as left so they leave the flow
if not destination: # pragma: no cover
step.run.set_completed(final_step=step)
continue
(handled, msgs) = Flow.handle_destination(destination, step, step.run, msg, started_flows,
user_input=True, triggered_start=triggered_start,
resume_parent_run=resume_parent_run,
resume_after_timeout=resume_after_timeout)
if handled:
# increment our unread count if this isn't the simulator
if not msg.contact.is_test:
flow.increment_unread_responses()
return True
return False
@classmethod
def handle_destination(cls, destination, step, run, msg,
started_flows=None, is_test_contact=False, user_input=False,
triggered_start=False, trigger_send=True, resume_parent_run=False, resume_after_timeout=False):
if started_flows is None:
started_flows = []
def add_to_path(path, uuid):
if uuid in path:
path.append(uuid)
raise FlowException("Flow cycle detected at runtime: %s" % path)
path.append(uuid)
start_time = time.time()
path = []
msgs = []
# lookup our next destination
handled = False
while destination:
            result = dict(handled=False)
if destination.get_step_type() == FlowStep.TYPE_RULE_SET:
should_pause = False
# check if we need to stop
if destination.is_pause() or msg.status == HANDLED:
should_pause = True
if (user_input or resume_after_timeout) or not should_pause:
result = Flow.handle_ruleset(destination, step, run, msg, started_flows, resume_parent_run, resume_after_timeout)
add_to_path(path, destination.uuid)
# if we used this input, then mark our user input as used
if should_pause:
user_input = False
# once we handle user input, reset our path
path = []
elif destination.get_step_type() == FlowStep.TYPE_ACTION_SET:
result = Flow.handle_actionset(destination, step, run, msg, started_flows, is_test_contact)
add_to_path(path, destination.uuid)
# add any generated messages to be sent at once
msgs += result['msgs']
# if this is a triggered start, we only consider user input on the first step, so clear it now
if triggered_start:
user_input = False
# pull out our current state from the result
step = result.get('step')
# lookup our next destination
destination = result.get('destination', None)
# if any one of our destinations handled us, consider it handled
if result.get('handled', False):
handled = True
resume_parent_run = False
resume_after_timeout = False
if handled:
analytics.gauge('temba.flow_execution', time.time() - start_time)
# send any messages generated
if msgs and trigger_send:
msgs.sort(key=lambda message: message.created_on)
run.flow.org.trigger_send(msgs)
return handled, msgs
@classmethod
def handle_actionset(cls, actionset, step, run, msg, started_flows, is_test_contact=False):
# not found, escape out, but we still handled this message, user is now out of the flow
if not actionset: # pragma: no cover
run.set_completed(final_step=step)
return dict(handled=True, destination=None, destination_type=None)
# actually execute all the actions in our actionset
msgs = actionset.execute_actions(run, msg, started_flows)
for msg in msgs:
step.add_message(msg)
# and onto the destination
destination = Flow.get_node(actionset.flow, actionset.destination, actionset.destination_type)
if destination:
arrived_on = timezone.now()
step.left_on = arrived_on
step.next_uuid = destination.uuid
step.save(update_fields=['left_on', 'next_uuid'])
step = run.flow.add_step(run, destination, previous_step=step, arrived_on=arrived_on)
else:
run.set_completed(final_step=step)
step = None
return dict(handled=True, destination=destination, step=step, msgs=msgs)
@classmethod
def handle_ruleset(cls, ruleset, step, run, msg, started_flows, resume_parent_run=False, resume_after_timeout=False):
if ruleset.ruleset_type == RuleSet.TYPE_SUBFLOW:
if not resume_parent_run:
flow_uuid = json.loads(ruleset.config).get('flow').get('uuid')
flow = Flow.objects.filter(org=run.org, uuid=flow_uuid).first()
message_context = run.flow.build_message_context(run.contact, msg)
# our extra will be the current flow variables
extra = message_context.get('extra', {})
extra['flow'] = message_context.get('flow', {})
if msg.id > 0:
step.add_message(msg)
run.update_expiration(timezone.now())
if flow:
flow.start([], [run.contact], started_flows=started_flows, restart_participants=True,
extra=extra, parent_run=run, interrupt=False)
return dict(handled=True, destination=None, destination_type=None)
# find a matching rule
rule, value = ruleset.find_matching_rule(step, run, msg, resume_after_timeout=resume_after_timeout)
flow = ruleset.flow
# add the message to our step
if msg.id > 0:
step.add_message(msg)
run.update_expiration(timezone.now())
if ruleset.ruleset_type in RuleSet.TYPE_MEDIA:
# store the media path as the value
value = msg.media.split(':', 1)[1]
step.save_rule_match(rule, value)
ruleset.save_run_value(run, rule, value)
# output the new value if in the simulator
if run.contact.is_test:
ActionLog.create(run, _("Saved '%s' as @flow.%s") % (value, Flow.label_to_slug(ruleset.label)))
# no destination for our rule? we are done, though we did handle this message, user is now out of the flow
if not rule.destination:
# log it for our test contacts
run.set_completed(final_step=step)
return dict(handled=True, destination=None, destination_type=None)
# Create the step for our destination
destination = Flow.get_node(flow, rule.destination, rule.destination_type)
if destination:
arrived_on = timezone.now()
step.left_on = arrived_on
step.next_uuid = rule.destination
step.save(update_fields=['left_on', 'next_uuid'])
step = flow.add_step(run, destination, rule=rule.uuid, category=rule.category, previous_step=step)
return dict(handled=True, destination=destination, step=step)
@classmethod
def apply_action_label(cls, user, flows, label, add):
return label.toggle_label(flows, add)
@classmethod
def apply_action_archive(cls, user, flows):
changed = []
for flow in flows:
flow.archive()
changed.append(flow.pk)
return changed
@classmethod
def apply_action_restore(cls, user, flows):
changed = []
for flow in flows:
try:
flow.restore()
changed.append(flow.pk)
except FlowException: # pragma: no cover
pass
return changed
@classmethod
def build_flow_context(cls, flow, contact, contact_context=None):
"""
Get a flow context built on the last run for the contact in the given flow
"""
date_format = get_datetime_format(flow.org.get_dayfirst())[1]
tz = pytz.timezone(flow.org.timezone)
# wrapper around our value dict, lets us do a nice representation of both @flow.foo and @flow.foo.text
def value_wrapper(value):
values = dict(text=value['text'],
time=datetime_to_str(value['time'], format=date_format, tz=tz),
category=flow.get_localized_text(value['category'], contact),
value=unicode(value['rule_value']))
values['__default__'] = unicode(value['rule_value'])
return values
flow_context = {}
values = []
if contact:
results = flow.get_results(contact, only_last_run=True)
if results and results[0]:
for value in results[0]['values']:
field = Flow.label_to_slug(value['label'])
flow_context[field] = value_wrapper(value)
values.append("%s: %s" % (value['label'], value['rule_value']))
flow_context['__default__'] = "\n".join(values)
# if we don't have a contact context, build one
if not contact_context:
flow_context['contact'] = contact.build_message_context()
return flow_context
def as_select2(self):
return dict(id=self.uuid, text=self.name)
def release(self):
"""
Releases this flow, marking it inactive. We remove all flow runs, steps and values in a background process.
We keep FlowRevisions and FlowStarts however.
"""
from .tasks import delete_flow_results_task
self.is_active = False
self.save()
# release any campaign events that depend on this flow
from temba.campaigns.models import CampaignEvent
for event in CampaignEvent.objects.filter(flow=self, is_active=True):
event.release()
# release any triggers that depend on this flow
from temba.triggers.models import Trigger
for trigger in Trigger.objects.filter(flow=self, is_active=True):
trigger.release()
# delete our results in the background
delete_flow_results_task.delay(self.id)
def delete_results(self):
"""
Removes all flow runs, values and steps for a flow.
"""
# grab the ids of all our runs
run_ids = self.runs.all().values_list('id', flat=True)
# in chunks of 1000, remove any values or flowsteps associated with these runs
# we keep Runs around for auditing purposes
for chunk in chunk_list(run_ids, 1000):
Value.objects.filter(run__in=chunk).delete()
FlowStep.objects.filter(run__in=chunk).delete()
# clear all our cached stats
self.clear_props_cache()
self.clear_stats_cache()
def clear_props_cache(self):
r = get_redis_connection()
keys = [self.get_props_cache_key(c) for c in FlowPropsCache.__members__.values()]
r.delete(*keys)
def clear_stats_cache(self):
r = get_redis_connection()
keys = [self.get_stats_cache_key(c) for c in FlowStatsCache.__members__.values()]
r.delete(*keys)
def get_props_cache_key(self, kind):
return FLOW_PROP_CACHE_KEY % (self.org_id, self.pk, kind.name)
def get_stats_cache_key(self, kind, item=None):
name = kind
if hasattr(kind, 'name'):
name = kind.name
cache_key = FLOW_STAT_CACHE_KEY % (self.org_id, self.pk, name)
if item:
cache_key += (':%s' % item)
return cache_key
def calculate_active_step_keys(self):
"""
        Returns the step-active-set cache keys for every ActionSet and RuleSet on this flow.
        :return: a list of redis cache keys
"""
# first look up any action set uuids
steps = list(self.action_sets.values('uuid'))
# then our ruleset uuids
steps += list(self.rule_sets.values('uuid'))
        # build the active-set cache key for each node uuid
return [self.get_stats_cache_key(FlowStatsCache.step_active_set, step['uuid']) for step in steps]
def lock_on(self, lock, qualifier=None, lock_ttl=None):
"""
Creates the requested type of flow-level lock
"""
r = get_redis_connection()
lock_key = FLOW_LOCK_KEY % (self.org_id, self.pk, lock.name)
if qualifier:
lock_key += (":%s" % qualifier)
if not lock_ttl:
lock_ttl = FLOW_LOCK_TTL
return r.lock(lock_key, lock_ttl)
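    # Typical usage (see do_calculate_flow_stats below):
    #   with flow.lock_on(FlowLock.activity):
    #       ...rebuild activity caches while holding the flow-level lock...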
def do_calculate_flow_stats(self, lock_ttl=None):
r = get_redis_connection()
# activity
with self.lock_on(FlowLock.activity, lock_ttl=lock_ttl):
(active, visits) = self._calculate_activity()
# remove our old active cache
keys = self.calculate_active_step_keys()
if keys:
r.delete(*keys)
r.delete(self.get_stats_cache_key(FlowStatsCache.visit_count_map))
# add current active cache
for step, runs in active.items():
for run in runs:
r.sadd(self.get_stats_cache_key(FlowStatsCache.step_active_set, step), run)
if len(visits):
r.hmset(self.get_stats_cache_key(FlowStatsCache.visit_count_map), visits)
def _calculate_activity(self, simulation=False):
"""
Calculate our activity stats from the database. This is expensive. It should only be run
for simulation or in an async task to rebuild the activity cache
"""
# who is actively at each step
steps = FlowStep.objects.values('run__pk', 'step_uuid').filter(run__is_active=True, run__flow=self, left_on=None, run__contact__is_test=simulation).annotate(count=Count('run_id'))
active = {}
for step in steps:
step_id = step['step_uuid']
if step_id not in active:
active[step_id] = {step['run__pk']}
else:
active[step_id].add(step['run__pk'])
# need to be a list for json
for key, value in active.items():
active[key] = list(value)
visits = {}
visited_actions = FlowStep.objects.values('step_uuid', 'next_uuid').filter(run__flow=self, step_type='A', run__contact__is_test=simulation).annotate(count=Count('run_id'))
visited_rules = FlowStep.objects.values('rule_uuid', 'next_uuid').filter(run__flow=self, step_type='R', run__contact__is_test=simulation).exclude(rule_uuid=None).annotate(count=Count('run_id'))
# where have people visited
for step in visited_actions:
if step['next_uuid'] and step['count']:
visits['%s:%s' % (step['step_uuid'], step['next_uuid'])] = step['count']
for step in visited_rules:
if step['next_uuid'] and step['count']:
visits['%s:%s' % (step['rule_uuid'], step['next_uuid'])] = step['count']
return (active, visits)
def _check_for_cache_update(self):
"""
Checks if we have a redis cache for our flow stats, or whether they need to be updated.
If so, triggers an async rebuild of the cache for our flow.
"""
from .tasks import check_flow_stats_accuracy_task
r = get_redis_connection()
# don't do the more expensive check if it was performed recently
cache_check = self.get_stats_cache_key(FlowStatsCache.cache_check)
if r.exists(cache_check):
return
# don't check again for a day or so, add up to an hour of randomness
# to spread things out a bit
import random
r.set(cache_check, 1, FLOW_STAT_CACHE_FREQUENCY + random.randint(0, 60 * 60))
# check flow stats for accuracy, rebuilding if necessary
check_flow_stats_accuracy_task.delay(self.pk)
def get_activity(self, simulation=False, check_cache=True):
"""
Get the activity summary for a flow as a tuple of the number of active runs
at each step and a map of the previous visits
"""
if simulation:
(active, visits) = self._calculate_activity(simulation=True)
# we want counts not actual run ids
for key, value in active.items():
active[key] = len(value)
return (active, visits)
if check_cache:
self._check_for_cache_update()
r = get_redis_connection()
# get all possible active keys
keys = self.calculate_active_step_keys()
active = {}
for key in keys:
count = r.scard(key)
# only include stats for steps that actually have people there
if count:
active[key[key.rfind(':') + 1:]] = count
# visited path
visited = r.hgetall(self.get_stats_cache_key(FlowStatsCache.visit_count_map))
# make sure our counts are treated as ints for consistency
for k, v in visited.items():
visited[k] = int(v)
return (active, visited)
def get_total_runs(self):
return FlowRunCount.run_count(self)
def get_base_text(self, language_dict, default=''):
if not isinstance(language_dict, dict): # pragma: no cover
return language_dict
if self.base_language:
return language_dict.get(self.base_language, default)
return default # pragma: no cover
def get_localized_text(self, text_translations, contact=None, default_text=''):
"""
Given a language dict and a preferred language, return the best possible text match
:param text_translations: The text in all supported languages, or string (which will just return immediately)
:param contact: the contact we are interacting with
:param default_text: What to use if all else fails
:return: the localized text
"""
org_languages = {l.iso_code for l in self.org.languages.all()}
# We return according to the following precedence:
# 1) Contact's language (if it's a valid org language)
# 2) Org Primary Language
# 3) Flow Base Language
# 4) Default Text
preferred_languages = []
if contact and contact.language and contact.language in org_languages:
preferred_languages.append(contact.language)
if self.org.primary_language:
preferred_languages.append(self.org.primary_language.iso_code)
preferred_languages.append(self.base_language)
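        # e.g. (illustrative): a contact with language 'fra' in an org whose
        # primary language is 'eng' and a flow whose base_language is 'base'
        # yields preferred_languages == ['fra', 'eng', 'base']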
return Language.get_localized_text(text_translations, preferred_languages, default_text)
def update_run_expirations(self):
"""
Update all of our current run expirations according to our new expiration period
"""
for step in FlowStep.objects.filter(run__flow=self, run__is_active=True, left_on=None).distinct('run'):
step.run.update_expiration(step.arrived_on)
# force an expiration update
from temba.flows.tasks import check_flows_task
check_flows_task.delay()
def import_definition(self, flow_json):
"""
        Allows setting the definition for a flow from another definition. All uuids will be
        remapped accordingly.
"""
# uuid mappings
uuid_map = dict()
def copy_recording(url, path):
if not url:
return None
try:
url = "https://%s/%s" % (settings.AWS_BUCKET_DOMAIN, url)
temp = NamedTemporaryFile(delete=True)
temp.write(urllib2.urlopen(url).read())
temp.flush()
return default_storage.save(path, temp)
except Exception:
                # it's okay if it's no longer there, we'll remove the recording
return None
def remap_uuid(json, attribute):
if attribute in json and json[attribute]:
uuid = json[attribute]
new_uuid = uuid_map.get(uuid, None)
if not new_uuid:
new_uuid = str(uuid4())
uuid_map[uuid] = new_uuid
json[attribute] = new_uuid
remap_uuid(flow_json, 'entry')
for actionset in flow_json[Flow.ACTION_SETS]:
remap_uuid(actionset, 'uuid')
remap_uuid(actionset, 'destination')
# for all of our recordings, pull them down and remap
for action in actionset['actions']:
if 'recording' in action:
                # if it's a localized recording
if isinstance(action['recording'], dict):
for lang, url in action['recording'].iteritems():
path = copy_recording(url, 'recordings/%d/%d/steps/%s.wav' % (self.org.pk, self.pk, action['uuid']))
action['recording'][lang] = path
else:
path = copy_recording(action['recording'], 'recordings/%d/%d/steps/%s.wav' % (self.org.pk, self.pk, action['uuid']))
action['recording'] = path
for ruleset in flow_json[Flow.RULE_SETS]:
remap_uuid(ruleset, 'uuid')
for rule in ruleset.get('rules', []):
remap_uuid(rule, 'uuid')
remap_uuid(rule, 'destination')
# now update with our remapped values
self.update(flow_json)
return self
def set_metadata_json(self, metadata):
self.metadata = json.dumps(metadata)
def get_metadata_json(self):
metadata = {}
if self.metadata:
metadata = json.loads(self.metadata)
return metadata
def archive(self):
self.is_archived = True
self.save(update_fields=['is_archived'])
# archive our triggers as well
from temba.triggers.models import Trigger
Trigger.objects.filter(flow=self).update(is_archived=True)
def restore(self):
if self.flow_type == Flow.VOICE:
if not self.org.supports_ivr():
raise FlowException("%s requires a Twilio number")
self.is_archived = False
self.save(update_fields=['is_archived'])
# we don't know enough to restore triggers automatically
def update_single_message_flow(self, message):
self.flow_type = Flow.MESSAGE
self.save(update_fields=['name', 'flow_type'])
uuid = str(uuid4())
action_sets = [dict(x=100, y=0, uuid=uuid, actions=[dict(type='reply', msg=dict(base=message))])]
self.update(dict(entry=uuid, rule_sets=[], action_sets=action_sets, base_language='base'))
def steps(self):
return FlowStep.objects.filter(run__flow=self)
def get_completed_runs(self):
return FlowRunCount.run_count_for_type(self, FlowRun.EXIT_TYPE_COMPLETED)
def get_interrupted_runs(self):
return FlowRunCount.run_count_for_type(self, FlowRun.EXIT_TYPE_INTERRUPTED)
def get_expired_runs(self):
return FlowRunCount.run_count_for_type(self, FlowRun.EXIT_TYPE_EXPIRED)
def get_completed_percentage(self):
total_runs = FlowRunCount.run_count(self)
if not total_runs:
return 0
else:
return int(self.get_completed_runs() * 100 / total_runs)
def get_and_clear_unread_responses(self):
"""
Gets the number of new responses since the last clearing for this flow.
"""
r = get_redis_connection()
# get the number of new responses
new_responses = r.hget(UNREAD_FLOW_RESPONSES, self.id)
# then clear them
r.hdel(UNREAD_FLOW_RESPONSES, self.id)
return 0 if new_responses is None else int(new_responses)
def increment_unread_responses(self):
"""
Increments the number of new responses for this flow.
"""
r = get_redis_connection()
r.hincrby(UNREAD_FLOW_RESPONSES, self.id, 1)
# increment our global count as well
self.org.increment_unread_msg_count(UNREAD_FLOW_MSGS)
def get_columns(self):
node_order = []
for ruleset in RuleSet.objects.filter(flow=self).exclude(label=None).order_by('y', 'pk'):
if ruleset.uuid:
node_order.append(ruleset)
return node_order
def build_ruleset_caches(self, ruleset_list=None):
rulesets = dict()
rule_categories = dict()
if ruleset_list is None:
ruleset_list = RuleSet.objects.filter(flow=self).exclude(label=None).order_by('pk').select_related('flow', 'flow__org')
for ruleset in ruleset_list:
rulesets[ruleset.uuid] = ruleset
for rule in ruleset.get_rules():
rule_categories[rule.uuid] = rule.category
return (rulesets, rule_categories)
def build_message_context(self, contact, msg):
contact_context = contact.build_message_context() if contact else dict()
# our default value
channel_context = None
# add our message context
if msg:
message_context = msg.build_message_context()
# some fake channel deets for simulation
if msg.contact.is_test:
channel_context = dict(__default__='(800) 555-1212', name='Simulator', tel='(800) 555-1212', tel_e164='+18005551212')
elif msg.channel:
channel_context = msg.channel.build_message_context()
elif contact:
message_context = dict(__default__='', contact=contact_context)
else:
message_context = dict(__default__='')
# If we still don't know our channel and have a contact, derive the right channel to use
if not channel_context and contact:
_contact, contact_urn = Msg.resolve_recipient(self.org, self.created_by, contact, None)
# only populate channel if this contact can actually be reached (ie, has a URN)
if contact_urn:
channel = contact.org.get_send_channel(contact_urn=contact_urn)
channel_context = channel.build_message_context() if channel else None
run = self.runs.filter(contact=contact).order_by('-created_on').first()
run_context = run.field_dict() if run else {}
# our current flow context
flow_context = Flow.build_flow_context(self, contact, contact_context)
context = dict(flow=flow_context, channel=channel_context, step=message_context, extra=run_context)
# if we have parent or child contexts, add them in too
if run:
if run.parent:
context['parent'] = Flow.build_flow_context(run.parent.flow, run.parent.contact)
# see if we spawned any children and add them too
child_run = FlowRun.objects.filter(parent=run).order_by('-created_on').first()
if child_run:
context['child'] = Flow.build_flow_context(child_run.flow, child_run.contact)
if contact:
context['contact'] = contact_context
return context
def get_results(self, contact=None, filter_ruleset=None, only_last_run=True, run=None):
if filter_ruleset:
ruleset_list = [filter_ruleset]
elif run and hasattr(run.flow, 'ruleset_prefetch'):
ruleset_list = run.flow.ruleset_prefetch
else:
ruleset_list = None
(rulesets, rule_categories) = self.build_ruleset_caches(ruleset_list)
# for each of the contacts that participated
results = []
if run:
runs = [run]
flow_steps = [s for s in run.steps.all() if s.rule_uuid]
else:
runs = self.runs.all().select_related('contact')
# hide simulation test contact
runs = runs.filter(contact__is_test=Contact.get_simulation())
if contact:
runs = runs.filter(contact=contact)
runs = runs.order_by('contact', '-created_on')
# or possibly only the last run
if only_last_run:
runs = runs.distinct('contact')
flow_steps = FlowStep.objects.filter(step_uuid__in=rulesets.keys()).exclude(rule_uuid=None)
# filter our steps to only the runs we care about
flow_steps = flow_steps.filter(run__pk__in=[r.pk for r in runs])
# and the ruleset we care about
if filter_ruleset:
flow_steps = flow_steps.filter(step_uuid=filter_ruleset.uuid)
flow_steps = flow_steps.order_by('arrived_on', 'pk')
flow_steps = flow_steps.select_related('run').prefetch_related('messages', 'broadcasts')
steps_cache = {}
for step in flow_steps:
step_dict = dict(left_on=step.left_on,
arrived_on=step.arrived_on,
rule_uuid=step.rule_uuid,
rule_category=step.rule_category,
rule_decimal_value=step.rule_decimal_value,
rule_value=step.rule_value,
text=step.get_text(),
step_uuid=step.step_uuid)
step_run = step.run.id
            if step_run in steps_cache:
steps_cache[step_run].append(step_dict)
else:
steps_cache[step_run] = [step_dict]
for run in runs:
first_seen = None
last_seen = None
values = []
if run.id in steps_cache:
run_steps = steps_cache[run.id]
else:
run_steps = []
for rule_step in run_steps:
ruleset = rulesets.get(rule_step['step_uuid'])
if not first_seen:
first_seen = rule_step['left_on']
last_seen = rule_step['arrived_on']
if ruleset:
time = rule_step['left_on'] if rule_step['left_on'] else rule_step['arrived_on']
label = ruleset.label
category = rule_categories.get(rule_step['rule_uuid'], None)
# if this category no longer exists, use the category label at the time
if not category:
category = rule_step['rule_category']
value = rule_step['rule_decimal_value'] if rule_step['rule_decimal_value'] is not None else rule_step['rule_value']
values.append(dict(node=rule_step['step_uuid'],
label=label,
category=category,
text=rule_step['text'],
value=value,
rule_value=rule_step['rule_value'],
time=time))
results.append(dict(contact=run.contact, values=values, first_seen=first_seen, last_seen=last_seen, run=run.pk))
# sort so most recent is first
now = timezone.now()
results = sorted(results, reverse=True, key=lambda result: result['first_seen'] if result['first_seen'] else now)
return results
def async_start(self, user, groups, contacts, restart_participants=False):
"""
Causes us to schedule a flow to start in a background thread.
"""
from .tasks import start_flow_task
# create a flow start object
flow_start = FlowStart.objects.create(flow=self, restart_participants=restart_participants,
created_by=user, modified_by=user)
contact_ids = [c.id for c in contacts]
flow_start.contacts.add(*contact_ids)
group_ids = [g.id for g in groups]
flow_start.groups.add(*group_ids)
start_flow_task.delay(flow_start.pk)
def start(self, groups, contacts, restart_participants=False, started_flows=None,
start_msg=None, extra=None, flow_start=None, parent_run=None, interrupt=True):
"""
Starts a flow for the passed in groups and contacts.
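
        e.g. (illustrative, 'reporters' being a hypothetical ContactGroup):

            flow.start([reporters], [], restart_participants=True)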
"""
# build up querysets of our groups for memory efficiency
if isinstance(groups, QuerySet): # pragma: no cover
group_qs = groups
else:
group_qs = ContactGroup.all_groups.filter(id__in=[g.id for g in groups])
# build up querysets of our contacts for memory efficiency
if isinstance(contacts, QuerySet): # pragma: no cover
contact_qs = contacts
else:
contact_qs = Contact.objects.filter(id__in=[c.id for c in contacts])
self.ensure_current_version()
if started_flows is None:
started_flows = []
# prevents infinite loops
if self.pk in started_flows:
return
# add this flow to our list of started flows
started_flows.append(self.pk)
if not self.entry_uuid:
return
if start_msg and start_msg.id:
start_msg.msg_type = FLOW
start_msg.save(update_fields=['msg_type'])
all_contact_ids = Contact.all().filter(Q(all_groups__in=group_qs) | Q(pk__in=contact_qs))
all_contact_ids = all_contact_ids.only('is_test').order_by('pk').values_list('pk', flat=True).distinct('pk')
if not restart_participants:
# exclude anybody who has already participated in the flow
already_started = set(self.runs.all().values_list('contact_id', flat=True))
all_contact_ids = [contact_id for contact_id in all_contact_ids if contact_id not in already_started]
# if we have a parent run, find any parents/grandparents that are active, we'll keep these active
ancestor_ids = []
ancestor = parent_run
while ancestor:
ancestor_ids.append(ancestor.id)
ancestor = ancestor.parent
# for the contacts that will be started, exit any existing flow runs
active_runs = FlowRun.objects.filter(is_active=True, contact__pk__in=all_contact_ids).exclude(id__in=ancestor_ids)
FlowRun.bulk_exit(active_runs, FlowRun.EXIT_TYPE_INTERRUPTED)
# if we are interrupting parent flow runs, mark them as completed
if ancestor_ids and interrupt:
ancestor_runs = FlowRun.objects.filter(id__in=ancestor_ids)
FlowRun.bulk_exit(ancestor_runs, FlowRun.EXIT_TYPE_COMPLETED)
contact_count = len(all_contact_ids)
# update our total flow count on our flow start so we can keep track of when it is finished
if flow_start:
flow_start.contact_count = contact_count
flow_start.save(update_fields=['contact_count'])
# if there are no contacts to start this flow, then update our status and exit this flow
if contact_count == 0:
if flow_start:
flow_start.update_status()
return
# single contact starting from a trigger? increment our unread count
if start_msg and contact_count == 1:
if Contact.objects.filter(pk=all_contact_ids[0], org=self.org, is_test=False).first():
self.increment_unread_responses()
if self.flow_type == Flow.VOICE:
return self.start_call_flow(all_contact_ids, start_msg=start_msg,
extra=extra, flow_start=flow_start, parent_run=parent_run)
else:
return self.start_msg_flow(all_contact_ids,
started_flows=started_flows, start_msg=start_msg,
extra=extra, flow_start=flow_start, parent_run=parent_run)
def start_call_flow(self, all_contact_ids, start_msg=None, extra=None, flow_start=None, parent_run=None):
from temba.ivr.models import IVRCall
runs = []
channel = self.org.get_call_channel()
from temba.channels.models import CALL
if not channel or CALL not in channel.role:
return runs
for contact_id in all_contact_ids:
            contact = Contact.objects.filter(pk=contact_id, org=self.org).first()

            # no such contact, move on
            if not contact:  # pragma: no cover
                continue

            contact_urn = contact.get_urn(TEL_SCHEME)
            channel = self.org.get_call_channel(contact_urn=contact_urn)

            # can't reach this contact, move on
            if not contact_urn or not channel:  # pragma: no cover
                continue
run = FlowRun.create(self, contact_id, start=flow_start, parent=parent_run)
if extra:
run.update_fields(extra)
# create our call objects
call = IVRCall.create_outgoing(channel, contact, contact_urn, self, self.created_by)
# save away our created call
run.call = call
run.save(update_fields=['call'])
# if we were started by other call, save that off
if parent_run and parent_run.call:
call.parent = parent_run.call
call.save()
else:
# trigger the call to start (in the background)
call.start_call()
runs.append(run)
if flow_start:
flow_start.update_status()
return runs
def start_msg_flow(self, all_contact_ids, started_flows=None, start_msg=None, extra=None,
flow_start=None, parent_run=None):
start_msg_id = start_msg.id if start_msg else None
flow_start_id = flow_start.id if flow_start else None
if started_flows is None:
started_flows = []
# create the broadcast for this flow
send_actions = self.get_entry_send_actions()
# for each send action, we need to create a broadcast, we'll group our created messages under these
broadcasts = []
for send_action in send_actions:
message_text = self.get_localized_text(send_action.msg)
# if we have localized versions, add those to our broadcast definition
language_dict = None
if isinstance(send_action.msg, dict):
language_dict = json.dumps(send_action.msg)
if message_text:
broadcast = Broadcast.create(self.org, self.created_by, message_text, [],
language_dict=language_dict)
broadcast.update_contacts(all_contact_ids)
# manually set our broadcast status to QUEUED, our sub processes will send things off for us
broadcast.status = QUEUED
broadcast.save(update_fields=['status'])
# add it to the list of broadcasts in this flow start
broadcasts.append(broadcast)
# if there are fewer contacts than our batch size, do it immediately
if len(all_contact_ids) < START_FLOW_BATCH_SIZE:
return self.start_msg_flow_batch(all_contact_ids, broadcasts=broadcasts, started_flows=started_flows,
start_msg=start_msg, extra=extra, flow_start=flow_start,
parent_run=parent_run)
# otherwise, create batches instead
else:
# for all our contacts, build up start sms batches
task_context = dict(contacts=[], flow=self.pk, flow_start=flow_start_id,
started_flows=started_flows, broadcasts=[b.id for b in broadcasts], start_msg=start_msg_id, extra=extra)
batch_contacts = task_context['contacts']
for contact_id in all_contact_ids:
batch_contacts.append(contact_id)
if len(batch_contacts) >= START_FLOW_BATCH_SIZE:
print "Starting flow '%s' for batch of %d contacts" % (self.name, len(task_context['contacts']))
push_task(self.org, 'flows', 'start_msg_flow_batch', task_context)
batch_contacts = []
task_context['contacts'] = batch_contacts
if batch_contacts:
print "Starting flow '%s' for batch of %d contacts" % (self.name, len(task_context['contacts']))
push_task(self.org, 'flows', 'start_msg_flow_batch', task_context)
return []
def start_msg_flow_batch(self, batch_contact_ids, broadcasts, started_flows, start_msg=None,
extra=None, flow_start=None, parent_run=None):
simulation = False
if len(batch_contact_ids) == 1:
if Contact.objects.filter(pk=batch_contact_ids[0], org=self.org, is_test=True).first():
simulation = True
# these fields are the initial state for our flow run
run_fields = None
if extra:
            # we allow up to 1024 values in @extra for new flow runs since we may be passing along state
(normalized_fields, count) = FlowRun.normalize_fields(extra, 1024)
run_fields = json.dumps(normalized_fields)
# create all our flow runs for this set of contacts at once
batch = []
now = timezone.now()
for contact_id in batch_contact_ids:
run = FlowRun.create(self, contact_id, fields=run_fields, start=flow_start, created_on=now,
parent=parent_run, db_insert=False)
batch.append(run)
FlowRun.objects.bulk_create(batch)
# build a map of contact to flow run
run_map = dict()
for run in FlowRun.objects.filter(contact__in=batch_contact_ids, flow=self, created_on=now):
run_map[run.contact_id] = run
if run.contact.is_test:
ActionLog.create(run, '%s has entered the "%s" flow' % (run.contact.get_display(self.org, short=True), run.flow.name))
# update our expiration date on our runs, we do this by calculating it on one run then updating all others
run.update_expiration(timezone.now())
FlowRun.objects.filter(contact__in=batch_contact_ids, created_on=now).update(expires_on=run.expires_on,
modified_on=timezone.now())
# if we have some broadcasts to optimize for
message_map = dict()
if broadcasts:
# create our message context
message_context_base = self.build_message_context(None, start_msg)
if extra:
message_context_base['extra'] = extra
# and add each contact and message to each broadcast
for broadcast in broadcasts:
# create our message context
message_context = dict()
message_context.update(message_context_base)
# provide the broadcast with a partial recipient list
partial_recipients = list(), Contact.objects.filter(org=self.org, pk__in=batch_contact_ids)
# create the sms messages
created_on = timezone.now()
broadcast.send(message_context=message_context, trigger_send=False,
response_to=start_msg, status=INITIALIZING, msg_type=FLOW,
created_on=created_on, base_language=self.base_language,
partial_recipients=partial_recipients, run_map=run_map)
# map all the messages we just created back to our contact
for msg in Msg.current_messages.filter(broadcast=broadcast, created_on=created_on):
if msg.contact_id not in message_map:
message_map[msg.contact_id] = [msg]
else:
message_map[msg.contact_id].append(msg)
# now execute our actual flow steps
(entry_actions, entry_rules) = (None, None)
if self.entry_type == Flow.ACTIONS_ENTRY:
entry_actions = ActionSet.objects.filter(uuid=self.entry_uuid).first()
elif self.entry_type == Flow.RULES_ENTRY:
entry_rules = RuleSet.objects.filter(uuid=self.entry_uuid).first()
runs = []
msgs = []
optimize_sending_action = len(broadcasts) > 0
for contact_id in batch_contact_ids:
# each contact maintains its own list of started flows
started_flows_by_contact = list(started_flows)
run = run_map[contact_id]
run_msgs = message_map.get(contact_id, [])
arrived_on = timezone.now()
if entry_actions:
run_msgs += entry_actions.execute_actions(run, start_msg, started_flows_by_contact,
skip_leading_reply_actions=not optimize_sending_action)
step = self.add_step(run, entry_actions, run_msgs, is_start=True, arrived_on=arrived_on)
# and onto the destination
if entry_actions.destination:
destination = Flow.get_node(entry_actions.flow,
entry_actions.destination,
entry_actions.destination_type)
next_step = self.add_step(run, destination, previous_step=step, arrived_on=timezone.now())
msg = Msg(org=self.org, contact_id=contact_id, text='', id=0)
handled, step_msgs = Flow.handle_destination(destination, next_step, run, msg, started_flows_by_contact,
is_test_contact=simulation, trigger_send=False)
run_msgs += step_msgs
else:
run.set_completed(final_step=step)
elif entry_rules:
step = self.add_step(run, entry_rules, run_msgs, is_start=True, arrived_on=arrived_on)
# if we have a start message, go and handle the rule
if start_msg:
Flow.find_and_handle(start_msg, started_flows_by_contact, triggered_start=True)
# if we didn't get an incoming message, see if we need to evaluate it passively
elif not entry_rules.is_pause():
# create an empty placeholder message
msg = Msg(org=self.org, contact_id=contact_id, text='', id=0)
handled, step_msgs = Flow.handle_destination(entry_rules, step, run, msg, started_flows_by_contact, trigger_send=False)
run_msgs += step_msgs
if start_msg:
step.add_message(start_msg)
runs.append(run)
            # add these messages as ones that are ready to send
            msgs.extend(run_msgs)
# trigger our messages to be sent
if msgs:
# then send them off
msgs.sort(key=lambda message: (message.contact_id, message.created_on))
Msg.all_messages.filter(id__in=[m.id for m in msgs]).update(status=PENDING)
# trigger a sync
self.org.trigger_send(msgs)
# if we have a flow start, check whether we are complete
if flow_start:
flow_start.update_status()
return runs
def add_step(self, run, node,
msgs=None, rule=None, category=None, call=None, is_start=False, previous_step=None, arrived_on=None):
if msgs is None:
msgs = []
if not arrived_on:
arrived_on = timezone.now()
# update our timeouts
timeout = node.get_timeout() if isinstance(node, RuleSet) else None
run.update_timeout(arrived_on, timeout)
if not is_start:
            # mark any other states for this contact as evaluated, contacts can only be in one place at a time
self.steps().filter(run=run, left_on=None).update(left_on=arrived_on, next_uuid=node.uuid,
rule_uuid=rule, rule_category=category)
# then add our new step and associate it with our message
step = FlowStep.objects.create(run=run, contact=run.contact, step_type=node.get_step_type(),
step_uuid=node.uuid, arrived_on=arrived_on)
# for each message, associate it with this step and set the label on it
for msg in msgs:
step.add_message(msg)
# update the activity for our run
if not run.contact.is_test:
self.update_activity(step, previous_step, rule_uuid=rule)
return step
def remove_active_for_run_ids(self, run_ids):
"""
Bulk deletion of activity for a list of run ids. This removes the runs
from the active step, but does not remove the visited (path) data
for the runs.
"""
r = get_redis_connection()
if run_ids:
for key in self.calculate_active_step_keys():
# remove keys 1,000 at a time
for batch in chunk_list(run_ids, 1000):
r.srem(key, *batch)
def remove_active_for_step(self, step):
"""
        Removes the run at the given step from the active step cache, but does
        not remove the (path) data for the run.
"""
r = get_redis_connection()
r.srem(self.get_stats_cache_key(FlowStatsCache.step_active_set, step.step_uuid), step.run.pk)
def remove_visits_for_step(self, step):
"""
Decrements the count for the given step
"""
r = get_redis_connection()
step_uuid = step.step_uuid
if step.rule_uuid:
step_uuid = step.rule_uuid
r.hincrby(self.get_stats_cache_key(FlowStatsCache.visit_count_map), "%s:%s" % (step_uuid, step.next_uuid), -1)
def update_activity(self, step, previous_step=None, rule_uuid=None):
"""
Updates our cache for the given step. This will mark the current active step and
record history path data for activity.
:param step: the step they just took
:param previous_step: the step they were just on
:param rule_uuid: the uuid for the rule they came from (if any)
"""
with self.lock_on(FlowLock.activity):
r = get_redis_connection()
# remove our previous active spot
if previous_step:
self.remove_active_for_step(previous_step)
# mark our path
previous_uuid = previous_step.step_uuid
# if we came from a rule, use that instead of our step
if rule_uuid:
previous_uuid = rule_uuid
r.hincrby(self.get_stats_cache_key(FlowStatsCache.visit_count_map), "%s:%s" % (previous_uuid, step.step_uuid), 1)
# make us active on our new step
r.sadd(self.get_stats_cache_key(FlowStatsCache.step_active_set, step.step_uuid), step.run.pk)
def get_entry_send_actions(self):
"""
Returns all the entry actions (the first actions in a flow) that are reply actions. This is used
for grouping all our outgoing messages into a single Broadcast.
"""
if not self.entry_uuid or self.entry_type != Flow.ACTIONS_ENTRY:
return []
# get our entry actions
entry_actions = ActionSet.objects.filter(uuid=self.entry_uuid).first()
send_actions = []
if entry_actions:
actions = entry_actions.get_actions()
for action in actions:
# if this isn't a reply action, bail, they might be modifying the contact
if not isinstance(action, ReplyAction):
break
send_actions.append(action)
return send_actions
def get_dependencies(self, dependencies=None):
# need to make sure we have the latest version to inspect dependencies
self.ensure_current_version()
if not dependencies:
dependencies = dict(flows=set(), groups=set(), campaigns=set(), triggers=set())
flows = set()
groups = set()
# find all the flows we reference, note this won't include archived flows
for action_set in self.action_sets.all():
for action in action_set.get_actions():
if hasattr(action, 'flow'):
flows.add(action.flow)
if hasattr(action, 'groups'):
for group in action.groups:
if not isinstance(group, unicode):
groups.add(group)
for ruleset in self.rule_sets.all():
if ruleset.ruleset_type == RuleSet.TYPE_SUBFLOW:
flow = Flow.objects.filter(uuid=ruleset.config_json()['flow']['uuid']).first()
if flow:
flows.add(flow)
# add any campaigns that use our groups
from temba.campaigns.models import Campaign
campaigns = set(Campaign.objects.filter(org=self.org, group__in=groups, is_archived=False, is_active=True))
for campaign in campaigns:
flows.update(list(campaign.get_flows()))
# and any of our triggers that reference us
from temba.triggers.models import Trigger
triggers = set(Trigger.objects.filter(org=self.org, flow=self, is_archived=False, is_active=True))
dependencies['flows'].update(flows)
dependencies['groups'].update(groups)
dependencies['campaigns'].update(campaigns)
dependencies['triggers'].update(triggers)
if self in dependencies['flows']:
return dependencies
for flow in flows:
dependencies = flow.get_dependencies(dependencies)
return dependencies
def as_json(self, expand_contacts=False):
"""
Returns the JSON definition for this flow.
expand_contacts:
Add names for contacts and groups that are just ids. This is useful for human readable
situations such as the flow editor.
"""
flow = dict()
if self.entry_uuid:
flow[Flow.ENTRY] = self.entry_uuid
else:
flow[Flow.ENTRY] = None
actionsets = []
for actionset in ActionSet.objects.filter(flow=self).order_by('pk'):
actionsets.append(actionset.as_json())
def lookup_action_contacts(action, contacts, groups):
if 'contact' in action:
contacts.append(action['contact']['uuid'])
if 'contacts' in action:
for contact in action['contacts']:
contacts.append(contact['uuid'])
if 'group' in action:
g = action['group']
if isinstance(g, dict):
if 'uuid' in g:
groups.append(g['uuid'])
if 'groups' in action:
for group in action['groups']:
if isinstance(group, dict):
if 'uuid' in group:
groups.append(group['uuid'])
def replace_action_contacts(action, contacts, groups):
if 'contact' in action:
contact = contacts.get(action['contact']['uuid'], None)
if contact:
action['contact'] = contact.as_json()
if 'contacts' in action:
expanded_contacts = []
for contact in action['contacts']:
contact = contacts.get(contact['uuid'], None)
if contact:
expanded_contacts.append(contact.as_json())
action['contacts'] = expanded_contacts
if 'group' in action:
# variable substitution
group = action['group']
if isinstance(group, dict):
if 'uuid' in group:
group = groups.get(group['uuid'], None)
if group:
action['group'] = dict(uuid=group.uuid, name=group.name)
if 'groups' in action:
expanded_groups = []
for group in action['groups']:
# variable substitution
if not isinstance(group, dict):
expanded_groups.append(group)
else:
group_instance = groups.get(group['uuid'], None)
if group_instance:
expanded_groups.append(dict(uuid=group_instance.uuid, name=group_instance.name))
else:
expanded_groups.append(group)
action['groups'] = expanded_groups
if expand_contacts:
groups = []
contacts = []
for actionset in actionsets:
for action in actionset['actions']:
lookup_action_contacts(action, contacts, groups)
# load them all
contacts = dict((_.uuid, _) for _ in Contact.all().filter(org=self.org, uuid__in=contacts))
groups = dict((_.uuid, _) for _ in ContactGroup.user_groups.filter(org=self.org, uuid__in=groups))
# and replace them
for actionset in actionsets:
for action in actionset['actions']:
replace_action_contacts(action, contacts, groups)
flow[Flow.ACTION_SETS] = actionsets
# add in our rulesets
rulesets = []
for ruleset in RuleSet.objects.filter(flow=self).order_by('pk'):
rulesets.append(ruleset.as_json())
flow[Flow.RULE_SETS] = rulesets
# required flow running details
flow[Flow.BASE_LANGUAGE] = self.base_language
flow[Flow.FLOW_TYPE] = self.flow_type
flow[Flow.VERSION] = CURRENT_EXPORT_VERSION
flow[Flow.METADATA] = self.get_metadata()
return flow
def get_metadata(self):
metadata = dict()
if self.metadata:
metadata = json.loads(self.metadata)
revision = self.revisions.all().order_by('-revision').first()
metadata[Flow.NAME] = self.name
metadata[Flow.SAVED_ON] = datetime_to_str(self.saved_on)
metadata[Flow.REVISION] = revision.revision if revision else 1
metadata[Flow.UUID] = self.uuid
metadata[Flow.EXPIRES] = self.expires_after_minutes
return metadata
@classmethod
def detect_invalid_cycles(cls, json_dict):
"""
Checks for invalid cycles in our flow
:param json_dict: our flow definition
        :return: invalid cycle path as a list of uuids if found, otherwise None
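
        e.g. (illustrative) a flow wired A -> B -> A with no pause in between
        returns [uuid_A, uuid_B, uuid_A]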
"""
# Adapted from a blog post by Guido:
# http://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html
        # Maintain path as a depth-first path in the implicit tree;
# path is represented as an OrderedDict of {node: [child,...]} pairs.
nodes = list()
node_map = {}
for ruleset in json_dict.get(Flow.RULE_SETS, []):
nodes.append(ruleset.get('uuid'))
node_map[ruleset.get('uuid')] = ruleset
for actionset in json_dict.get(Flow.ACTION_SETS, []):
nodes.append(actionset.get('uuid'))
node_map[actionset.get('uuid')] = actionset
def get_destinations(uuid):
node = node_map.get(uuid)
if not node:
return []
rules = node.get('rules', [])
destinations = []
if rules:
if node.get('ruleset_type', None) in RuleSet.TYPE_WAIT:
return []
for rule in rules:
if rule.get('destination'):
destinations.append(rule.get('destination'))
elif node.get('destination'):
destinations.append(node.get('destination'))
return destinations
while nodes:
root = nodes.pop()
path = OrderedDict({root: get_destinations(root)})
while path:
# children at the fringe of the tree
children = path[next(reversed(path))]
while children:
child = children.pop()
# found a loop
if child in path:
pathlist = list(path)
return pathlist[pathlist.index(child):] + [child]
# new path
if child in nodes:
path[child] = get_destinations(child)
nodes.remove(child)
break
else:
# no more children; pop back up a level
path.popitem()
return None
def ensure_current_version(self):
"""
Makes sure the flow is at the current version. If it isn't it will
migrate the definition forward updating the flow accordingly.
"""
if self.version_number < CURRENT_EXPORT_VERSION:
with self.lock_on(FlowLock.definition):
revision = self.revisions.all().order_by('-revision').all().first()
if revision:
json_flow = revision.get_definition_json()
else:
json_flow = self.as_json()
self.update(json_flow)
self.refresh_from_db()
def update(self, json_dict, user=None, force=False):
"""
Updates a definition for a flow.
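
        Returns a status dict, e.g. (illustrative):

            dict(status="success", description="Flow Saved", saved_on=..., revision=2)

        or one with status="unsaved" if someone else saved more recently and
        force was not set.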
"""
def get_step_type(dest, rulesets, actionsets):
if dest:
if rulesets.get(dest, None):
return FlowStep.TYPE_RULE_SET
if actionsets.get(dest, None):
return FlowStep.TYPE_ACTION_SET
return None
cycle = Flow.detect_invalid_cycles(json_dict)
if cycle:
raise FlowException("Found invalid cycle: %s" % cycle)
try:
# check whether the flow has changed since this flow was last saved
if user and not force:
saved_on = json_dict.get(Flow.METADATA).get(Flow.SAVED_ON, None)
org = user.get_org()
tz = org.get_tzinfo()
if not saved_on or str_to_datetime(saved_on, tz) < self.saved_on:
saver = ""
if self.saved_by.first_name:
saver += "%s " % self.saved_by.first_name
if self.saved_by.last_name:
saver += "%s" % self.saved_by.last_name
if not saver:
saver = self.saved_by.username
return dict(status="unsaved", description="Flow NOT Saved", saved_on=datetime_to_str(self.saved_on), saved_by=saver)
top_y = 0
top_uuid = None
# load all existing objects into dicts by uuid
existing_actionsets = dict()
for actionset in self.action_sets.all():
existing_actionsets[actionset.uuid] = actionset
existing_rulesets = dict()
for ruleset in self.rule_sets.all():
existing_rulesets[ruleset.uuid] = ruleset
# set of uuids which we've seen, we use this set to remove objects no longer used in this flow
seen = set()
destinations = set()
# our steps in our current update submission
current_actionsets = {}
current_rulesets = {}
# parse our actions
for actionset in json_dict.get(Flow.ACTION_SETS, []):
uuid = actionset.get(Flow.UUID)
# validate our actions, normalizing them as JSON after reading them
actions = [_.as_json() for _ in Action.from_json_array(self.org, actionset.get(Flow.ACTIONS))]
if actions:
current_actionsets[uuid] = actions
for ruleset in json_dict.get(Flow.RULE_SETS, []):
uuid = ruleset.get(Flow.UUID)
current_rulesets[uuid] = ruleset
seen.add(uuid)
# create all our rule sets
for ruleset in json_dict.get(Flow.RULE_SETS, []):
uuid = ruleset.get(Flow.UUID)
rules = ruleset.get(Flow.RULES)
label = ruleset.get(Flow.LABEL, None)
operand = ruleset.get(Flow.OPERAND, None)
finished_key = ruleset.get(Flow.FINISHED_KEY)
ruleset_type = ruleset.get(Flow.RULESET_TYPE)
config = ruleset.get(Flow.CONFIG)
if not config:
config = dict()
# cap our lengths
                if label:
                    label = label[:64]
if operand:
operand = operand[:128]
(x, y) = (ruleset.get(Flow.X), ruleset.get(Flow.Y))
if not top_uuid or y < top_y:
top_y = y
top_uuid = uuid
# validate we can parse our rules, this will throw if not
Rule.from_json_array(self.org, rules)
for rule in rules:
if 'destination' in rule:
# if the destination was excluded for not having any actions
# remove the connection for our rule too
if rule['destination'] not in current_actionsets and rule['destination'] not in seen:
rule['destination'] = None
else:
destination_uuid = rule.get('destination', None)
destinations.add(destination_uuid)
# determine what kind of destination we are pointing to
rule['destination_type'] = get_step_type(destination_uuid,
current_rulesets, current_actionsets)
# print "Setting destination [%s] type to: %s" % (destination_uuid, rule['destination_type'])
existing = existing_rulesets.get(uuid, None)
if existing:
existing.set_rules_dict(rules)
existing.operand = operand
existing.label = label
existing.finished_key = finished_key
existing.ruleset_type = ruleset_type
existing.set_config(config)
(existing.x, existing.y) = (x, y)
existing.save()
else:
existing = RuleSet.objects.create(flow=self,
uuid=uuid,
label=label,
rules=json.dumps(rules),
finished_key=finished_key,
ruleset_type=ruleset_type,
operand=operand,
config=json.dumps(config),
x=x, y=y)
existing_rulesets[uuid] = existing
# update our value type based on our new rules
existing.value_type = existing.get_value_type()
RuleSet.objects.filter(pk=existing.pk).update(value_type=existing.value_type)
# now work through our action sets
for actionset in json_dict.get(Flow.ACTION_SETS, []):
uuid = actionset.get(Flow.UUID)
# skip actionsets without any actions. This happens when there are no valid
# actions in an actionset such as when deleted groups or flows are the only actions
if uuid not in current_actionsets:
continue
actions = current_actionsets[uuid]
seen.add(uuid)
(x, y) = (actionset.get(Flow.X), actionset.get(Flow.Y))
if not top_uuid or y < top_y:
top_y = y
top_uuid = uuid
existing = existing_actionsets.get(uuid, None)
# lookup our destination
destination_uuid = actionset.get('destination')
destination_type = get_step_type(destination_uuid, current_rulesets, current_actionsets)
if destination_uuid:
if not destination_type:
destination_uuid = None
# only create actionsets if there are actions
if actions:
if existing:
# print "Updating %s to point to %s" % (unicode(actions), destination_uuid)
existing.destination = destination_uuid
existing.destination_type = destination_type
existing.set_actions_dict(actions)
(existing.x, existing.y) = (x, y)
existing.save()
else:
existing = ActionSet.objects.create(flow=self,
uuid=uuid,
destination=destination_uuid,
destination_type=destination_type,
actions=json.dumps(actions),
x=x, y=y)
existing_actionsets[uuid] = existing
# now work through all our objects once more, making sure all uuids map appropriately
for existing in existing_actionsets.values():
if existing.uuid not in seen:
del existing_actionsets[existing.uuid]
existing.delete()
for existing in existing_rulesets.values():
if existing.uuid not in seen:
# clean up any values on this ruleset
Value.objects.filter(ruleset=existing, org=self.org).delete()
del existing_rulesets[existing.uuid]
existing.delete()
# make sure all destinations are present though
for destination in destinations:
if destination not in existing_rulesets and destination not in existing_actionsets:
raise FlowException("Invalid destination: '%s', no matching actionset or ruleset" % destination)
entry = json_dict.get('entry', None)
# check if we are pointing to a destination that is no longer valid
if entry not in existing_rulesets and entry not in existing_actionsets:
entry = None
if not entry and top_uuid:
entry = top_uuid
# set our entry
if entry in existing_actionsets:
self.entry_uuid = entry
self.entry_type = Flow.ACTIONS_ENTRY
elif entry in existing_rulesets:
self.entry_uuid = entry
self.entry_type = Flow.RULES_ENTRY
else:
self.entry_uuid = None
self.entry_type = None
# if we have a base language, set that
self.base_language = json_dict.get('base_language', None)
# set our metadata
self.metadata = None
if Flow.METADATA in json_dict:
self.metadata = json.dumps(json_dict[Flow.METADATA])
if user:
self.saved_by = user
self.saved_on = timezone.now()
self.version_number = CURRENT_EXPORT_VERSION
self.save()
# clear property cache
self.clear_props_cache()
# create a version of our flow for posterity
if user is None:
user = self.created_by
# last version
revision = 1
last_revision = self.revisions.order_by('-revision').first()
if last_revision:
revision = last_revision.revision + 1
# create a new version
self.revisions.create(definition=json.dumps(json_dict),
created_by=user,
modified_by=user,
spec_version=CURRENT_EXPORT_VERSION,
revision=revision)
return dict(status="success", description="Flow Saved",
saved_on=datetime_to_str(self.saved_on), revision=revision)
except Exception as e:
# note that badness happened
import logging
logger = logging.getLogger(__name__)
logger.exception(unicode(e))
import traceback
            traceback.print_exc()
raise e
def __unicode__(self):
return self.name
class Meta:
ordering = ('-modified_on',)
class FlowRun(models.Model):
EXIT_TYPE_COMPLETED = 'C'
EXIT_TYPE_INTERRUPTED = 'I'
EXIT_TYPE_EXPIRED = 'E'
EXIT_TYPE_CHOICES = ((EXIT_TYPE_COMPLETED, _("Completed")),
(EXIT_TYPE_INTERRUPTED, _("Interrupted")),
(EXIT_TYPE_EXPIRED, _("Expired")))
INVALID_EXTRA_KEY_CHARS = re.compile(r'[^a-zA-Z0-9_]')
org = models.ForeignKey(Org, related_name='runs', db_index=False)
flow = models.ForeignKey(Flow, related_name='runs')
contact = models.ForeignKey(Contact, related_name='runs')
call = models.ForeignKey('ivr.IVRCall', related_name='runs', null=True, blank=True,
help_text=_("The call that handled this flow run, only for voice flows"))
is_active = models.BooleanField(default=True,
help_text=_("Whether this flow run is currently active"))
fields = models.TextField(blank=True, null=True,
help_text=_("A JSON representation of any custom flow values the user has saved away"))
created_on = models.DateTimeField(default=timezone.now,
help_text=_("When this flow run was created"))
modified_on = models.DateTimeField(auto_now=True,
help_text=_("When this flow run was last updated"))
exited_on = models.DateTimeField(null=True,
help_text=_("When the contact exited this flow run"))
exit_type = models.CharField(null=True, max_length=1, choices=EXIT_TYPE_CHOICES,
help_text=_("Why the contact exited this flow run"))
expires_on = models.DateTimeField(null=True,
help_text=_("When this flow run will expire"))
timeout_on = models.DateTimeField(null=True,
help_text=_("When this flow will next time out (if any)"))
responded = models.BooleanField(default=False, help_text='Whether contact has responded in this run')
start = models.ForeignKey('flows.FlowStart', null=True, blank=True, related_name='runs',
help_text=_("The FlowStart objects that started this run"))
submitted_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
help_text="The user which submitted this flow run")
parent = models.ForeignKey('flows.FlowRun', null=True, help_text=_("The parent run that triggered us"))
@classmethod
def create(cls, flow, contact_id, start=None, call=None, fields=None,
created_on=None, db_insert=True, submitted_by=None, parent=None):
args = dict(org=flow.org, flow=flow, contact_id=contact_id, start=start,
call=call, fields=fields, submitted_by=submitted_by, parent=parent)
if created_on:
args['created_on'] = created_on
if db_insert:
return FlowRun.objects.create(**args)
else:
return FlowRun(**args)
@classmethod
def normalize_field_key(cls, key):
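        # e.g. (illustrative): "my field!" -> "my_field_"; keys are capped at 255 chars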
return FlowRun.INVALID_EXTRA_KEY_CHARS.sub('_', key)[:255]
@classmethod
def normalize_fields(cls, fields, max_values=128, count=-1):
"""
Turns an arbitrary dictionary into a dictionary containing only string keys and values
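
        e.g. (illustrative sketch; a single key keeps the repr deterministic):

        >>> FlowRun.normalize_fields({'a b': 'c'})
        ({'a_b': 'c'}, 1)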
"""
if isinstance(fields, (str, unicode)):
return fields[:640], count + 1
elif isinstance(fields, numbers.Number):
return fields, count + 1
elif isinstance(fields, dict):
count += 1
field_dict = dict()
for (k, v) in fields.items():
(field_dict[FlowRun.normalize_field_key(k)], count) = FlowRun.normalize_fields(v, max_values, count)
if count >= max_values:
break
return field_dict, count
elif isinstance(fields, list):
count += 1
list_dict = dict()
for (i, v) in enumerate(fields):
(list_dict[str(i)], count) = FlowRun.normalize_fields(v, max_values, count)
if count >= max_values:
break
return list_dict, count
else:
return unicode(fields), count + 1
@classmethod
def bulk_exit(cls, runs, exit_type, exited_on=None):
"""
Exits (expires, interrupts) runs in bulk
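
        e.g. (illustrative usage):

            FlowRun.bulk_exit(flow.runs.filter(is_active=True), FlowRun.EXIT_TYPE_EXPIRED)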
"""
if isinstance(runs, list):
runs = [{'id': r.pk, 'flow_id': r.flow_id} for r in runs]
else:
runs = list(runs.values('id', 'flow_id')) # select only what we need...
# organize runs by flow
runs_by_flow = defaultdict(list)
for run in runs:
runs_by_flow[run['flow_id']].append(run['id'])
# for each flow, remove activity for all runs
for flow_id, run_ids in runs_by_flow.iteritems():
flow = Flow.objects.filter(id=flow_id).first()
if flow:
flow.remove_active_for_run_ids(run_ids)
modified_on = timezone.now()
if not exited_on:
exited_on = modified_on
from .tasks import continue_parent_flows
# batch this for 1,000 runs at a time so we don't grab locks for too long
for batch in chunk_list(runs, 1000):
ids = [r['id'] for r in batch]
run_objs = FlowRun.objects.filter(pk__in=ids)
run_objs.update(is_active=False, exited_on=exited_on, exit_type=exit_type, modified_on=modified_on)
# continue the parent flows to continue async
continue_parent_flows.delay(ids)
def get_last_msg(self, direction):
"""
        Returns the last msg in the given direction on this run, or None if there is none
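
        e.g. (illustrative): run.get_last_msg(INCOMING) returns the newest incoming Msg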
"""
msg = Msg.all_messages.filter(steps__run=self, direction=direction).order_by('-created_on').first()
return msg
@classmethod
def continue_parent_flow_runs(cls, runs):
"""
Hands flow control back to our parent run if we have one
"""
runs = runs.filter(parent__flow__is_active=True, parent__flow__is_archived=False)
for run in runs:
steps = run.parent.steps.filter(left_on=None, step_type=FlowStep.TYPE_RULE_SET)
step = steps.select_related('run', 'run__flow', 'run__contact', 'run__flow__org').first()
if step:
ruleset = RuleSet.objects.filter(uuid=step.step_uuid, ruleset_type=RuleSet.TYPE_SUBFLOW, flow__org=step.run.org).first()
if ruleset:
# use the last incoming message on this step
msg = step.messages.filter(direction=INCOMING).order_by('-created_on').first()
# if we are routing back to the parent before a msg was sent, we need a placeholder
if not msg:
msg = Msg()
msg.text = ''
msg.org = run.org
msg.contact = run.contact
# finally, trigger our parent flow
Flow.find_and_handle(msg, started_flows=[run.flow, run.parent.flow], resume_parent_run=True)
def resume_after_timeout(self):
"""
Resumes a flow that is at a ruleset that has timed out
"""
last_step = self.steps.order_by('-arrived_on').first()
node = last_step.get_step()
# only continue if we are at a ruleset with a timeout
if isinstance(node, RuleSet) and timezone.now() > self.timeout_on > last_step.arrived_on:
timeout = node.get_timeout()
# if our current node doesn't have a timeout, then we've moved on
if timeout:
# get the last outgoing msg for this contact
msg = self.get_last_msg(OUTGOING)
# check that our last outgoing msg was sent and our timeout is in the past, otherwise reschedule
if msg and (not msg.sent_on or timezone.now() < msg.sent_on + timedelta(minutes=timeout) - timedelta(seconds=5)):
self.update_timeout(msg.sent_on if msg.sent_on else timezone.now(), timeout)
# look good, lets resume this run
else:
msg = self.get_last_msg(INCOMING)
if not msg:
msg = Msg()
msg.text = ''
msg.org = self.org
msg.contact = self.contact
Flow.find_and_handle(msg, resume_after_timeout=True)
def release(self):
"""
Permanently deletes this flow run
"""
# remove each of our steps. we do this one at a time
# so we can decrement the activity properly
for step in self.steps.all():
step.release()
# remove our run from the activity
with self.flow.lock_on(FlowLock.activity):
self.flow.remove_active_for_run_ids([self.pk])
# lastly delete ourselves
self.delete()
def set_completed(self, final_step=None, completed_on=None):
"""
Mark a run as complete
"""
if self.contact.is_test:
ActionLog.create(self, _('%s has exited this flow') % self.contact.get_display(self.flow.org, short=True))
now = timezone.now()
if not completed_on:
completed_on = now
# mark that we left this step
if final_step:
final_step.left_on = completed_on
final_step.save(update_fields=['left_on'])
self.flow.remove_active_for_step(final_step)
# mark this flow as inactive
self.exit_type = FlowRun.EXIT_TYPE_COMPLETED
self.exited_on = completed_on
self.modified_on = now
self.is_active = False
self.save(update_fields=('exit_type', 'exited_on', 'modified_on', 'is_active'))
# let our parent know we finished
from .tasks import continue_parent_flows
continue_parent_flows.delay([self.pk])
def update_timeout(self, now, minutes):
"""
Updates our timeout for our run, either clearing it or setting it appropriately
"""
if not minutes and self.timeout_on:
self.timeout_on = None
self.modified_on = now
self.save(update_fields=['timeout_on', 'modified_on'])
elif minutes:
self.timeout_on = now + timedelta(minutes=minutes)
self.modified_on = now
self.save(update_fields=['timeout_on', 'modified_on'])
def update_expiration(self, point_in_time):
"""
Set our expiration according to the flow settings
"""
if self.flow.expires_after_minutes:
now = timezone.now()
if not point_in_time:
point_in_time = now
self.expires_on = point_in_time + timedelta(minutes=self.flow.expires_after_minutes)
self.modified_on = now
# save our updated fields
self.save(update_fields=['expires_on', 'modified_on'])
# if it's in the past, just expire us now
if self.expires_on < now:
self.expire()
# parent should always have a later expiration than the children
if self.parent:
self.parent.update_expiration(self.expires_on)
def expire(self):
self.bulk_exit([self], FlowRun.EXIT_TYPE_EXPIRED)
@classmethod
def expire_all_for_contacts(cls, contacts):
contact_runs = cls.objects.filter(is_active=True, contact__in=contacts)
cls.bulk_exit(contact_runs, FlowRun.EXIT_TYPE_EXPIRED)
def update_fields(self, field_map, max_values=128):
        # validate and normalize our fields
(field_map, count) = FlowRun.normalize_fields(field_map, max_values)
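        # note: new values are merged into, not replacing, previously saved fields,
        # e.g. (illustrative) update_fields({'age': '32'}) keeps earlier keys intact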
if not self.fields:
self.fields = json.dumps(field_map)
else:
existing_map = json.loads(self.fields)
existing_map.update(field_map)
self.fields = json.dumps(existing_map)
self.save(update_fields=['fields'])
def field_dict(self):
return json.loads(self.fields) if self.fields else {}
def is_completed(self):
return self.exit_type == FlowRun.EXIT_TYPE_COMPLETED
def create_outgoing_ivr(self, text, recording_url, response_to=None):
# create a Msg object to track what happened
from temba.msgs.models import DELIVERED, IVR
media = None
if recording_url:
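            # media values take the form "<type>/x-wav:<url>", e.g. (assuming
            # Msg.MEDIA_AUDIO is 'audio') "audio/x-wav:https://example.com/rec.wav"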
media = '%s/x-wav:%s' % (Msg.MEDIA_AUDIO, recording_url)
text = recording_url
msg = Msg.create_outgoing(self.flow.org, self.flow.created_by, self.contact, text, channel=self.call.channel,
response_to=response_to, media=media,
status=DELIVERED, msg_type=IVR)
# play a recording or read some text
if msg:
if recording_url:
self.voice_response.play(url=recording_url)
else:
self.voice_response.say(text)
return msg
class FlowStep(models.Model):
"""
A contact's visit to a node in a flow (rule set or action set)
"""
TYPE_RULE_SET = 'R'
TYPE_ACTION_SET = 'A'
STEP_TYPE_CHOICES = ((TYPE_RULE_SET, "RuleSet"),
(TYPE_ACTION_SET, "ActionSet"))
run = models.ForeignKey(FlowRun, related_name='steps')
contact = models.ForeignKey(Contact, related_name='flow_steps')
step_type = models.CharField(max_length=1, choices=STEP_TYPE_CHOICES, help_text=_("What type of node was visited"))
step_uuid = models.CharField(max_length=36, db_index=True,
help_text=_("The UUID of the ActionSet or RuleSet for this step"))
rule_uuid = models.CharField(max_length=36, null=True,
help_text=_("For uuid of the rule that matched on this ruleset, null on ActionSets"))
rule_category = models.CharField(max_length=36, null=True,
help_text=_("The category label that matched on this ruleset, null on ActionSets"))
rule_value = models.CharField(max_length=640, null=True,
help_text=_("The value that was matched in our category for this ruleset, null on ActionSets"))
rule_decimal_value = models.DecimalField(max_digits=36, decimal_places=8, null=True,
help_text=_("The decimal value that was matched in our category for this ruleset, null on ActionSets or if a non numeric rule was matched"))
next_uuid = models.CharField(max_length=36, null=True,
help_text=_("The uuid of the next step type we took"))
arrived_on = models.DateTimeField(help_text=_("When the user arrived at this step in the flow"))
left_on = models.DateTimeField(null=True, db_index=True,
help_text=_("When the user left this step in the flow"))
messages = models.ManyToManyField(Msg, related_name='steps',
help_text=_("Any messages that are associated with this step (either sent or received)"))
broadcasts = models.ManyToManyField(Broadcast, related_name='steps',
help_text=_("Any broadcasts that are associated with this step (only sent)"))
@classmethod
def from_json(cls, json_obj, flow, run, previous_rule=None):
node = json_obj['node']
arrived_on = json_date_to_datetime(json_obj['arrived_on'])
# find and update the previous step
prev_step = FlowStep.objects.filter(run=run).order_by('-left_on').first()
if prev_step:
prev_step.left_on = arrived_on
prev_step.next_uuid = node.uuid
prev_step.save(update_fields=('left_on', 'next_uuid'))
# generate the messages for this step
msgs = []
if node.is_ruleset():
incoming = None
if node.is_pause():
# if a msg was sent to this ruleset, create it
if json_obj['rule']:
media = None
if 'media' in json_obj['rule']:
media = json_obj['rule']['media']
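                        # media is of the form "<content_type>:<url>", e.g.
                        # "audio/x-wav:https://..." (illustrative), so peel off the
                        # type prefix and keep the raw url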
(media_type, url) = media.split(':', 1)
# store the non-typed url in the value and text
json_obj['rule']['value'] = url
json_obj['rule']['text'] = url
# if we received a message
incoming = Msg.create_incoming(org=run.org, contact=run.contact, text=json_obj['rule']['text'],
media=media, msg_type=FLOW, status=HANDLED, date=arrived_on,
channel=None, urn=None)
else:
incoming = Msg.current_messages.filter(org=run.org, direction=INCOMING, steps__run=run).order_by('-pk').first()
if incoming:
msgs.append(incoming)
else:
actions = Action.from_json_array(flow.org, json_obj['actions'])
last_incoming = Msg.all_messages.filter(org=run.org, direction=INCOMING, steps__run=run).order_by('-pk').first()
for action in actions:
msgs += action.execute(run, node.uuid, msg=last_incoming, offline_on=arrived_on)
step = flow.add_step(run, node, msgs=msgs, previous_step=prev_step, arrived_on=arrived_on, rule=previous_rule)
# if a rule was picked on this ruleset
if node.is_ruleset() and json_obj['rule']:
rule_uuid = json_obj['rule']['uuid']
rule_value = json_obj['rule']['value']
rule_category = json_obj['rule']['category']
# update the value if we have an existing ruleset
ruleset = RuleSet.objects.filter(flow=flow, uuid=node.uuid).first()
if ruleset:
rule = None
for r in ruleset.get_rules():
if r.uuid == rule_uuid:
rule = r
break
if not rule:
raise ValueError("No such rule with UUID %s" % rule_uuid)
rule.category = rule_category
ruleset.save_run_value(run, rule, rule_value)
# update our step with our rule details
step.rule_uuid = rule_uuid
step.rule_category = rule_category
step.rule_value = rule_value
try:
step.rule_decimal_value = Decimal(json_obj['rule']['value'])
except Exception:
pass
step.save(update_fields=('rule_uuid', 'rule_category', 'rule_value', 'rule_decimal_value'))
return step
@classmethod
def get_active_steps_for_contact(cls, contact, step_type=None):
steps = FlowStep.objects.filter(run__is_active=True, run__flow__is_active=True, run__contact=contact, left_on=None)
# don't consider voice steps, those are interactive
steps = steps.exclude(run__flow__flow_type=Flow.VOICE)
# real contacts don't deal with archived flows
if not contact.is_test:
steps = steps.filter(run__flow__is_archived=False)
if step_type:
steps = steps.filter(step_type=step_type)
steps = steps.order_by('-pk')
# optimize lookups
return steps.select_related('run', 'run__flow', 'run__contact', 'run__flow__org')
def release(self):
if not self.contact.is_test:
self.run.flow.remove_visits_for_step(self)
# finally delete us
self.delete()
def save_rule_match(self, rule, value):
self.rule_category = rule.category
self.rule_uuid = rule.uuid
if value is None:
value = ''
self.rule_value = unicode(value)[:640]
if isinstance(value, Decimal):
self.rule_decimal_value = value
self.save(update_fields=['rule_category', 'rule_uuid', 'rule_value', 'rule_decimal_value'])
def get_text(self, run=None):
"""
Returns a single text value for this step. Since steps can have multiple outgoing messages, this isn't very
useful but needed for backwards compatibility in API v1.
"""
msg = self.messages.all().first()
if msg:
return msg.text
# It's possible that messages have been purged but we still have broadcasts. Broadcast isn't implicitly ordered
# like Msg is so .all().first() would cause an extra db hit even if all() has been prefetched.
broadcasts = list(self.broadcasts.all())
if broadcasts:
run = run or self.run
return broadcasts[0].get_translated_text(run.contact, base_language=run.flow.base_language, org=run.org)
return None
def add_message(self, msg):
# no-op for no msg or mock msgs
if not msg or not msg.id:
return
self.messages.add(msg)
# if this msg is part of a broadcast, save that on our flowstep so we can later purge the msg
if msg.broadcast:
self.broadcasts.add(msg.broadcast)
# incoming non-IVR messages won't have a type yet so update that
if not msg.msg_type or msg.msg_type == INBOX:
msg.msg_type = FLOW
msg.save(update_fields=['msg_type'])
# if message is from contact, mark run as responded
if not self.run.responded and msg.direction == INCOMING:
# update our local run's responded state and it's expiration
self.run.responded = True
# and make sure the db is up to date
FlowRun.objects.filter(id=self.run.id, responded=False).update(responded=True)
def get_step(self):
"""
Returns either the RuleSet or ActionSet associated with this FlowStep
"""
if self.step_type == FlowStep.TYPE_RULE_SET:
return RuleSet.objects.filter(uuid=self.step_uuid).first()
else:
return ActionSet.objects.filter(uuid=self.step_uuid).first()
def __unicode__(self):
return "%s - %s:%s" % (self.run.contact, self.step_type, self.step_uuid)
class Meta:
index_together = ['step_uuid', 'next_uuid', 'rule_uuid', 'left_on']
class RuleSet(models.Model):
TYPE_WAIT_MESSAGE = 'wait_message'
# Calls
TYPE_WAIT_RECORDING = 'wait_recording'
TYPE_WAIT_DIGIT = 'wait_digit'
TYPE_WAIT_DIGITS = 'wait_digits'
# Surveys
TYPE_WAIT_PHOTO = 'wait_photo'
TYPE_WAIT_VIDEO = 'wait_video'
TYPE_WAIT_AUDIO = 'wait_audio'
TYPE_WAIT_GPS = 'wait_gps'
TYPE_AIRTIME = 'airtime'
TYPE_WEBHOOK = 'webhook'
TYPE_RESTHOOK = 'resthook'
TYPE_FLOW_FIELD = 'flow_field'
TYPE_FORM_FIELD = 'form_field'
TYPE_CONTACT_FIELD = 'contact_field'
TYPE_EXPRESSION = 'expression'
TYPE_SUBFLOW = 'subflow'
CONFIG_WEBHOOK = 'webhook'
CONFIG_WEBHOOK_ACTION = 'webhook_action'
CONFIG_RESTHOOK = 'resthook'
TYPE_MEDIA = (TYPE_WAIT_PHOTO, TYPE_WAIT_GPS, TYPE_WAIT_VIDEO, TYPE_WAIT_AUDIO, TYPE_WAIT_RECORDING)
TYPE_WAIT = (TYPE_WAIT_MESSAGE, TYPE_WAIT_RECORDING, TYPE_WAIT_DIGIT, TYPE_WAIT_DIGITS,
TYPE_WAIT_PHOTO, TYPE_WAIT_VIDEO, TYPE_WAIT_AUDIO, TYPE_WAIT_GPS)
TYPE_CHOICES = ((TYPE_WAIT_MESSAGE, "Wait for message"),
(TYPE_WAIT_RECORDING, "Wait for recording"),
(TYPE_WAIT_DIGIT, "Wait for digit"),
(TYPE_WAIT_DIGITS, "Wait for digits"),
(TYPE_SUBFLOW, "Subflow"),
(TYPE_WEBHOOK, "Webhook"),
(TYPE_RESTHOOK, "Resthook"),
(TYPE_AIRTIME, "Transfer Airtime"),
(TYPE_FORM_FIELD, "Split by message form"),
(TYPE_CONTACT_FIELD, "Split on contact field"),
(TYPE_EXPRESSION, "Split by expression"))
uuid = models.CharField(max_length=36, unique=True)
flow = models.ForeignKey(Flow, related_name='rule_sets')
label = models.CharField(max_length=64, null=True, blank=True,
help_text=_("The label for this field"))
operand = models.CharField(max_length=128, null=True, blank=True,
help_text=_("The value that rules will be run against, if None defaults to @step.value"))
webhook_url = models.URLField(null=True, blank=True, max_length=255,
help_text=_("The URL that will be called with the user's response before we run our rules"))
webhook_action = models.CharField(null=True, blank=True, max_length=8, default='POST',
help_text=_('How the webhook should be executed'))
    rules = models.TextField(help_text=_("The JSON encoded rules for this rule set"))
finished_key = models.CharField(max_length=1, null=True, blank=True,
help_text="During IVR, this is the key to indicate we are done waiting")
value_type = models.CharField(max_length=1, choices=Value.TYPE_CHOICES, default=Value.TYPE_TEXT,
help_text="The type of value this ruleset saves")
ruleset_type = models.CharField(max_length=16, choices=TYPE_CHOICES, null=True,
help_text="The type of ruleset")
response_type = models.CharField(max_length=1, help_text="The type of response that is being saved")
config = models.TextField(null=True, verbose_name=_("Ruleset Configuration"),
help_text=_("RuleSet type specific configuration"))
x = models.IntegerField()
y = models.IntegerField()
created_on = models.DateTimeField(auto_now_add=True, help_text=_("When this ruleset was originally created"))
modified_on = models.DateTimeField(auto_now=True, help_text=_("When this ruleset was last modified"))
@classmethod
def get(cls, flow, uuid):
return RuleSet.objects.filter(flow=flow, uuid=uuid).select_related('flow', 'flow__org').first()
@classmethod
def contains_step(cls, text):
# remove any padding
if text:
text = text.strip()
        # match operands referencing @step, e.g. @step.value or @(step.value)
        # (note this is a loose check: any text starting with '@' that mentions 'step')
return text and text[0] == '@' and 'step' in text
def config_json(self):
if not self.config:
return dict()
else:
return json.loads(self.config)
def set_config(self, config):
self.config = json.dumps(config)
def build_uuid_to_category_map(self):
flow_language = self.flow.base_language
uuid_to_category = dict()
ordered_categories = []
unique_categories = set()
for rule in self.get_rules():
label = rule.get_category_name(flow_language) if rule.category else unicode(_("Valid"))
# ignore "Other" labels
if label == "Other":
continue
# we only want to represent each unique label once
if not label.lower() in unique_categories:
unique_categories.add(label.lower())
ordered_categories.append(dict(label=label, count=0))
uuid_to_category[rule.uuid] = label
# this takes care of results that were categorized with different rules that may not exist anymore
for value in Value.objects.filter(ruleset=self, category=label).order_by('rule_uuid').distinct('rule_uuid'):
uuid_to_category[value.rule_uuid] = label
return ordered_categories, uuid_to_category
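# Shape sketch (hypothetical rules labelled "Yes", "No" and "Other"): returns
#   ([{"label": "Yes", "count": 0}, {"label": "No", "count": 0}],
#    {<rule_uuid>: "Yes" or "No", ...})
# with "Other" filtered out and duplicate labels collapsed case-insensitively.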
def get_value_type(self):
"""
Determines the value type that this ruleset will generate.
"""
rules = self.get_rules()
# we keep track of specialized rule types we see
dec_rules = 0
dt_rules = 0
rule_count = 0
for rule in rules:
if not isinstance(rule, TrueTest):
rule_count += 1
if isinstance(rule, NumericTest):
dec_rules += 1
elif isinstance(rule, DateTest):
dt_rules += 1
# no real rules? this is open ended, return
if rule_count == 0:
return Value.TYPE_TEXT
# if we are all of one type (excluding other) then we are that type
if dec_rules == len(rules) - 1:
return Value.TYPE_DECIMAL
elif dt_rules == len(rules) - 1:
return Value.TYPE_DATETIME
else:
return Value.TYPE_TEXT
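# Worked example (hypothetical ruleset): with rules [NumericTest, NumericTest, TrueTest]
# every rule except the catch-all is numeric, so this returns Value.TYPE_DECIMAL;
# mixing a DateTest in would fall through to Value.TYPE_TEXT.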
def get_voice_input(self, voice_response, action=None):
# recordings aren't wrapped in a gather, they just get tacked on at the end
if self.ruleset_type == RuleSet.TYPE_WAIT_RECORDING:
return voice_response
elif self.ruleset_type == RuleSet.TYPE_WAIT_DIGITS:
return voice_response.gather(finishOnKey=self.finished_key, timeout=60, action=action)
else:
# otherwise we assume it's single digit entry
return voice_response.gather(numDigits=1, timeout=60, action=action)
def is_pause(self):
return self.ruleset_type in RuleSet.TYPE_WAIT
def get_timeout(self):
for rule in self.get_rules():
if isinstance(rule.test, TimeoutTest):
return rule.test.minutes
return None
def find_matching_rule(self, step, run, msg, resume_after_timeout=False):
orig_text = None
if msg:
orig_text = msg.text
context = run.flow.build_message_context(run.contact, msg)
if resume_after_timeout:
for rule in self.get_rules():
if isinstance(rule.test, TimeoutTest):
(result, value) = rule.matches(run, msg, context, orig_text)
if result > 0:
rule.category = run.flow.get_base_text(rule.category)
return rule, value
elif self.ruleset_type in [RuleSet.TYPE_WEBHOOK, RuleSet.TYPE_RESTHOOK]:
# figure out which URLs will be called
if self.ruleset_type == RuleSet.TYPE_WEBHOOK:
resthook = None
urls = [self.config_json()[RuleSet.CONFIG_WEBHOOK]]
action = self.config_json()[RuleSet.CONFIG_WEBHOOK_ACTION]
elif self.ruleset_type == RuleSet.TYPE_RESTHOOK:
from temba.api.models import Resthook
# look up the rest hook
resthook_slug = self.config_json()[RuleSet.CONFIG_RESTHOOK]
resthook = Resthook.get_or_create(run.org, resthook_slug, run.flow.created_by)
urls = resthook.get_subscriber_urls()
# no urls? use None, as our empty case
if not urls:
urls = [None]
action = 'POST'
# by default we are a failure (there are no resthooks for example)
status_code = None
body = ""
for url in urls:
from temba.api.models import WebHookEvent
(value, errors) = Msg.substitute_variables(url, run.contact, context,
org=run.flow.org, url_encode=True)
result = WebHookEvent.trigger_flow_event(value, self.flow, run, self,
run.contact, msg, action, resthook=resthook)
# we haven't recorded any status yet, do so
if not status_code:
status_code = result.status_code
body = result.body
# our subscriber is no longer interested, remove this URL as a subscriber
if result.status_code == 410:
resthook.remove_subscriber(url, run.flow.created_by)
# if this is a success and we haven't ever succeeded, set our code and body
elif 200 <= result.status_code < 300 and not (200 <= status_code < 300):
status_code = result.status_code
body = result.body
# this was an empty URL, treat it as success regardless
if url is None:
status_code = 200
body = _("No subscribers to this event")
# default to a status code of 418 if we made no calls
if not status_code:
status_code = 418
# find our matching rule, we pass in the status from our calls
for rule in self.get_rules():
(result, value) = rule.matches(run, msg, context, str(status_code))
if result > 0:
rule.category = run.flow.get_base_text(rule.category)
return rule, body
else:
# if it's a form field, construct an expression accordingly
if self.ruleset_type == RuleSet.TYPE_FORM_FIELD:
config = self.config_json()
delim = config.get('field_delimiter', ' ')
self.operand = '@(FIELD(%s, %d, "%s"))' % (self.operand[1:], config.get('field_index', 0) + 1, delim)
# if we have a custom operand, figure that out
text = None
if self.operand:
(text, errors) = Msg.substitute_variables(self.operand, run.contact, context, org=run.flow.org)
elif msg:
text = msg.text
if self.ruleset_type == RuleSet.TYPE_AIRTIME:
# flow simulation will always simulate a successful airtime transfer
# without saving the object in the DB
if run.contact.is_test:
from temba.flows.models import ActionLog
log_txt = "Simulate Complete airtime transfer"
ActionLog.create(run, log_txt, safe=True)
airtime = AirtimeTransfer(status=AirtimeTransfer.SUCCESS)
else:
airtime = AirtimeTransfer.trigger_airtime_event(self.flow.org, self, run.contact, msg)
# rebuild our context again, the webhook may have populated something
context = run.flow.build_message_context(run.contact, msg)
# airtime tests evaluate against the status of the airtime transfer
text = airtime.status
try:
rules = self.get_rules()
for rule in rules:
(result, value) = rule.matches(run, msg, context, text)
if result > 0:
# treat category as the base category
rule.category = run.flow.get_base_text(rule.category)
return rule, value
finally:
if msg:
msg.text = orig_text
return None, None
def save_run_value(self, run, rule, value):
location_value = None
dec_value = None
dt_value = None
media_value = None
# check the raw value before coercing it to unicode, otherwise a location can never match
if isinstance(value, AdminBoundary):
location_value = value
value = unicode(value)[:640]
if not location_value:
dt_value = run.flow.org.parse_date(value)
dec_value = run.flow.org.parse_decimal(value)
# if its a media value, only store the path as the value
if ':' in value:
(media_type, media_path) = value.split(':', 1)
if media_type in Msg.MEDIA_TYPES:
media_value = value
value = media_path
# delete any existing values for this ruleset, run and contact, we only store the latest
Value.objects.filter(contact=run.contact, run=run, ruleset=self).delete()
Value.objects.create(contact=run.contact, run=run, ruleset=self, category=rule.category, rule_uuid=rule.uuid,
string_value=value, decimal_value=dec_value, datetime_value=dt_value,
location_value=location_value, media_value=media_value, org=run.flow.org)
# invalidate any cache on this ruleset
Value.invalidate_cache(ruleset=self)
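# Media sketch (assumed value format): a value such as
# "image/jpeg:http://example.com/photo.jpg" splits on the first ':'; when the
# prefix is a recognized media type the full string is kept as media_value and
# only the path portion is stored as the string value.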
def get_step_type(self):
return FlowStep.TYPE_RULE_SET
def get_rules_dict(self):
return json.loads(self.rules)
def get_rules(self):
return Rule.from_json_array(self.flow.org, json.loads(self.rules))
def get_rule_uuids(self):
return [rule['uuid'] for rule in json.loads(self.rules)]
def set_rules_dict(self, json_dict):
self.rules = json.dumps(json_dict)
def set_rules(self, rules):
rules_dict = []
for rule in rules:
rules_dict.append(rule.as_json())
self.set_rules_dict(rules_dict)
def as_json(self):
ruleset_def = dict(uuid=self.uuid, x=self.x, y=self.y, label=self.label, rules=self.get_rules_dict(),
finished_key=self.finished_key, ruleset_type=self.ruleset_type, response_type=self.response_type,
operand=self.operand, config=self.config_json())
# if we are pre-version 10, include our webhook and webhook_action in our dict
if self.flow.version_number < 10:
ruleset_def['webhook'] = self.webhook_url
ruleset_def['webhook_action'] = self.webhook_action
return ruleset_def
def __unicode__(self):
if self.label:
return "RuleSet: %s - %s" % (self.uuid, self.label)
else:
return "RuleSet: %s" % (self.uuid, )
class ActionSet(models.Model):
uuid = models.CharField(max_length=36, unique=True)
flow = models.ForeignKey(Flow, related_name='action_sets')
destination = models.CharField(max_length=36, null=True)
destination_type = models.CharField(max_length=1, choices=FlowStep.STEP_TYPE_CHOICES, null=True)
actions = models.TextField(help_text=_("The JSON encoded actions for this action set"))
x = models.IntegerField()
y = models.IntegerField()
created_on = models.DateTimeField(auto_now_add=True, help_text=_("When this action was originally created"))
modified_on = models.DateTimeField(auto_now=True, help_text=_("When this action was last modified"))
@classmethod
def get(cls, flow, uuid):
return ActionSet.objects.filter(flow=flow, uuid=uuid).select_related('flow', 'flow__org').first()
def get_step_type(self):
return FlowStep.TYPE_ACTION_SET
def execute_actions(self, run, msg, started_flows, skip_leading_reply_actions=True):
actions = self.get_actions()
msgs = []
seen_other_action = False
for action in actions:
if not isinstance(action, ReplyAction):
seen_other_action = True
# leading reply actions are skipped when skip_leading_reply_actions is False and we haven't yet seen any other action
if not skip_leading_reply_actions and isinstance(action, ReplyAction) and not seen_other_action:
# then skip it
pass
elif isinstance(action, StartFlowAction):
if action.flow.pk in started_flows:
pass
else:
msgs += action.execute(run, self.uuid, msg, started_flows)
# reload our contact and reassign it to our run, it may have been changed deep down in our child flow
run.contact = Contact.objects.get(pk=run.contact.pk)
else:
msgs += action.execute(run, self.uuid, msg)
# actions modify the run.contact, update the msg contact in case they did so
if msg:
msg.contact = run.contact
return msgs
def get_actions_dict(self):
return json.loads(self.actions)
def get_actions(self):
return Action.from_json_array(self.flow.org, json.loads(self.actions))
def set_actions_dict(self, json_dict):
self.actions = json.dumps(json_dict)
def as_json(self):
return dict(uuid=self.uuid, x=self.x, y=self.y, destination=self.destination, actions=self.get_actions_dict())
def __unicode__(self): # pragma: no cover
return "ActionSet: %s" % (self.uuid, )
class FlowRevision(SmartModel):
"""
JSON definitions for previous flow revisions
"""
flow = models.ForeignKey(Flow, related_name='revisions')
definition = models.TextField(help_text=_("The JSON flow definition"))
spec_version = models.IntegerField(default=CURRENT_EXPORT_VERSION, help_text=_("The flow version this definition is in"))
revision = models.IntegerField(null=True, help_text=_("Revision number for this definition"))
@classmethod
def validate_flow_definition(cls, flow_spec):
non_localized_error = _('Malformed flow, encountered non-localized definition')
# should always have a base_language
if 'base_language' not in flow_spec or not flow_spec['base_language']:
raise ValueError(non_localized_error)
# language should match values in definition
base_language = flow_spec['base_language']
def validate_localization(lang_dict):
# must be a dict
if not isinstance(lang_dict, dict):
raise ValueError(non_localized_error)
# and contain the base_language
if base_language not in lang_dict:
raise ValueError(non_localized_error)
for actionset in flow_spec['action_sets']:
for action in actionset['actions']:
if 'msg' in action and action['type'] != 'email':
validate_localization(action['msg'])
for ruleset in flow_spec['rule_sets']:
for rule in ruleset['rules']:
validate_localization(rule['category'])
@classmethod
def migrate_export(cls, org, exported_json, same_site, version, to_version=None):
if not to_version:
to_version = CURRENT_EXPORT_VERSION
from temba.flows import flow_migrations
while version < to_version and version < CURRENT_EXPORT_VERSION:
migrate_fn = getattr(flow_migrations, 'migrate_export_to_version_%d' % (version + 1), None)
if migrate_fn:
exported_json = migrate_fn(exported_json, org, same_site)
else:
flows = []
for json_flow in exported_json.get('flows', []):
migrate_fn = getattr(flow_migrations, 'migrate_to_version_%d' % (version + 1), None)
if migrate_fn:
json_flow = migrate_fn(json_flow, None)
flows.append(json_flow)
exported_json['flows'] = flows
version += 1
return exported_json
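# Migration sketch (hypothetical versions): an export at version 8 is run through
# migrate_export_to_version_9 when an export-level migration exists, otherwise
# each flow is run through migrate_to_version_9 individually, and the loop
# repeats until to_version (at most CURRENT_EXPORT_VERSION) is reached.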
@classmethod
def migrate_definition(cls, json_flow, flow, version, to_version=None):
if not to_version:
to_version = CURRENT_EXPORT_VERSION
from temba.flows import flow_migrations
while version < to_version and version < CURRENT_EXPORT_VERSION:
migrate_fn = getattr(flow_migrations, 'migrate_to_version_%d' % (version + 1), None)
if migrate_fn:
json_flow = migrate_fn(json_flow, flow)
version += 1
return json_flow
def get_definition_json(self):
definition = json.loads(self.definition)
# if it's previous to version 6, wrap the definition to
# mirror our exports for those versions
if self.spec_version <= 6:
definition = dict(definition=definition, flow_type=self.flow.flow_type,
expires=self.flow.expires_after_minutes, id=self.flow.pk,
revision=self.revision, uuid=self.flow.uuid)
# migrate our definition if necessary
if self.spec_version < CURRENT_EXPORT_VERSION:
definition = FlowRevision.migrate_definition(definition, self.flow, self.spec_version)
return definition
def as_json(self, include_definition=False):
return dict(user=dict(email=self.created_by.username,
name=self.created_by.get_full_name()),
created_on=datetime_to_str(self.created_on),
id=self.pk,
version=self.spec_version,
revision=self.revision)
class FlowRunCount(models.Model):
"""
Maintains counts of flow runs on a flow, grouped by exit type. These are calculated
via triggers on the database.
"""
flow = models.ForeignKey(Flow, related_name='counts')
exit_type = models.CharField(null=True, max_length=1, choices=FlowRun.EXIT_TYPE_CHOICES)
count = models.IntegerField(default=0)
LAST_SQUASH_KEY = 'last_flowruncount_squash'
@classmethod
def squash_counts(cls):
# get the id of the last count we squashed
r = get_redis_connection()
last_squash = r.get(FlowRunCount.LAST_SQUASH_KEY)
if not last_squash:
last_squash = 0
# get the unique flow ids for all new ones
start = time.time()
squash_count = 0
for count in FlowRunCount.objects.filter(id__gt=last_squash).order_by('flow_id', 'exit_type').distinct('flow_id', 'exit_type'):
print "Squashing: %d %s" % (count.flow_id, count.exit_type)
# perform our atomic squash in SQL by calling our squash method
with connection.cursor() as c:
c.execute("SELECT temba_squash_flowruncount(%s, %s);", (count.flow_id, count.exit_type))
squash_count += 1
# insert our new top squashed id
max_id = FlowRunCount.objects.all().order_by('-id').first()
if max_id:
r.set(FlowRunCount.LAST_SQUASH_KEY, max_id.id)
print "Squashed run counts for %d pairs in %0.3fs" % (squash_count, time.time() - start)
@classmethod
def run_count(cls, flow):
count = FlowRunCount.objects.filter(flow=flow)
count = count.aggregate(Sum('count')).get('count__sum', 0)
return 0 if count is None else count
@classmethod
def run_count_for_type(cls, flow, exit_type=None):
count = FlowRunCount.objects.filter(flow=flow).filter(exit_type=exit_type)
count = count.aggregate(Sum('count')).get('count__sum', 0)
return 0 if count is None else count
@classmethod
def populate_for_flow(cls, flow):
# remove old ones
FlowRunCount.objects.filter(flow=flow).delete()
# get test contacts on this org
test_contacts = Contact.objects.filter(org=flow.org, is_test=True).values('id')
# calculate our count for each exit type
counts = FlowRun.objects.filter(flow=flow).exclude(contact__in=test_contacts)\
.values('exit_type').annotate(count=Count('pk'))
# insert updated counts for each
for count in counts:
if count['count'] > 0:
FlowRunCount.objects.create(flow=flow, exit_type=count['exit_type'], count=count['count'])
def __unicode__(self):
return "RunCount[%d:%s:%d]" % (self.flow_id, self.exit_type, self.count)
class Meta:
index_together = ('flow', 'exit_type')
class ExportFlowResultsTask(SmartModel):
"""
Container for managing our export requests
"""
INCLUDE_RUNS = 'include_runs'
INCLUDE_MSGS = 'include_msgs'
CONTACT_FIELDS = 'contact_fields'
RESPONDED_ONLY = 'responded_only'
org = models.ForeignKey(Org, related_name='flow_results_exports', help_text=_("The Organization of the user."))
flows = models.ManyToManyField(Flow, related_name='exports', help_text=_("The flows to export"))
host = models.CharField(max_length=32, help_text=_("The host this export task was created on"))
task_id = models.CharField(null=True, max_length=64)
is_finished = models.BooleanField(default=False, help_text=_("Whether this export is complete"))
uuid = models.CharField(max_length=36, null=True,
help_text=_("The uuid used to name the resulting export file"))
config = models.TextField(null=True,
help_text=_("Any configuration options for this flow export"))
@classmethod
def create(cls, host, org, user, flows, contact_fields, responded_only, include_runs, include_msgs):
config = {ExportFlowResultsTask.INCLUDE_RUNS: include_runs,
ExportFlowResultsTask.INCLUDE_MSGS: include_msgs,
ExportFlowResultsTask.CONTACT_FIELDS: [c.id for c in contact_fields],
ExportFlowResultsTask.RESPONDED_ONLY: responded_only}
export = ExportFlowResultsTask.objects.create(org=org, created_by=user, modified_by=user, host=host,
config=json.dumps(config))
for flow in flows:
export.flows.add(flow)
return export
def start_export(self):
"""
Starts our export, wrapping it in a try block to make sure we mark it as finished when complete.
"""
try:
start = time.time()
self.do_export()
finally:
elapsed = time.time() - start
analytics.track(self.created_by.username, 'temba.flowresult_export_latency', properties=dict(value=elapsed))
self.is_finished = True
self.save(update_fields=['is_finished'])
def do_export(self):
from xlwt import Workbook
book = Workbook()
max_rows = 65535
config = json.loads(self.config) if self.config else dict()
include_runs = config.get(ExportFlowResultsTask.INCLUDE_RUNS, False)
include_msgs = config.get(ExportFlowResultsTask.INCLUDE_MSGS, False)
responded_only = config.get(ExportFlowResultsTask.RESPONDED_ONLY, True)
contact_field_ids = config.get(ExportFlowResultsTask.CONTACT_FIELDS, [])
contact_fields = []
for cf_id in contact_field_ids:
cf = ContactField.objects.filter(id=cf_id, org=self.org, is_active=True).first()
if cf:
contact_fields.append(cf)
date_format = xlwt.easyxf(num_format_str='MM/DD/YYYY HH:MM:SS')
small_width = 15 * 256
medium_width = 20 * 256
large_width = 100 * 256
# merge the columns for all of our flows
show_submitted_by = False
columns = []
flows = self.flows.all()
with SegmentProfiler("get columns"):
for flow in flows:
columns += flow.get_columns()
if flow.flow_type == Flow.SURVEY:
show_submitted_by = True
org = None
if flows:
org = flows[0].org
org_tz = pytz.timezone(flows[0].org.timezone)
def as_org_tz(dt):
if dt:
return dt.astimezone(org_tz).replace(tzinfo=None)
else:
return None
# create a mapping of column id to index
column_map = dict()
for col in range(len(columns)):
column_map[columns[col].uuid] = 6 + len(contact_fields) + col * 3
# build a cache of rule uuid to category name, we want to use the most recent name the user set
# if possible and back down to the cached rule_category only when necessary
category_map = dict()
with SegmentProfiler("rule uuid to category to name"):
for ruleset in RuleSet.objects.filter(flow__in=flows).select_related('flow'):
for rule in ruleset.get_rules():
category_map[rule.uuid] = rule.get_category_name(ruleset.flow.base_language)
ruleset_steps = FlowStep.objects.filter(run__flow__in=flows, step_type=FlowStep.TYPE_RULE_SET)
ruleset_steps = ruleset_steps.order_by('contact', 'run', 'arrived_on', 'pk')
if responded_only:
ruleset_steps = ruleset_steps.filter(run__responded=True)
# count of unique flow runs
with SegmentProfiler("# of runs"):
all_runs_count = ruleset_steps.values('run').distinct().count()
# count of unique contacts
with SegmentProfiler("# of contacts"):
contacts_count = ruleset_steps.values('contact').distinct().count()
# grab the ids for all our steps so we don't have to ever calculate them again
with SegmentProfiler("calculate step ids"):
all_steps = FlowStep.objects.filter(run__flow__in=flows)\
.order_by('contact', 'run', 'arrived_on', 'pk')\
.values('id')
if responded_only:
all_steps = all_steps.filter(run__responded=True)
step_ids = [s['id'] for s in all_steps]
# build our sheets
run_sheets = []
total_run_sheet_count = 0
# the full sheets we need for runs
if include_runs:
for i in range(all_runs_count / max_rows + 1):
total_run_sheet_count += 1
name = "Runs" if (i + 1) <= 1 else "Runs (%d)" % (i + 1)
sheet = book.add_sheet(name, cell_overwrite_ok=True)
run_sheets.append(sheet)
total_merged_run_sheet_count = 0
# the full sheets we need for contacts
for i in range(contacts_count / max_rows + 1):
total_merged_run_sheet_count += 1
name = "Contacts" if (i + 1) <= 1 else "Contacts (%d)" % (i + 1)
sheet = book.add_sheet(name, cell_overwrite_ok=True)
run_sheets.append(sheet)
# then populate their header columns
for (sheet_num, sheet) in enumerate(run_sheets):
# build up our header row
index = 0
if show_submitted_by:
sheet.write(0, index, "Surveyor")
sheet.col(0).width = medium_width
index += 1
sheet.write(0, index, "Contact UUID")
sheet.col(index).width = medium_width
index += 1
sheet.write(0, index, "URN")
sheet.col(index).width = small_width
index += 1
sheet.write(0, index, "Name")
sheet.col(index).width = medium_width
index += 1
sheet.write(0, index, "Groups")
sheet.col(index).width = medium_width
index += 1
# add our contact fields
for cf in contact_fields:
sheet.write(0, index, cf.label)
sheet.col(index).width = medium_width
index += 1
sheet.write(0, index, "First Seen")
sheet.col(index).width = medium_width
index += 1
sheet.write(0, index, "Last Seen")
sheet.col(index).width = medium_width
index += 1
for col in range(len(columns)):
ruleset = columns[col]
sheet.write(0, index + col * 3, "%s (Category) - %s" % (unicode(ruleset.label), unicode(ruleset.flow.name)))
sheet.write(0, index + col * 3 + 1, "%s (Value) - %s" % (unicode(ruleset.label), unicode(ruleset.flow.name)))
sheet.write(0, index + col * 3 + 2, "%s (Text) - %s" % (unicode(ruleset.label), unicode(ruleset.flow.name)))
sheet.col(index + col * 3).width = 15 * 256
sheet.col(index + col * 3 + 1).width = 15 * 256
sheet.col(index + col * 3 + 2).width = 15 * 256
run_row = 0
merged_row = 0
msg_row = 0
latest = None
earliest = None
merged_latest = None
merged_earliest = None
last_run = 0
last_contact = None
# indexes of the sheets that we are currently writing to
run_sheet_index = 0
merged_run_sheet_index = total_run_sheet_count
msg_sheet_index = 0
# get our initial runs and merged runs to write to
runs = book.get_sheet(run_sheet_index)
merged_runs = book.get_sheet(merged_run_sheet_index)
msgs = None
processed_steps = 0
total_steps = len(step_ids)
start = time.time()
flow_names = ", ".join([f['name'] for f in self.flows.values('name')])
urn_display_cache = {}
def get_contact_urn_display(contact):
"""
Gets the possibly cached URN display (e.g. formatted phone number) for the given contact
"""
urn_display = urn_display_cache.get(contact.pk)
if urn_display:
return urn_display
urn_display = contact.get_urn_display(org=org, full=True)
urn_display_cache[contact.pk] = urn_display
return urn_display
for run_step in ChunkIterator(FlowStep, step_ids,
order_by=['contact', 'run', 'arrived_on', 'pk'],
select_related=['run', 'contact'],
prefetch_related=['messages__contact_urn',
'messages__channel',
'broadcasts',
'contact__all_groups'],
contact_fields=contact_fields):
processed_steps += 1
if processed_steps % 10000 == 0:
print "Export of %s - %d%% complete in %0.2fs" % \
(flow_names, processed_steps * 100 / total_steps, time.time() - start)
# skip over test contacts
if run_step.contact.is_test:
continue
contact_urn_display = get_contact_urn_display(run_step.contact)
contact_uuid = run_step.contact.uuid
# if this is a rule step, write out the value collected
if run_step.step_type == FlowStep.TYPE_RULE_SET:
# a new contact
if last_contact != run_step.contact.pk:
merged_earliest = run_step.arrived_on
merged_latest = None
if merged_row % 1000 == 0:
merged_runs.flush_row_data()
merged_row += 1
if merged_row > max_rows:
# get the next sheet to use for Contacts
merged_row = 1
merged_run_sheet_index += 1
merged_runs = book.get_sheet(merged_run_sheet_index)
# a new run
if last_run != run_step.run.pk:
earliest = run_step.arrived_on
latest = None
if include_runs:
if run_row % 1000 == 0:
runs.flush_row_data()
run_row += 1
if run_row > max_rows:
# get the next sheet to use for Runs
run_row = 1
run_sheet_index += 1
runs = book.get_sheet(run_sheet_index)
# build up our group names
group_names = []
for group in run_step.contact.all_groups.all():
if group.group_type == ContactGroup.TYPE_USER_DEFINED:
group_names.append(group.name)
group_names.sort()
groups = ", ".join(group_names)
padding = 0
if show_submitted_by:
submitted_by = ''
# use the login as the submission user
if run_step.run.submitted_by:
submitted_by = run_step.run.submitted_by.username
if include_runs:
runs.write(run_row, 0, submitted_by)
merged_runs.write(merged_row, 0, submitted_by)
padding = 1
if include_runs:
runs.write(run_row, padding + 0, contact_uuid)
runs.write(run_row, padding + 1, contact_urn_display)
runs.write(run_row, padding + 2, run_step.contact.name)
runs.write(run_row, padding + 3, groups)
merged_runs.write(merged_row, padding + 0, contact_uuid)
merged_runs.write(merged_row, padding + 1, contact_urn_display)
merged_runs.write(merged_row, padding + 2, run_step.contact.name)
merged_runs.write(merged_row, padding + 3, groups)
cf_padding = 0
# write our contact fields if any
for cf in contact_fields:
field_value = Contact.get_field_display_for_value(cf, run_step.contact.get_field(cf.key.lower()))
if field_value is None:
field_value = ''
field_value = unicode(field_value)
merged_runs.write(merged_row, padding + 4 + cf_padding, field_value)
if include_runs:
runs.write(run_row, padding + 4 + cf_padding, field_value)
cf_padding += 1
if not latest or latest < run_step.arrived_on:
latest = run_step.arrived_on
if not merged_latest or merged_latest < run_step.arrived_on:
merged_latest = run_step.arrived_on
if include_runs:
runs.write(run_row, padding + 4 + cf_padding, as_org_tz(earliest), date_format)
runs.write(run_row, padding + 5 + cf_padding, as_org_tz(latest), date_format)
merged_runs.write(merged_row, padding + 4 + cf_padding, as_org_tz(merged_earliest), date_format)
merged_runs.write(merged_row, padding + 5 + cf_padding, as_org_tz(merged_latest), date_format)
# write the step data
col = column_map.get(run_step.step_uuid, 0) + padding
if col:
category = category_map.get(run_step.rule_uuid, None)
if category:
if include_runs:
runs.write(run_row, col, category)
merged_runs.write(merged_row, col, category)
elif run_step.rule_category:
if include_runs:
runs.write(run_row, col, run_step.rule_category)
merged_runs.write(merged_row, col, run_step.rule_category)
value = run_step.rule_value
if value:
if include_runs:
runs.write(run_row, col + 1, value)
merged_runs.write(merged_row, col + 1, value)
text = run_step.get_text()
if text:
if include_runs:
runs.write(run_row, col + 2, text)
merged_runs.write(merged_row, col + 2, text)
last_run = run_step.run.pk
last_contact = run_step.contact.pk
# write out any message associated with this step
if include_msgs:
step_msgs = list(run_step.messages.all())
if step_msgs:
msg = step_msgs[0]
msg_row += 1
if msg_row % 1000 == 0:
msgs.flush_row_data()
if msg_row > max_rows or not msgs:
msg_row = 1
msg_sheet_index += 1
name = "Messages" if (msg_sheet_index + 1) <= 1 else "Messages (%d)" % (msg_sheet_index + 1)
msgs = book.add_sheet(name)
msgs.write(0, 0, "Contact UUID")
msgs.write(0, 1, "URN")
msgs.write(0, 2, "Name")
msgs.write(0, 3, "Date")
msgs.write(0, 4, "Direction")
msgs.write(0, 5, "Message")
msgs.write(0, 6, "Channel")
msgs.col(0).width = medium_width
msgs.col(1).width = small_width
msgs.col(2).width = medium_width
msgs.col(3).width = medium_width
msgs.col(4).width = small_width
msgs.col(5).width = large_width
msgs.col(6).width = small_width
msg_urn_display = msg.contact_urn.get_display(org=org, full=True) if msg.contact_urn else ''
channel_name = msg.channel.name if msg.channel else ''
msgs.write(msg_row, 0, run_step.contact.uuid)
msgs.write(msg_row, 1, msg_urn_display)
msgs.write(msg_row, 2, run_step.contact.name)
msgs.write(msg_row, 3, as_org_tz(msg.created_on), date_format)
msgs.write(msg_row, 4, "IN" if msg.direction == INCOMING else "OUT")
msgs.write(msg_row, 5, msg.text)
msgs.write(msg_row, 6, channel_name)
temp = NamedTemporaryFile(delete=True)
book.save(temp)
temp.flush()
# initialize the UUID which we will save results as
self.uuid = str(uuid4())
self.save(update_fields=['uuid'])
# save as file asset associated with this task
from temba.assets.models import AssetType
from temba.assets.views import get_asset_url
store = AssetType.results_export.store
store.save(self.pk, File(temp), 'xls')
subject = "Your export is ready"
template = 'flows/email/flow_export_download'
from temba.middleware import BrandingMiddleware
branding = BrandingMiddleware.get_branding_for_host(self.host)
download_url = branding['link'] + get_asset_url(AssetType.results_export, self.pk)
# force a gc
import gc
gc.collect()
# send the notification email (delivery is expected to be suppressed outside of production)
send_template_email(self.created_by.username, subject, template, dict(flows=flows, link=download_url), branding)
class ActionLog(models.Model):
"""
Log of an event that occurred whilst executing a flow in the simulator
"""
LEVEL_INFO = 'I'
LEVEL_WARN = 'W'
LEVEL_ERROR = 'E'
LEVEL_CHOICES = ((LEVEL_INFO, _("Info")), (LEVEL_WARN, _("Warning")), (LEVEL_ERROR, _("Error")))
run = models.ForeignKey(FlowRun, related_name='logs')
text = models.TextField(help_text=_("Log event text"))
level = models.CharField(max_length=1, choices=LEVEL_CHOICES, default=LEVEL_INFO, help_text=_("Log event level"))
created_on = models.DateTimeField(auto_now_add=True, help_text=_("When this log event occurred"))
@classmethod
def create(cls, run, text, level=LEVEL_INFO, safe=False):
if not safe:
text = escape(text)
text = text.replace('\n', "<br/>")
try:
return ActionLog.objects.create(run=run, text=text, level=level)
except Exception: # pragma: no cover
return None # it's possible our test run can be deleted out from under us
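# Escaping sketch (hypothetical input): create(run, "a<b\nc") stores
# "a&lt;b<br/>c" (HTML-escaped, newlines become <br/>), while safe=True keeps
# the text verbatim.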
@classmethod
def info(cls, run, text, safe=False):
return cls.create(run, text, cls.LEVEL_INFO, safe)
@classmethod
def warn(cls, run, text, safe=False):
return cls.create(run, text, cls.LEVEL_WARN, safe)
@classmethod
def error(cls, run, text, safe=False):
return cls.create(run, text, cls.LEVEL_ERROR, safe)
def as_json(self):
return dict(id=self.id,
direction="O",
level=self.level,
text=self.text,
created_on=self.created_on.strftime('%x %X'),
model="log")
def simulator_json(self):
return self.as_json()
def __unicode__(self):
return self.text
class FlowStart(SmartModel):
STATUS_PENDING = 'P'
STATUS_STARTING = 'S'
STATUS_COMPLETE = 'C'
STATUS_FAILED = 'F'
STATUS_CHOICES = ((STATUS_PENDING, "Pending"),
(STATUS_STARTING, "Starting"),
(STATUS_COMPLETE, "Complete"),
(STATUS_FAILED, "Failed"))
flow = models.ForeignKey(Flow, related_name='starts', help_text=_("The flow that is being started"))
groups = models.ManyToManyField(ContactGroup, help_text=_("Groups that will start the flow"))
contacts = models.ManyToManyField(Contact, help_text=_("Contacts that will start the flow"))
restart_participants = models.BooleanField(default=True,
help_text=_("Whether to restart any participants already in this flow"))
contact_count = models.IntegerField(default=0,
help_text=_("How many unique contacts were started down the flow"))
status = models.CharField(max_length=1, default=STATUS_PENDING, choices=STATUS_CHOICES,
help_text=_("The status of this flow start"))
@classmethod
def create(cls, flow, user, groups=None, contacts=None, restart_participants=True):
if contacts is None:
contacts = []
if groups is None:
groups = []
start = FlowStart.objects.create(flow=flow, restart_participants=restart_participants,
created_by=user, modified_by=user)
for contact in contacts:
start.contacts.add(contact)
for group in groups:
start.groups.add(group)
return start
def async_start(self):
from temba.flows.tasks import start_flow_task
start_flow_task.delay(self.id)
def start(self):
self.status = FlowStart.STATUS_STARTING
self.save(update_fields=['status'])
try:
groups = [g for g in self.groups.all()]
contacts = [c for c in self.contacts.all().only('is_test')]
self.flow.start(groups, contacts, restart_participants=self.restart_participants, flow_start=self)
except Exception as e: # pragma: no cover
import traceback
traceback.print_exc(e)
self.status = FlowStart.STATUS_FAILED
self.save(update_fields=['status'])
raise e
def update_status(self):
# only update our status to complete if we have started as many runs as our total contact count
if self.runs.count() == self.contact_count:
self.status = FlowStart.STATUS_COMPLETE
self.save(update_fields=['status'])
def __unicode__(self): # pragma: no cover
return "FlowStart %d (Flow %d)" % (self.id, self.flow_id)
class FlowLabel(models.Model):
org = models.ForeignKey(Org)
uuid = models.CharField(max_length=36, unique=True, db_index=True, default=generate_uuid,
verbose_name=_("Unique Identifier"), help_text=_("The unique identifier for this label"))
name = models.CharField(max_length=64, verbose_name=_("Name"),
help_text=_("The name of this flow label"))
parent = models.ForeignKey('FlowLabel', verbose_name=_("Parent"), null=True, related_name="children")
def get_flows_count(self):
"""
Returns the count of flows tagged with this label or one of its children
"""
return self.get_flows().count()
def get_flows(self):
return Flow.objects.filter(Q(labels=self) | Q(labels__parent=self)).filter(is_active=True, is_archived=False).distinct()
@classmethod
def create_unique(cls, base, org, parent=None):
base = base.strip()
# truncate if necessary
if len(base) > 32:
base = base[:32]
# find the next available label by appending numbers
count = 2
while FlowLabel.objects.filter(name=base, org=org, parent=parent):
# make room for the number
if len(base) >= 32:
base = base[:30]
last = str(count - 1)
if base.endswith(last):
base = base[:-len(last)]
base = "%s %d" % (base.strip(), count)
count += 1
return FlowLabel.objects.create(name=base, org=org, parent=parent)
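# Naming sketch (assuming labels "Sales" and "Sales 2" already exist for the
# org): create_unique("Sales", org) tries "Sales", then "Sales 2", and finally
# creates and returns a label named "Sales 3".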
def toggle_label(self, flows, add):
changed = []
for flow in flows:
# if we are adding the flow label and this flow doesn't have it, add it
if add:
if not flow.labels.filter(pk=self.pk):
flow.labels.add(self)
changed.append(flow.pk)
# otherwise, remove it if present
else:
if flow.labels.filter(pk=self.pk):
flow.labels.remove(self)
changed.append(flow.pk)
return changed
def __unicode__(self):
if self.parent:
return "%s > %s" % (self.parent, self.name)
return self.name
class Meta:
unique_together = ('name', 'parent', 'org')
__flow_user = None
def get_flow_user():
global __flow_user
if not __flow_user:
user = User.objects.filter(username='flow').first()
if user:
__flow_user = user
else:
user = User.objects.create_user('flow')
user.groups.add(Group.objects.get(name='Service Users'))
__flow_user = user
return __flow_user
class Action(object):
"""
Base class for actions that can be added to an action set and executed during a flow run
"""
TYPE = 'type'
__action_mapping = None
@classmethod
def from_json(cls, org, json_obj):
if not cls.__action_mapping:
cls.__action_mapping = {
ReplyAction.TYPE: ReplyAction,
SendAction.TYPE: SendAction,
AddToGroupAction.TYPE: AddToGroupAction,
DeleteFromGroupAction.TYPE: DeleteFromGroupAction,
AddLabelAction.TYPE: AddLabelAction,
EmailAction.TYPE: EmailAction,
WebhookAction.TYPE: WebhookAction,
SaveToContactAction.TYPE: SaveToContactAction,
SetLanguageAction.TYPE: SetLanguageAction,
SetChannelAction.TYPE: SetChannelAction,
StartFlowAction.TYPE: StartFlowAction,
SayAction.TYPE: SayAction,
PlayAction.TYPE: PlayAction,
TriggerFlowAction.TYPE: TriggerFlowAction,
}
action_type = json_obj.get(cls.TYPE)
if not action_type: # pragma: no cover
raise FlowException("Action definition missing 'type' attribute: %s" % json_obj)
if action_type not in cls.__action_mapping: # pragma: no cover
raise FlowException("Unknown action type '%s' in definition: '%s'" % (action_type, json_obj))
return cls.__action_mapping[action_type].from_json(org, json_obj)
@classmethod
def from_json_array(cls, org, json_arr):
actions = []
for inner in json_arr:
action = Action.from_json(org, inner)
if action:
actions.append(action)
return actions
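# Round-trip sketch (assumed JSON shape): Action.from_json(org,
# {"type": "reply", "msg": {"base": "Thanks!"}}) yields a ReplyAction whose
# as_json() returns the same dict.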
class EmailAction(Action):
"""
Sends an email to someone
"""
TYPE = 'email'
EMAILS = 'emails'
SUBJECT = 'subject'
MESSAGE = 'msg'
def __init__(self, emails, subject, message):
if not emails:
raise FlowException("Email actions require at least one recipient")
self.emails = emails
self.subject = subject
self.message = message
@classmethod
def from_json(cls, org, json_obj):
emails = json_obj.get(EmailAction.EMAILS)
message = json_obj.get(EmailAction.MESSAGE)
subject = json_obj.get(EmailAction.SUBJECT)
return EmailAction(emails, subject, message)
def as_json(self):
return dict(type=EmailAction.TYPE, emails=self.emails, subject=self.subject, msg=self.message)
def execute(self, run, actionset_uuid, msg, offline_on=None):
from .tasks import send_email_action_task
# build our message from our flow variables
message_context = run.flow.build_message_context(run.contact, msg)
(message, errors) = Msg.substitute_variables(self.message, run.contact, message_context, org=run.flow.org)
(subject, errors) = Msg.substitute_variables(self.subject, run.contact, message_context, org=run.flow.org)
# make sure the subject is a single line; replace '\t\n\r\f\v' with ' '
subject = regex.sub('\s+', ' ', subject, regex.V0)
valid_addresses = []
invalid_addresses = []
for email in self.emails:
if email[0] == '@':
# a leading '@' means this is an expression (e.g. @contact.email), evaluate it against the context
(address, errors) = Msg.substitute_variables(email, run.contact, message_context, org=run.flow.org)
else:
address = email
address = address.strip()
if is_valid_address(address):
valid_addresses.append(address)
else:
invalid_addresses.append(address)
if not run.contact.is_test:
if valid_addresses:
send_email_action_task.delay(valid_addresses, subject, message)
else:
if valid_addresses:
ActionLog.info(run, _("\"%s\" would be sent to %s") % (message, ", ".join(valid_addresses)))
if invalid_addresses:
ActionLog.warn(run, _("Some email addresses appear to be invalid: %s") % ", ".join(invalid_addresses))
return []
class WebhookAction(Action):
"""
Forwards the steps in this flow to the webhook (if any)
"""
TYPE = 'api'
ACTION = 'action'
def __init__(self, webhook, action='POST'):
self.webhook = webhook
self.action = action
@classmethod
def from_json(cls, org, json_obj):
return WebhookAction(json_obj.get('webhook', org.get_webhook_url()), json_obj.get('action', 'POST'))
def as_json(self):
return dict(type=WebhookAction.TYPE, webhook=self.webhook, action=self.action)
def execute(self, run, actionset_uuid, msg, offline_on=None):
from temba.api.models import WebHookEvent
message_context = run.flow.build_message_context(run.contact, msg)
(value, errors) = Msg.substitute_variables(self.webhook, run.contact, message_context,
org=run.flow.org, url_encode=True)
if errors:
ActionLog.warn(run, _("URL appears to contain errors: %s") % ", ".join(errors))
WebHookEvent.trigger_flow_event(value, run.flow, run, actionset_uuid, run.contact, msg, self.action)
return []
class AddToGroupAction(Action):
"""
Adds the user to a group
"""
TYPE = 'add_group'
GROUP = 'group'
GROUPS = 'groups'
UUID = 'uuid'
NAME = 'name'
def __init__(self, groups):
self.groups = groups
@classmethod
def from_json(cls, org, json_obj):
return AddToGroupAction(AddToGroupAction.get_groups(org, json_obj))
@classmethod
def get_groups(cls, org, json_obj):
# for backwards compatibility
group_data = json_obj.get(AddToGroupAction.GROUP, None)
if not group_data:
group_data = json_obj.get(AddToGroupAction.GROUPS)
else:
group_data = [group_data]
groups = []
for g in group_data:
if isinstance(g, dict):
group_uuid = g.get(AddToGroupAction.UUID, None)
group_name = g.get(AddToGroupAction.NAME)
group = ContactGroup.get_or_create(org, org.created_by, group_name, group_uuid)
groups.append(group)
else:
if g and g[0] == '@':
groups.append(g)
else:
group = ContactGroup.get_user_group(org, g)
if group:
groups.append(group)
else:
groups.append(ContactGroup.create_static(org, org.get_user(), g))
return groups
def as_json(self):
groups = []
for g in self.groups:
if isinstance(g, ContactGroup):
groups.append(dict(uuid=g.uuid, name=g.name))
else:
groups.append(g)
return dict(type=self.get_type(), groups=groups)
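# Serialization sketch: groups resolved to ContactGroup objects serialize as
# {"uuid": ..., "name": ...} dicts, while unresolved expression groups such as
# "@flow.team" (hypothetical) pass through as plain strings.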
def get_type(self):
return AddToGroupAction.TYPE
def execute(self, run, actionset_uuid, msg, offline_on=None):
contact = run.contact
add = AddToGroupAction.TYPE == self.get_type()
user = get_flow_user()
if contact:
for group in self.groups:
if not isinstance(group, ContactGroup):
message_context = run.flow.build_message_context(contact, msg)
(value, errors) = Msg.substitute_variables(group, contact, message_context, org=run.flow.org)
group = None
if not errors:
group = ContactGroup.get_user_group(contact.org, value)
if not group:
try:
group = ContactGroup.create_static(contact.org, user, name=value)
if run.contact.is_test:
ActionLog.info(run, _("Group '%s' created") % value)
except ValueError:
ActionLog.error(run, _("Unable to create group with name '%s'") % value)
else:
ActionLog.error(run, _("Group name could not be evaluated: %s") % ', '.join(errors))
if group:
# TODO should become a failure (because it should be impossible) and not just a simulator error
if group.is_dynamic:
# report to sentry
logger.error("Attempt to add/remove contacts on dynamic group '%s' [%d] "
"in flow '%s' [%d] for org '%s' [%d]"
% (group.name, group.pk, run.flow.name, run.flow.pk, run.org.name, run.org.pk))
if run.contact.is_test:
if add:
ActionLog.error(run, _("%s is a dynamic group which we can't add contacts to") % group.name)
else:
ActionLog.error(run, _("%s is a dynamic group which we can't remove contacts from") % group.name)
continue
group.update_contacts(user, [contact], add)
if run.contact.is_test:
if add:
ActionLog.info(run, _("Added %s to %s") % (run.contact.name, group.name))
else:
ActionLog.info(run, _("Removed %s from %s") % (run.contact.name, group.name))
return []
class DeleteFromGroupAction(AddToGroupAction):
"""
Removes the user from a group
"""
TYPE = 'del_group'
def get_type(self):
return DeleteFromGroupAction.TYPE
def as_json(self):
groups = []
for g in self.groups:
if isinstance(g, ContactGroup):
groups.append(dict(uuid=g.uuid, name=g.name))
else:
groups.append(g)
return dict(type=self.get_type(), groups=groups)
@classmethod
def from_json(cls, org, json_obj):
return DeleteFromGroupAction(DeleteFromGroupAction.get_groups(org, json_obj))
def execute(self, run, actionset, sms):
if len(self.groups) == 0:
contact = run.contact
user = get_flow_user()
if contact:
# remove from all active and inactive user-defined, static groups
for group in ContactGroup.user_groups.filter(org=contact.org,
group_type=ContactGroup.TYPE_USER_DEFINED,
query__isnull=True):
group.update_contacts(user, [contact], False)
if run.contact.is_test:
ActionLog.info(run, _("Removed %s from %s") % (run.contact.name, group.name))
return []
return AddToGroupAction.execute(self, run, actionset, sms)
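# Illustrative definitions (assumed JSON): {"type": "del_group", "groups": []}
# is the remove-from-every-static-group case above, while
# {"type": "del_group", "groups": [{"uuid": "...", "name": "Farmers"}]} falls
# through to the shared AddToGroupAction logic with add=False.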
class AddLabelAction(Action):
"""
Add a label to the incoming message
"""
TYPE = 'add_label'
LABELS = 'labels'
UUID = 'uuid'
NAME = 'name'
def __init__(self, labels):
self.labels = labels
@classmethod
def from_json(cls, org, json_obj):
labels_data = json_obj.get(AddLabelAction.LABELS)
labels = []
for label_data in labels_data:
if isinstance(label_data, dict):
label_uuid = label_data.get(AddLabelAction.UUID, None)
label_name = label_data.get(AddLabelAction.NAME)
label = Label.label_objects.filter(org=org, uuid=label_uuid).first() if label_uuid else None
if label:
labels.append(label)
else:
labels.append(Label.get_or_create(org, org.get_user(), label_name))
elif isinstance(label_data, basestring):
if label_data and label_data[0] == '@':
# label name is a variable substitution
labels.append(label_data)
else:
labels.append(Label.get_or_create(org, org.get_user(), label_data))
else:
raise ValueError("Label data must be a dict or string")
return AddLabelAction(labels)
def as_json(self):
labels = []
for action_label in self.labels:
if isinstance(action_label, Label):
labels.append(dict(uuid=action_label.uuid, name=action_label.name))
else:
labels.append(action_label)
return dict(type=self.get_type(), labels=labels)
def get_type(self):
return AddLabelAction.TYPE
def execute(self, run, actionset_uuid, msg, offline_on=None):
for label in self.labels:
if not isinstance(label, Label):
contact = run.contact
message_context = run.flow.build_message_context(contact, msg)
(value, errors) = Msg.substitute_variables(label, contact, message_context, org=run.flow.org)
if not errors:
try:
label = Label.get_or_create(contact.org, contact.org.get_user(), value)
if run.contact.is_test:
ActionLog.info(run, _("Label '%s' created") % label.name)
except ValueError:
label = None
ActionLog.error(run, _("Unable to create label with name '%s'") % value)
else:
label = None
ActionLog.error(run, _("Label name could not be evaluated: %s") % ', '.join(errors))
if label and msg and msg.pk:
if run.contact.is_test:
# don't really add labels to simulator messages
ActionLog.info(run, _("Added %s label to msg '%s'") % (label.name, msg.text))
else:
label.toggle_label([msg], True)
return []
class SayAction(Action):
"""
Voice action for reading some text to a user
"""
TYPE = 'say'
MESSAGE = 'msg'
UUID = 'uuid'
RECORDING = 'recording'
def __init__(self, uuid, msg, recording):
self.uuid = uuid
self.msg = msg
self.recording = recording
@classmethod
def from_json(cls, org, json_obj):
return SayAction(json_obj.get(SayAction.UUID),
json_obj.get(SayAction.MESSAGE),
json_obj.get(SayAction.RECORDING))
def as_json(self):
return dict(type=SayAction.TYPE, msg=self.msg,
uuid=self.uuid, recording=self.recording)
def execute(self, run, actionset_uuid, event, offline_on=None):
media_url = None
if self.recording:
# localize our recording
recording = run.flow.get_localized_text(self.recording, run.contact)
# if we have a localized recording, create the url
if recording:
media_url = "https://%s/%s" % (settings.AWS_BUCKET_DOMAIN, recording)
# localize the text for our message, need this either way for logging
message = run.flow.get_localized_text(self.msg, run.contact)
(message, errors) = Msg.substitute_variables(message, run.contact, run.flow.build_message_context(run.contact, event))
msg = run.create_outgoing_ivr(message, media_url)
if msg:
if run.contact.is_test:
if media_url:
ActionLog.create(run, _('Played recorded message for "%s"') % message)
else:
ActionLog.create(run, _('Read message "%s"') % message)
return [msg]
else:
# no message, possibly failed loop detection
run.voice_response.say(_("Sorry, an invalid flow has been detected. Good bye."))
return []
class PlayAction(Action):
"""
Voice action for playing an audio recording to a user
"""
TYPE = 'play'
URL = 'url'
UUID = 'uuid'
def __init__(self, uuid, url):
self.uuid = uuid
self.url = url
@classmethod
def from_json(cls, org, json_obj):
return PlayAction(json_obj.get(PlayAction.UUID), json_obj.get(PlayAction.URL))
def as_json(self):
return dict(type=PlayAction.TYPE, url=self.url, uuid=self.uuid)
def execute(self, run, actionset_uuid, event, offline_on=None):
(media, errors) = Msg.substitute_variables(self.url, run.contact, run.flow.build_message_context(run.contact, event))
msg = run.create_outgoing_ivr(_('Played contact recording'), media)
if msg:
if run.contact.is_test:
log_txt = _('Played recording at "%s"') % msg.media
ActionLog.create(run, log_txt)
return [msg]
else:
# no message, possibly failed loop detection
run.voice_response.say(_("Sorry, an invalid flow has been detected. Good bye."))
return []
class ReplyAction(Action):
"""
Simple action for sending back a message
"""
TYPE = 'reply'
MESSAGE = 'msg'
def __init__(self, msg=None):
self.msg = msg
@classmethod
def from_json(cls, org, json_obj):
return ReplyAction(msg=json_obj.get(ReplyAction.MESSAGE))
def as_json(self):
return dict(type=ReplyAction.TYPE, msg=self.msg)
def execute(self, run, actionset_uuid, msg, offline_on=None):
reply = None
if self.msg:
user = get_flow_user()
text = run.flow.get_localized_text(self.msg, run.contact)
if offline_on:
reply = Msg.create_outgoing(run.org, user, (run.contact, None), text, status=SENT,
created_on=offline_on, response_to=msg)
else:
context = run.flow.build_message_context(run.contact, msg)
try:
if msg:
reply = msg.reply(text, user, trigger_send=False, message_context=context)
else:
reply = run.contact.send(text, user, trigger_send=False, message_context=context)
except UnreachableException:
pass
return [reply] if reply else []
class VariableContactAction(Action):
"""
Base action that resolves variables into groups and contacts. Used by actions that
target a set of contacts, e.g. SendAction and TriggerFlowAction
"""
CONTACTS = 'contacts'
GROUPS = 'groups'
VARIABLES = 'variables'
PHONE = 'phone'
NAME = 'name'
UUID = 'uuid'
ID = 'id'
def __init__(self, groups, contacts, variables):
self.groups = groups
self.contacts = contacts
self.variables = variables
@classmethod
def parse_groups(cls, org, json_obj):
# we actually instantiate our contacts here
groups = []
for group_data in json_obj.get(VariableContactAction.GROUPS):
group_uuid = group_data.get(VariableContactAction.UUID, None)
group_name = group_data.get(VariableContactAction.NAME)
# flows from when true deletion was allowed need this
if not group_name:
group_name = 'Missing'
group = ContactGroup.get_or_create(org, org.get_user(), group_name, group_uuid)
groups.append(group)
return groups
@classmethod
def parse_contacts(cls, org, json_obj):
contacts = []
for contact in json_obj.get(VariableContactAction.CONTACTS):
name = contact.get(VariableContactAction.NAME, None)
phone = contact.get(VariableContactAction.PHONE, None)
contact_uuid = contact.get(VariableContactAction.UUID, None)
contact = Contact.objects.filter(uuid=contact_uuid, org=org).first()
if not contact and phone:
contact = Contact.get_or_create(org, org.created_by, name=None, urns=[(TEL_SCHEME, phone)])
# if they don't have a name use the one in our action
if name and not contact.name:
contact.name = name
contact.save(update_fields=['name'])
if contact:
contacts.append(contact)
return contacts
@classmethod
def parse_variables(cls, org, json_obj):
variables = []
if VariableContactAction.VARIABLES in json_obj:
variables = [variable.get(VariableContactAction.ID) for variable in json_obj.get(VariableContactAction.VARIABLES)]
return variables
def build_groups_and_contacts(self, run, msg):
message_context = run.flow.build_message_context(run.contact, msg)
contacts = list(self.contacts)
groups = list(self.groups)
# see if we've got groups or contacts
for variable in self.variables:
# this is a marker for a new contact
if variable == NEW_CONTACT_VARIABLE:
# if this is a test contact, stuff a fake contact in for logging purposes
if run.contact.is_test:
contacts.append(Contact(pk=-1))
# otherwise, really create the contact
else:
contacts.append(Contact.get_or_create(run.flow.org, get_flow_user(), name=None, urns=()))
# other type of variable, perform our substitution
else:
(variable, errors) = Msg.substitute_variables(variable, contact=run.contact,
message_context=message_context, org=run.flow.org)
variable_group = ContactGroup.get_user_group(run.flow.org, name=variable)
if variable_group:
groups.append(variable_group)
else:
country = run.flow.org.get_country_code()
if country:
(number, valid) = URN.normalize_number(variable, country)
if number and valid:
contact = Contact.get_or_create(run.flow.org, get_flow_user(), urns=[URN.from_tel(number)])
contacts.append(contact)
return groups, contacts
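# Resolution sketch (hypothetical inputs): a variable evaluating to "Farmers"
# resolves to that contact group when one exists; a value like "0788 123 456"
# in an org with a country code is normalized to a URN and resolved (or
# created) as a contact instead.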
class TriggerFlowAction(VariableContactAction):
"""
Action that starts a set of contacts down another flow
"""
TYPE = 'trigger-flow'
def __init__(self, flow, groups, contacts, variables):
self.flow = flow
super(TriggerFlowAction, self).__init__(groups, contacts, variables)
@classmethod
def from_json(cls, org, json_obj):
flow_json = json_obj.get('flow')
uuid = flow_json.get('uuid')
flow = Flow.objects.filter(org=org, is_active=True,
is_archived=False, uuid=uuid).first()
# it is possible our flow got deleted
if not flow:
return None
groups = VariableContactAction.parse_groups(org, json_obj)
contacts = VariableContactAction.parse_contacts(org, json_obj)
variables = VariableContactAction.parse_variables(org, json_obj)
return TriggerFlowAction(flow, groups, contacts, variables)
def as_json(self):
contact_ids = [dict(uuid=contact.uuid, name=contact.name) for contact in self.contacts]
group_ids = [dict(uuid=group.uuid, name=group.name) for group in self.groups]
variables = [dict(id=variable) for variable in self.variables]
return dict(type=TriggerFlowAction.TYPE, flow=dict(uuid=self.flow.uuid, name=self.flow.name),
contacts=contact_ids, groups=group_ids, variables=variables)
def execute(self, run, actionset_uuid, msg, offline_on=None):
if self.flow:
message_context = run.flow.build_message_context(run.contact, msg)
(groups, contacts) = self.build_groups_and_contacts(run, msg)
# start our contacts down the flow
if not run.contact.is_test:
# our extra will be our flow variables in our message context
extra = message_context.get('extra', dict())
self.flow.start(groups, contacts, restart_participants=True, started_flows=[run.flow.pk],
extra=extra, parent_run=run)
return []
else:
unique_contacts = set()
for contact in contacts:
unique_contacts.add(contact.pk)
for group in groups:
for contact in group.contacts.all():
unique_contacts.add(contact.pk)
self.logger(run, self.flow, len(unique_contacts))
return []
else: # pragma: no cover
return []
def logger(self, run, flow, contact_count):
log_txt = _("Added %d contact(s) to '%s' flow") % (contact_count, flow.name)
log = ActionLog.create(run, log_txt)
return log
class SetLanguageAction(Action):
"""
Action that sets the language for a contact
"""
TYPE = 'lang'
LANG = 'lang'
NAME = 'name'
def __init__(self, lang, name):
self.lang = lang
self.name = name
@classmethod
def from_json(cls, org, json_obj):
return SetLanguageAction(json_obj.get(cls.LANG), json_obj.get(cls.NAME))
def as_json(self):
return dict(type=SetLanguageAction.TYPE, lang=self.lang, name=self.name)
def execute(self, run, actionset_uuid, msg, offline_on=None):
if len(self.lang) != 3:
run.contact.language = None
else:
run.contact.language = self.lang
run.contact.save(update_fields=['language'])
self.logger(run)
return []
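# Behavior sketch: a three-letter code such as "fra" is stored on the contact,
# while any other length (e.g. the hypothetical value "base") clears
# contact.language back to None.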
def logger(self, run):
# only log for test contact
if not run.contact.is_test:
return False
log_txt = _("Setting language to %s") % self.name
log = ActionLog.create(run, log_txt)
return log
class StartFlowAction(Action):
"""
Action that starts the contact into another flow
"""
TYPE = 'flow'
FLOW = 'flow'
NAME = 'name'
UUID = 'uuid'
def __init__(self, flow):
self.flow = flow
@classmethod
def from_json(cls, org, json_obj):
flow_obj = json_obj.get(cls.FLOW)
flow_uuid = flow_obj.get(cls.UUID)
flow = Flow.objects.filter(org=org, is_active=True, is_archived=False, uuid=flow_uuid).first()
# it is possible our flow got deleted
if not flow:
return None
else:
return StartFlowAction(flow)
def as_json(self):
return dict(type=StartFlowAction.TYPE, flow=dict(uuid=self.flow.uuid, name=self.flow.name))
def execute(self, run, actionset_uuid, msg, started_flows, offline_on=None):
# our extra will be our flow variables in our message context
message_context = run.flow.build_message_context(run.contact, msg)
extra = message_context.get('extra', dict())
# if they are both flow runs, just redirect the call
if run.flow.flow_type == Flow.VOICE and self.flow.flow_type == Flow.VOICE:
new_run = self.flow.start([], [run.contact], started_flows=started_flows,
restart_participants=True, extra=extra, parent_run=run)[0]
url = "https://%s%s" % (settings.TEMBA_HOST, reverse('ivr.ivrcall_handle', args=[new_run.call.pk]))
run.voice_response.redirect(url)
else:
self.flow.start([], [run.contact], started_flows=started_flows, restart_participants=True,
extra=extra, parent_run=run)
self.logger(run)
return []
def logger(self, run):
# only log for test contact
if not run.contact.is_test:
return False
log_txt = _("Starting other flow %s") % self.flow.name
log = ActionLog.create(run, log_txt)
return log
class SaveToContactAction(Action):
"""
Action to save a variable substitution to a field on a contact
"""
TYPE = 'save'
FIELD = 'field'
LABEL = 'label'
VALUE = 'value'
def __init__(self, label, field, value):
self.label = label
self.field = field
self.value = value
@classmethod
def get_label(cls, org, field, label=None):
# make sure this field exists
if field == 'name':
label = 'Contact Name'
elif field == 'first_name':
label = 'First Name'
elif field == 'tel_e164':
label = 'Phone Number'
elif field in ContactURN.CONTEXT_KEYS_TO_SCHEME.keys():
label = unicode(ContactURN.CONTEXT_KEYS_TO_LABEL[field])
else:
contact_field = ContactField.objects.filter(org=org, key=field).first()
if contact_field:
label = contact_field.label
else:
ContactField.get_or_create(org, get_flow_user(), field, label)
return label
@classmethod
def from_json(cls, org, json_obj):
# they are creating a new field
label = json_obj.get(cls.LABEL)
field = json_obj.get(cls.FIELD)
value = json_obj.get(cls.VALUE)
if label and label.startswith('[_NEW_]'):
label = label[7:]
# create our contact field if necessary
if not field:
field = ContactField.make_key(label)
# look up our label
label = cls.get_label(org, field, label)
return SaveToContactAction(label, field, value)
def as_json(self):
return dict(type=SaveToContactAction.TYPE, label=self.label, field=self.field, value=self.value)
def execute(self, run, actionset_uuid, msg, offline_on=None):
# evaluate our value
contact = run.contact
user = get_flow_user()
message_context = run.flow.build_message_context(contact, msg)
(value, errors) = Msg.substitute_variables(self.value, contact, message_context, org=run.flow.org)
if contact.is_test and errors:
ActionLog.warn(run, _("Expression contained errors: %s") % ', '.join(errors))
value = value.strip()
if self.field == 'name':
new_value = value[:128]
contact.name = new_value
contact.modified_by = user
contact.save(update_fields=('name', 'modified_by', 'modified_on'))
self.logger(run, new_value)
elif self.field == 'first_name':
new_value = value[:128]
contact.set_first_name(new_value)
contact.modified_by = user
contact.save(update_fields=('name', 'modified_by', 'modified_on'))
self.logger(run, new_value)
elif self.field in ContactURN.CONTEXT_KEYS_TO_SCHEME.keys():
new_value = value[:128]
# add in our new urn number
scheme = ContactURN.CONTEXT_KEYS_TO_SCHEME[self.field]
# trim off '@' for twitter handles
if self.field == 'twitter':
if len(new_value) > 0:
if new_value[0] == '@':
new_value = new_value[1:]
# only valid urns get added, sorry
new_urn = URN.normalize(URN.from_parts(scheme, new_value))
if not URN.validate(new_urn, contact.org.get_country_code()):
new_urn = None
if contact.is_test:
ActionLog.warn(run, _('Skipping invalid connection for contact (%s:%s)' % (scheme, new_value)))
if new_urn:
urns = [urn.urn for urn in contact.urns.all()]
urns += [new_urn]
# don't really update URNs on test contacts
if contact.is_test:
ActionLog.info(run, _("Added %s as @contact.%s - skipped in simulator" % (new_value, scheme)))
else:
contact.update_urns(user, urns)
else:
new_value = value[:640]
contact.set_field(user, self.field, new_value)
self.logger(run, new_value)
return []
def logger(self, run, new_value):
# only log for test contact
if not run.contact.is_test:
return False
label = SaveToContactAction.get_label(run.flow.org, self.field, self.label)
log_txt = _("Updated %s to '%s'") % (label, new_value)
log = ActionLog.create(run, log_txt)
return log
class SetChannelAction(Action):
"""
Action which sets the preferred channel to use for this Contact. If the contact has no URNs that match
the Channel being set then this is a no-op.
"""
TYPE = 'channel'
CHANNEL = 'channel'
NAME = 'name'
def __init__(self, channel):
self.channel = channel
super(Action, self).__init__()
@classmethod
def from_json(cls, org, json_obj):
channel_uuid = json_obj.get(SetChannelAction.CHANNEL)
if channel_uuid:
channel = Channel.objects.filter(org=org, is_active=True, uuid=channel_uuid).first()
else:
channel = None
return SetChannelAction(channel)
def as_json(self):
channel_uuid = self.channel.uuid if self.channel else None
channel_name = "%s: %s" % (self.channel.get_channel_type_display(), self.channel.get_address_display()) if self.channel else None
return dict(type=SetChannelAction.TYPE, channel=channel_uuid, name=channel_name)
def execute(self, run, actionset_uuid, msg, offline_on=None):
# if we found the channel to set
if self.channel:
run.contact.set_preferred_channel(self.channel)
self.log(run, _("Updated preferred channel to %s") % self.channel.name)
return []
else:
self.log(run, _("Channel not found, no action taken"))
return []
def log(self, run, text): # pragma: no cover
if run.contact.is_test:
ActionLog.create(run, text)
class SendAction(VariableContactAction):
"""
Action which sends a message to a specified set of contacts and groups.
"""
TYPE = 'send'
MESSAGE = 'msg'
def __init__(self, msg, groups, contacts, variables):
self.msg = msg
super(SendAction, self).__init__(groups, contacts, variables)
@classmethod
def from_json(cls, org, json_obj):
groups = VariableContactAction.parse_groups(org, json_obj)
contacts = VariableContactAction.parse_contacts(org, json_obj)
variables = VariableContactAction.parse_variables(org, json_obj)
return SendAction(json_obj.get(SendAction.MESSAGE), groups, contacts, variables)
def as_json(self):
contact_ids = [dict(uuid=_.uuid) for _ in self.contacts]
group_ids = [dict(uuid=_.uuid, name=_.name) for _ in self.groups]
variables = [dict(id=_) for _ in self.variables]
return dict(type=SendAction.TYPE, msg=self.msg, contacts=contact_ids, groups=group_ids, variables=variables)
def execute(self, run, actionset_uuid, msg, offline_on=None):
if self.msg:
flow = run.flow
message_context = flow.build_message_context(run.contact, msg)
(groups, contacts) = self.build_groups_and_contacts(run, msg)
# create our broadcast and send it
if not run.contact.is_test:
# if we have localized versions, add those to our broadcast definition
language_dict = None
if isinstance(self.msg, dict):
language_dict = json.dumps(self.msg)
message_text = run.flow.get_localized_text(self.msg)
# no message text? then no-op
if not message_text:
return list()
recipients = groups + contacts
broadcast = Broadcast.create(flow.org, flow.modified_by, message_text, recipients,
language_dict=language_dict)
broadcast.send(trigger_send=False, message_context=message_context, base_language=flow.base_language)
return list(broadcast.get_messages())
else:
unique_contacts = set()
for contact in contacts:
unique_contacts.add(contact.pk)
for group in groups:
for contact in group.contacts.all():
unique_contacts.add(contact.pk)
# 'contact' refers to each contact this message is being sent to, so evaluate without it for logging
del message_context['contact']
text = run.flow.get_localized_text(self.msg, run.contact)
(message, errors) = Msg.substitute_variables(text, None, message_context,
org=run.flow.org, partial_vars=True)
self.logger(run, message, len(unique_contacts))
return []
else: # pragma: no cover
return []
def logger(self, run, text, contact_count):
log_txt = _n("Sending '%(msg)s' to %(count)d contact",
"Sending '%(msg)s' to %(count)d contacts",
contact_count) % dict(msg=text, count=contact_count)
log = ActionLog.create(run, log_txt)
return log
class Rule(object):
def __init__(self, uuid, category, destination, destination_type, test):
self.uuid = uuid
self.category = category
self.destination = destination
self.destination_type = destination_type
self.test = test
def get_category_name(self, flow_lang):
if not self.category:
if isinstance(self.test, BetweenTest):
return "%s-%s" % (self.test.min, self.test.max)
# return the category name for the flow language version
if isinstance(self.category, dict):
if flow_lang:
return self.category[flow_lang]
else:
return self.category.values()[0]
return self.category
def matches(self, run, sms, context, text):
return self.test.evaluate(run, sms, context, text)
def as_json(self):
return dict(uuid=self.uuid,
category=self.category,
destination=self.destination,
destination_type=self.destination_type,
test=self.test.as_json())
@classmethod
def from_json_array(cls, org, json):
rules = []
for rule in json:
category = rule.get('category', None)
if isinstance(category, dict):
# prune all of our translations to 36
for k, v in category.items():
if isinstance(v, unicode):
category[k] = v[:36]
elif category:
category = category[:36]
destination = rule.get('destination', None)
destination_type = None
# determine our destination type; if it's not set, it's an action set
if destination:
destination_type = rule.get('destination_type', FlowStep.TYPE_ACTION_SET)
rules.append(Rule(rule.get('uuid'),
category,
destination,
destination_type,
Test.from_json(org, rule['test'])))
return rules
class Test(object):
TYPE = 'type'
__test_mapping = None
@classmethod
def from_json(cls, org, json_dict):
if not cls.__test_mapping:
cls.__test_mapping = {
SubflowTest.TYPE: SubflowTest,
TrueTest.TYPE: TrueTest,
FalseTest.TYPE: FalseTest,
AndTest.TYPE: AndTest,
OrTest.TYPE: OrTest,
ContainsTest.TYPE: ContainsTest,
ContainsAnyTest.TYPE: ContainsAnyTest,
NumberTest.TYPE: NumberTest,
LtTest.TYPE: LtTest,
LteTest.TYPE: LteTest,
GtTest.TYPE: GtTest,
GteTest.TYPE: GteTest,
EqTest.TYPE: EqTest,
BetweenTest.TYPE: BetweenTest,
StartsWithTest.TYPE: StartsWithTest,
HasDateTest.TYPE: HasDateTest,
DateEqualTest.TYPE: DateEqualTest,
DateAfterTest.TYPE: DateAfterTest,
DateBeforeTest.TYPE: DateBeforeTest,
PhoneTest.TYPE: PhoneTest,
RegexTest.TYPE: RegexTest,
HasWardTest.TYPE: HasWardTest,
HasDistrictTest.TYPE: HasDistrictTest,
HasStateTest.TYPE: HasStateTest,
NotEmptyTest.TYPE: NotEmptyTest,
TimeoutTest.TYPE: TimeoutTest,
AirtimeStatusTest.TYPE: AirtimeStatusTest,
WebhookStatusTest.TYPE: WebhookStatusTest,
}
type = json_dict.get(cls.TYPE, None)
if not type: # pragma: no cover
raise FlowException("Test definition missing 'type' field: %s", json_dict)
if type not in cls.__test_mapping: # pragma: no cover
raise FlowException("Unknown type: '%s' in definition: %s" % (type, json_dict))
return cls.__test_mapping[type].from_json(org, json_dict)
@classmethod
def from_json_array(cls, org, json):
tests = []
for inner in json:
tests.append(Test.from_json(org, inner))
return tests
def evaluate(self, run, sms, context, text): # pragma: no cover
"""
Where the work happens: subclasses need to be able to evaluate their Test
according to their definition, given the passed-in message. Tests do not have
side effects.
"""
raise FlowException("Subclasses must implement evaluate, returning a tuple containing 1 or 0 and the value tested")
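# Illustrative sketch (not part of the original module): the registry above maps
# the JSON 'type' field to a Test subclass, so rule tests round-trip through
# from_json()/as_json(). Assumes an `org` object from a live flow is available.
#
#     json_dict = {'type': 'contains', 'test': 'red'}
#     test = Test.from_json(org, json_dict)   # -> ContainsTest instance
#     assert test.as_json() == json_dict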
class WebhookStatusTest(Test):
"""
{op: 'webhook', status: 'success' }
"""
TYPE = 'webhook_status'
STATUS = 'status'
STATUS_SUCCESS = 'success'
STATUS_FAILURE = 'failure'
def __init__(self, status):
self.status = status
@classmethod
def from_json(cls, org, json):
return WebhookStatusTest(json.get('status'))
def as_json(self):
return dict(type=WebhookStatusTest.TYPE, status=self.status)
def evaluate(self, run, sms, context, text):
# we treat any 2xx return code as successful
success = 200 <= int(text) < 300
if success and self.status == WebhookStatusTest.STATUS_SUCCESS:
return 1, text
elif not success and self.status == WebhookStatusTest.STATUS_FAILURE:
return 1, text
else:
return 0, None
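# Illustrative sketch (not part of the original module): evaluate() receives the
# webhook's HTTP status code as `text`, and any 2xx code counts as success.
# Assumes run/sms/context objects from a live flow.
#
#     test = WebhookStatusTest(WebhookStatusTest.STATUS_SUCCESS)
#     test.evaluate(run, sms, context, '201')   # -> (1, '201')
#     test.evaluate(run, sms, context, '500')   # -> (0, None)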
class AirtimeStatusTest(Test):
"""
{op: 'airtime_status'}
"""
TYPE = 'airtime_status'
EXIT = 'exit_status'
STATUS_SUCCESS = 'success'
STATUS_FAILED = 'failed'
STATUS_MAP = {STATUS_SUCCESS: AirtimeTransfer.SUCCESS,
STATUS_FAILED: AirtimeTransfer.FAILED}
def __init__(self, exit_status):
self.exit_status = exit_status
@classmethod
def from_json(cls, org, json):
return AirtimeStatusTest(json.get('exit_status'))
def as_json(self):
return dict(type=AirtimeStatusTest.TYPE, exit_status=self.exit_status)
def evaluate(self, run, sms, context, text):
status = text
if status and AirtimeStatusTest.STATUS_MAP[self.exit_status] == status:
return 1, status
return 0, None
class SubflowTest(Test):
"""
{ op: "subflow" }
"""
TYPE = 'subflow'
EXIT = 'exit_type'
TYPE_COMPLETED = 'completed'
TYPE_EXPIRED = 'expired'
EXIT_MAP = {TYPE_COMPLETED: FlowRun.EXIT_TYPE_COMPLETED,
TYPE_EXPIRED: FlowRun.EXIT_TYPE_EXPIRED}
def __init__(self, exit_type):
self.exit_type = exit_type
@classmethod
def from_json(cls, org, json):
return SubflowTest(json.get(SubflowTest.EXIT))
def as_json(self):
return dict(type=SubflowTest.TYPE, exit_type=self.exit_type)
def evaluate(self, run, sms, context, text):
# lookup the subflow run
subflow_run = FlowRun.objects.filter(parent=run).order_by('-created_on').first()
if subflow_run and SubflowTest.EXIT_MAP[self.exit_type] == subflow_run.exit_type:
return 1, text
return 0, None
class TimeoutTest(Test):
"""
{ op: "timeout", minutes: 60 }
"""
TYPE = 'timeout'
MINUTES = 'minutes'
def __init__(self, minutes):
self.minutes = minutes
@classmethod
def from_json(cls, org, json):
return TimeoutTest(int(json.get(TimeoutTest.MINUTES)))
def as_json(self):
return {'type': TimeoutTest.TYPE, TimeoutTest.MINUTES: self.minutes}
def evaluate(self, run, sms, context, text):
if run.timeout_on < timezone.now():
return 1, None
else:
return 0, None
class TrueTest(Test):
"""
{ op: "true" }
"""
TYPE = 'true'
def __init__(self):
pass
@classmethod
def from_json(cls, org, json):
return TrueTest()
def as_json(self):
return dict(type=TrueTest.TYPE)
def evaluate(self, run, sms, context, text):
return 1, text
class FalseTest(Test):
"""
{ op: "false" }
"""
TYPE = 'false'
def __init__(self):
pass
@classmethod
def from_json(cls, org, json):
return FalseTest()
def as_json(self):
return dict(type=FalseTest.TYPE)
def evaluate(self, run, sms, context, text):
return 0, None
class AndTest(Test):
"""
{ op: "and", "tests": [ ... ] }
"""
TESTS = 'tests'
TYPE = 'and'
def __init__(self, tests):
self.tests = tests
@classmethod
def from_json(cls, org, json):
return AndTest(Test.from_json_array(org, json[cls.TESTS]))
def as_json(self):
return dict(type=AndTest.TYPE, tests=[_.as_json() for _ in self.tests])
def evaluate(self, run, sms, context, text):
matches = []
for test in self.tests:
(result, value) = test.evaluate(run, sms, context, text)
if result:
matches.append(value)
else:
return 0, None
# all came out true, we are true
return 1, " ".join(matches)
class OrTest(Test):
"""
{ op: "or", "tests": [ ... ] }
"""
TESTS = 'tests'
TYPE = 'or'
def __init__(self, tests):
self.tests = tests
@classmethod
def from_json(cls, org, json):
return OrTest(Test.from_json_array(org, json[cls.TESTS]))
def as_json(self):
return dict(type=OrTest.TYPE, tests=[_.as_json() for _ in self.tests])
def evaluate(self, run, sms, context, text):
for test in self.tests:
(result, value) = test.evaluate(run, sms, context, text)
if result:
return result, value
return 0, None
class NotEmptyTest(Test):
"""
{ op: "not_empty" }
"""
TYPE = 'not_empty'
def __init__(self):
pass
@classmethod
def from_json(cls, org, json):
return NotEmptyTest()
def as_json(self):
return dict(type=NotEmptyTest.TYPE)
def evaluate(self, run, sms, context, text):
if text and len(text.strip()):
return 1, text
return 0, None
class ContainsTest(Test):
"""
{ op: "contains", "test": "red" }
"""
TEST = 'test'
TYPE = 'contains'
def __init__(self, test):
self.test = test
@classmethod
def from_json(cls, org, json):
return cls(json[cls.TEST])
def as_json(self):
json = dict(type=ContainsTest.TYPE, test=self.test)
return json
def test_in_words(self, test, words, raw_words):
matches = []
for index, word in enumerate(words):
if word == test:
matches.append(index)
continue
# only consider words over 4 characters that start with the same letter
if len(word) > 4 and len(test) > 4 and word[0] == test[0]:
# edit distance of 1 or less is a match
if edit_distance(word, test) <= 1:
matches.append(index)
return matches
def evaluate(self, run, sms, context, text):
# substitute any variables
test = run.flow.get_localized_text(self.test, run.contact)
test, errors = Msg.substitute_variables(test, run.contact, context, org=run.flow.org)
# tokenize our test
tests = regex.split(r"\W+", test.lower(), flags=regex.UNICODE | regex.V0)
# tokenize our sms
words = regex.split(r"\W+", text.lower(), flags=regex.UNICODE | regex.V0)
raw_words = regex.split(r"\W+", text, flags=regex.UNICODE | regex.V0)
tests = [elt for elt in tests if elt != '']
words = [elt for elt in words if elt != '']
raw_words = [elt for elt in raw_words if elt != '']
# run through each of our tests
matches = set()
matched_tests = 0
for test in tests:
match = self.test_in_words(test, words, raw_words)
if match:
matched_tests += 1
matches.update(match)
# we are a match only if every test matches
if matched_tests == len(tests):
matches = sorted(list(matches))
matched_words = " ".join([raw_words[idx] for idx in matches])
return len(tests), matched_words
else:
return 0, None
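# Illustrative sketch (not part of the original module): test_in_words() above
# tolerates an edit distance of 1 for words longer than four characters, so
# close misspellings still match. Assumes run/sms/context from a live flow.
#
#     test = ContainsTest('orange')
#     test.evaluate(run, sms, context, 'I like oranges')   # -> (1, 'oranges')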
class ContainsAnyTest(ContainsTest):
"""
{ op: "contains_any", "test": "red" }
"""
TEST = 'test'
TYPE = 'contains_any'
def as_json(self):
return dict(type=ContainsAnyTest.TYPE, test=self.test)
def evaluate(self, run, sms, context, text):
# substitute any variables
test = run.flow.get_localized_text(self.test, run.contact)
test, errors = Msg.substitute_variables(test, run.contact, context, org=run.flow.org)
# tokenize our test
tests = regex.split(r"\W+", test.lower(), flags=regex.UNICODE | regex.V0)
# tokenize our sms
words = regex.split(r"\W+", text.lower(), flags=regex.UNICODE | regex.V0)
raw_words = regex.split(r"\W+", text, flags=regex.UNICODE | regex.V0)
tests = [elt for elt in tests if elt != '']
words = [elt for elt in words if elt != '']
raw_words = [elt for elt in raw_words if elt != '']
# run through each of our tests
matches = set()
for test in tests:
match = self.test_in_words(test, words, raw_words)
if match:
matches.update(match)
# we are a match if at least one test matches
if matches:
matches = sorted(list(matches))
matched_words = " ".join([raw_words[idx] for idx in matches])
return 1, matched_words
else:
return 0, None
class StartsWithTest(Test):
"""
{ op: "starts", "test": "red" }
"""
TEST = 'test'
TYPE = 'starts'
def __init__(self, test):
self.test = test
@classmethod
def from_json(cls, org, json):
return cls(json[cls.TEST])
def as_json(self):
return dict(type=StartsWithTest.TYPE, test=self.test)
def evaluate(self, run, sms, context, text):
# substitute any variables in our test
test = run.flow.get_localized_text(self.test, run.contact)
test, errors = Msg.substitute_variables(test, run.contact, context, org=run.flow.org)
# strip leading and trailing whitespace
text = text.strip()
# see whether we start with our test
if text.lower().find(test.lower()) == 0:
return 1, text[:len(test)]
else:
return 0, None
class HasStateTest(Test):
TYPE = 'state'
def __init__(self):
pass
@classmethod
def from_json(cls, org, json):
return cls()
def as_json(self):
return dict(type=self.TYPE)
def evaluate(self, run, sms, context, text):
org = run.flow.org
# if they removed their country since adding the rule
if not org.country:
return 0, None
state = org.parse_location(text, STATE_LEVEL)
if state:
return 1, state[0]
return 0, None
class HasDistrictTest(Test):
TYPE = 'district'
TEST = 'test'
def __init__(self, state=None):
self.state = state
@classmethod
def from_json(cls, org, json):
return cls(json[cls.TEST])
def as_json(self):
return dict(type=self.TYPE, test=self.state)
def evaluate(self, run, sms, context, text):
# if they removed their country since adding the rule
org = run.flow.org
if not org.country:
return 0, None
# evaluate our district in case it has a replacement variable
state, errors = Msg.substitute_variables(self.state, sms.contact, context, org=run.flow.org)
parent = org.parse_location(state, STATE_LEVEL)
if parent:
district = org.parse_location(text, DISTRICT_LEVEL, parent[0])
if district:
return 1, district[0]
district = org.parse_location(text, DISTRICT_LEVEL)
# parse location when the state constraint is not provided or available
if (errors or not state) and len(district) == 1:
return 1, district[0]
return 0, None
class HasWardTest(Test):
TYPE = 'ward'
STATE = 'state'
DISTRICT = 'district'
def __init__(self, state=None, district=None):
self.state = state
self.district = district
@classmethod
def from_json(cls, org, json):
return cls(json[cls.STATE], json[cls.DISTRICT])
def as_json(self):
return dict(type=self.TYPE, state=self.state, district=self.district)
def evaluate(self, run, sms, context, text):
# if they removed their country since adding the rule
org = run.flow.org
if not org.country:
return 0, None
district = None
# evaluate our district in case it has a replacement variable
district_name, missing_district = Msg.substitute_variables(self.district, sms.contact, context, org=run.flow.org)
state_name, missing_state = Msg.substitute_variables(self.state, sms.contact, context, org=run.flow.org)
if (district_name and state_name) and (len(missing_district) == 0 and len(missing_state) == 0):
state = org.parse_location(state_name, STATE_LEVEL)
if state:
district = org.parse_location(district_name, DISTRICT_LEVEL, state[0])
if district:
ward = org.parse_location(text, WARD_LEVEL, district[0])
if ward:
return 1, ward[0]
# parse location when the district constraint is not provided or available
ward = org.parse_location(text, WARD_LEVEL)
if len(ward) == 1 and district is None:
return 1, ward[0]
return 0, None
class HasDateTest(Test):
TYPE = 'date'
def __init__(self):
pass
@classmethod
def from_json(cls, org, json):
return cls()
def as_json(self):
return dict(type=self.TYPE)
def evaluate_date_test(self, message_date):
return True
def evaluate(self, run, sms, context, text):
text = text.replace(' ', "-")
org = run.flow.org
dayfirst = org.get_dayfirst()
tz = org.get_tzinfo()
(date_format, time_format) = get_datetime_format(dayfirst)
date = str_to_datetime(text, tz=tz, dayfirst=org.get_dayfirst())
if date is not None and self.evaluate_date_test(date):
return 1, datetime_to_str(date, tz=tz, format=time_format, ms=False)
return 0, None
class DateTest(Test):
"""
Base class for those tests that check relative dates
"""
TEST = 'test'
TYPE = 'date'
def __init__(self, test):
self.test = test
@classmethod
def from_json(cls, org, json):
return cls(json[cls.TEST])
def as_json(self):
return dict(type=self.TYPE, test=self.test)
def evaluate_date_test(self, date_message, date_test):
raise FlowException("Evaluate date test needs to be defined by subclass.")
def evaluate(self, run, sms, context, text):
org = run.flow.org
dayfirst = org.get_dayfirst()
tz = org.get_tzinfo()
test, errors = Msg.substitute_variables(self.test, run.contact, context, org=org)
text = text.replace(' ', "-")
if not errors:
date_message = str_to_datetime(text, tz=tz, dayfirst=dayfirst)
date_test = str_to_datetime(test, tz=tz, dayfirst=dayfirst)
(date_format, time_format) = get_datetime_format(dayfirst)
if date_message is not None and date_test is not None and self.evaluate_date_test(date_message, date_test):
return 1, datetime_to_str(date_message, tz=tz, format=time_format, ms=False)
return 0, None
class DateEqualTest(DateTest):
TEST = 'test'
TYPE = 'date_equal'
def evaluate_date_test(self, date_message, date_test):
return date_message.date() == date_test.date()
class DateAfterTest(DateTest):
TEST = 'test'
TYPE = 'date_after'
def evaluate_date_test(self, date_message, date_test):
return date_message >= date_test
class DateBeforeTest(DateTest):
TEST = 'test'
TYPE = 'date_before'
def evaluate_date_test(self, date_message, date_test):
return date_message <= date_test
class NumericTest(Test):
"""
Base class for tests that run a numeric check against each word in the message.
"""
TEST = 'test'
TYPE = ''
@classmethod
def convert_to_decimal(cls, word):
# common substitutions
original_word = word
word = word.replace('l', '1').replace('o', '0').replace('O', '0')
try:
return (word, Decimal(word))
except Exception as e:
# we only try this hard if we haven't already substituted characters
if original_word == word:
# does this start with a number? just use that part if so
match = regex.match(r"^(\d+).*$", word, regex.UNICODE | regex.V0)
if match:
return (match.group(1), Decimal(match.group(1)))
else:
raise e
else:
raise e
# test every word in the message against our test
def evaluate(self, run, sms, context, text):
text = text.replace(',', '')
for word in regex.split(r"\s+", text, flags=regex.UNICODE | regex.V0):
try:
(word, decimal) = NumericTest.convert_to_decimal(word)
if self.evaluate_numeric_test(run, context, decimal):
return 1, decimal
except Exception:
pass
return 0, None
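# Illustrative sketch (not part of the original module): convert_to_decimal()
# repairs common substitutions (l -> 1, o/O -> 0) before parsing, and falls back
# to a leading digit run, so noisy replies still yield a number.
#
#     NumericTest.convert_to_decimal('l0')    # -> ('10', Decimal('10'))
#     NumericTest.convert_to_decimal('21st')  # -> ('21', Decimal('21'))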
class BetweenTest(NumericTest):
"""
Test whether we are between two numbers (inclusive)
"""
MIN = 'min'
MAX = 'max'
TYPE = 'between'
def __init__(self, min_val, max_val):
self.min = min_val
self.max = max_val
@classmethod
def from_json(cls, org, json):
return cls(json[cls.MIN], json[cls.MAX])
def as_json(self):
return dict(type=self.TYPE, min=self.min, max=self.max)
def evaluate_numeric_test(self, run, context, decimal_value):
min_val, min_errors = Msg.substitute_variables(self.min, run.contact, context, org=run.flow.org)
max_val, max_errors = Msg.substitute_variables(self.max, run.contact, context, org=run.flow.org)
if not min_errors and not max_errors:
try:
return Decimal(min_val) <= decimal_value <= Decimal(max_val)
except Exception:
pass
return False
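# Illustrative sketch (not part of the original module): both bounds are
# evaluated as expressions first (assumed error-free here) and the comparison
# is inclusive, so the upper bound itself matches.
#
#     test = BetweenTest('1', '10')
#     test.evaluate(run, sms, context, 'about 10 I think')   # -> (1, Decimal('10'))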
class NumberTest(NumericTest):
"""
Tests that there is any number in the string.
"""
TYPE = 'number'
def __init__(self):
pass
@classmethod
def from_json(cls, org, json):
return cls()
def as_json(self):
return dict(type=self.TYPE)
def evaluate_numeric_test(self, run, context, decimal_value):
return True
class SimpleNumericTest(Test):
"""
Base class for tests that compare each numeric word in the message against a single test value
"""
TEST = 'test'
TYPE = ''
def __init__(self, test):
self.test = test
@classmethod
def from_json(cls, org, json):
return cls(json[cls.TEST])
def as_json(self):
return dict(type=self.TYPE, test=self.test)
def evaluate_numeric_test(self, message_numeric, test_numeric): # pragma: no cover
raise FlowException("Evaluate numeric test needs to be defined by subclass")
# test every word in the message against our test
def evaluate(self, run, sms, context, text):
test, errors = Msg.substitute_variables(str(self.test), run.contact, context, org=run.flow.org)
text = text.replace(',', '')
for word in regex.split(r"\s+", text, flags=regex.UNICODE | regex.V0):
try:
(word, decimal) = NumericTest.convert_to_decimal(word)
if self.evaluate_numeric_test(decimal, Decimal(test)):
return 1, decimal
except Exception:
pass
return 0, None
class GtTest(SimpleNumericTest):
TEST = 'test'
TYPE = 'gt'
def evaluate_numeric_test(self, message_numeric, test_numeric):
return message_numeric > test_numeric
class GteTest(SimpleNumericTest):
TEST = 'test'
TYPE = 'gte'
def evaluate_numeric_test(self, message_numeric, test_numeric):
return message_numeric >= test_numeric
class LtTest(SimpleNumericTest):
TEST = 'test'
TYPE = 'lt'
def evaluate_numeric_test(self, message_numeric, test_numeric):
return message_numeric < test_numeric
class LteTest(SimpleNumericTest):
TEST = 'test'
TYPE = 'lte'
def evaluate_numeric_test(self, message_numeric, test_numeric):
return message_numeric <= test_numeric
class EqTest(SimpleNumericTest):
TEST = 'test'
TYPE = 'eq'
def evaluate_numeric_test(self, message_numeric, test_numeric):
return message_numeric == test_numeric
class PhoneTest(Test):
"""
Test for whether a response contains a phone number
"""
TYPE = 'phone'
def __init__(self):
pass
@classmethod
def from_json(cls, org, json):
return cls()
def as_json(self):
return dict(type=self.TYPE)
def evaluate(self, run, sms, context, text):
org = run.flow.org
# try to find a phone number in the text we have been sent
country_code = org.get_country_code()
if not country_code:
country_code = 'US'
number = None
matches = phonenumbers.PhoneNumberMatcher(text, country_code)
# try it as an international number if we failed
if not matches.has_next():
matches = phonenumbers.PhoneNumberMatcher('+' + text, country_code)
for match in matches:
number = phonenumbers.format_number(match.number, phonenumbers.PhoneNumberFormat.E164)
return number, number
class RegexTest(Test):
"""
Test for whether a response matches a regular expression
"""
TEST = 'test'
TYPE = 'regex'
def __init__(self, test):
self.test = test
@classmethod
def from_json(cls, org, json):
return cls(json[cls.TEST])
def as_json(self):
return dict(type=self.TYPE, test=self.test)
def evaluate(self, run, sms, context, text):
try:
test = run.flow.get_localized_text(self.test, run.contact)
# check whether we match
rexp = regex.compile(test, regex.UNICODE | regex.IGNORECASE | regex.MULTILINE | regex.V0)
match = rexp.search(text)
# if so, $0 will be what we return
if match:
return_match = match.group(0)
# build up a dictionary that contains indexed values
group_dict = match.groupdict()
for idx in range(rexp.groups + 1):
group_dict[str(idx)] = match.group(idx)
# set the matched groups on @extra for the run
run.update_fields(group_dict)
# return all matched values
return True, return_match
except Exception:
import traceback
traceback.print_exc()
return False, None
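# Illustrative sketch (not part of the original module): on a match, every
# numbered group is written to @extra on the run, with group 0 holding the
# full match. Assumes run/sms/context objects from a live flow.
#
#     test = RegexTest(r'(\d{4})-(\d{2})')
#     test.evaluate(run, sms, context, 'expires 2015-06')
#     # -> (True, '2015-06'); @extra now holds {'0': '2015-06', '1': '2015', '2': '06'}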
|
ewheeler/rapidpro
|
temba/flows/models.py
|
Python
|
agpl-3.0
| 232,241
|
[
"VisIt"
] |
520bdb148589d308407fceedaaffd50275dfb0328489693f874bb4adb57e434f
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Test coupled cluster logfiles"""
import os
import unittest
import numpy
__filedir__ = os.path.realpath(os.path.dirname(__file__))
class GenericCCTest(unittest.TestCase):
"""Generic coupled cluster unittest"""
def testsign(self):
"""Are the coupled cluster corrections negative?"""
corrections = self.data.ccenergies - self.data.scfenergies
self.failUnless(numpy.alltrue(corrections < 0.0))
if __name__ == "__main__":
import sys
sys.path.append(os.path.join(__filedir__, ".."))
from test_data import DataSuite
suite = DataSuite(['CC'])
suite.testall()
|
gaursagar/cclib
|
test/data/testCC.py
|
Python
|
bsd-3-clause
| 815
|
[
"cclib"
] |
25d6ef43c35db808b60f2201c3ad445eb3bb56e7477e83c56d684d9f5f7cecab
|
# Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom formatting for OpenOffice and StarOffice."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc."
__license__ = "LGPL"
# pylint: disable-msg=C0301
import copy
import pyatspi
import orca.formatting
import orca.settings
formatting = {
'speech': {
# Get rid of unselectedCell because we don't run into that in OOo
# and we'd end up always speaking "not selected" for all table cells.
#
'suffix': {
'focused': '[]',
'unfocused': 'newNodeLevel + ' + orca.formatting.TUTORIAL,
'basicWhereAmI': orca.formatting.TUTORIAL + ' + description',
'detailedWhereAmI' : '[]'
},
pyatspi.ROLE_LABEL: {
'focused': 'expandableState + availability',
'unfocused': 'name + allTextSelection + expandableState + availability + positionInList',
'basicWhereAmI': 'roleName + name + positionInList + expandableState + (nodeLevel or nestingLevel)'
},
pyatspi.ROLE_TABLE_CELL: {
'focused': 'endOfTableIndicator + tableCellRow',
'unfocused': 'endOfTableIndicator + tableCellRow',
'basicWhereAmI': 'parentRoleName + columnHeader + rowHeader + roleName + cellCheckedState + (realActiveDescendantDisplayedText or imageDescription + image) + columnAndRow + expandableState + nodeLevel',
'detailedWhereAmI': 'parentRoleName + columnHeader + rowHeader + roleName + cellCheckedState + (realActiveDescendantDisplayedText or imageDescription + image) + columnAndRow + tableCellRow + expandableState + nodeLevel'
},
'REAL_ROLE_TABLE_CELL': {
'focused': 'newRowHeader + newColumnHeader + realActiveDescendantDisplayedText',
'unfocused': 'newRowHeader + newColumnHeader + realActiveDescendantDisplayedText',
},
'ROLE_SPREADSHEET_CELL': {
# We treat spreadsheet cells differently from other table cells in
# whereAmI.
#
'basicWhereAmI': 'roleName + column + columnHeader + row + rowHeader + (textContent or realTableCell) + anyTextSelection'
},
},
'braille': {
pyatspi.ROLE_LABEL: {
'unfocused': '[Text(obj,asString((label or name)))]',
'focused': '[Text(obj,asString((label or name)))]'
},
pyatspi.ROLE_LIST: {
'unfocused': '[Component(obj,\
asString(labelOrName + roleName + required))]'
},
pyatspi.ROLE_SCROLL_PANE: {
'unfocused': 'asPageTabOrScrollPane\
+ (childTab\
and ([Region(" ")] + childTab) or [])'
}
}
}
if orca.settings.useExperimentalSpeechProsody:
formatting['speech']['ROLE_SPREADSHEET_CELL']['basicWhereAmI'] = \
'roleName + pause + column + pause + columnHeader + pause + row + pause + rowHeader + pause + (textContent or realTableCell) + pause + anyTextSelection + pause'
formatting['speech'][pyatspi.ROLE_TABLE_CELL]['focused'] = \
'endOfTableIndicator + pause + tableCellRow + pause'
formatting['speech'][pyatspi.ROLE_TABLE_CELL]['unfocused'] = \
'endOfTableIndicator + pause + tableCellRow + pause'
formatting['speech'][pyatspi.ROLE_TABLE_CELL]['basicWhereAmI'] = \
'parentRoleName + pause + columnHeader + pause + rowHeader + pause + roleName + pause + cellCheckedState + pause + (realActiveDescendantDisplayedText or imageDescription + image) + pause + columnAndRow + pause + expandableState + pause + nodeLevel + pause'
formatting['speech'][pyatspi.ROLE_TABLE_CELL]['detailedWhereAmI'] = \
'parentRoleName + pause + columnHeader + pause + rowHeader + pause + roleName + pause + cellCheckedState + pause + (realActiveDescendantDisplayedText or imageDescription + image) + pause + columnAndRow + pause + tableCellRow + pause + expandableState + pause + nodeLevel + pause'
class Formatting(orca.formatting.Formatting):
def __init__(self, script):
orca.formatting.Formatting.__init__(self, script)
self.update(copy.deepcopy(formatting))
self._defaultFormatting = orca.formatting.Formatting(script)
def getFormat(self, **args):
if args.get('useDefaultFormatting', False):
return self._defaultFormatting.getFormat(**args)
else:
return orca.formatting.Formatting.getFormat(self, **args)
|
h4ck3rm1k3/orca-sonar
|
src/orca/scripts/apps/soffice/formatting.py
|
Python
|
lgpl-2.1
| 5,309
|
[
"ORCA"
] |
1f1ee9e4d8b19a71f25360062c5ff8216eddc33ccc7cf6427a757d1531c01de7
|
# Eigenvalues of Orientation Tensor by Local Polynomial Approximation using Numba JIT Acceleration
#
# Calculates the eigenvalues of an orientation tensor derived from the coefficients of a local polynomial
# approximation (3D 2nd order polynomial using gaussian weighted least squares) as proposed by Farneback.
#
# The eigenvalues are numbered in decreasing order of their magnitude.
#
import sys,os
import numpy as np
#
# Import the module with the I/O scaffolding of the External Attribute
#
sys.path.insert(0, os.path.join(sys.path[0], '..', '..'))
import extattrib as xa
import extlib as xl
#
# These are the attribute parameters
#
xa.params = {
'Input': 'Input',
'Output': ['e1', 'e2', 'e3'],
'ZSampMargin' : {'Value':[-1,1], 'Symmetric': True},
'StepOut' : {'Value': [1,1]},
'Par_0': {'Name': 'Weight Factor', 'Value': 0.2},
'Help': 'http://waynegm.github.io/OpendTect-Plugin-Docs/External_Attributes/LPA_Attributes/'
}
#
# Define the compute function
#
def doCompute():
xs = xa.SI['nrinl']
ys = xa.SI['nrcrl']
zs = xa.params['ZSampMargin']['Value'][1] - xa.params['ZSampMargin']['Value'][0] + 1
wf = xa.params['Par_0']['Value']
kernel = lpa3D_init(xs, ys, zs, wf)
gam = 1/(8*((min(xs,ys,zs)-1)*wf)**2)
while True:
xa.doInput()
r = np.zeros((10,xa.TI['nrsamp']))
for i in range(0,10):
r[i,:] = xl.sconvolve(xa.Input,kernel[i])
A = np.rollaxis(np.array([[r[4],r[7]/2, r[8]/2], [r[7]/2, r[5], r[9]/2], [r[8]/2, r[9]/2, r[6]]]),2)
AAT = np.einsum('...ij,...jk->...ik', A, np.swapaxes(A,1,2))
B = np.rollaxis(np.array([[r[1]],[r[2]],[r[3]]]),2)
BBT = np.einsum('...ij,...jk->...ik', B, np.swapaxes(B,1,2))
T = AAT+gam*BBT
w = np.linalg.eigvalsh(T)
v=np.rollaxis(np.sort(w),1)
xa.Output['e1'] = v[2,:]
xa.Output['e2'] = v[1,:]
xa.Output['e3'] = v[0,:]
xa.doOutput()
#
# Find the LPA solution for a 2nd order polynomial in 3D
#
def lpa3D_init( xs, ys, zs, sigma=0.2 ):
sx = sigma * (xs-1)
sy = sigma * (ys-1)
sz = sigma * (zs-1)
hxs = (xs-1)/2
hys = (ys-1)/2
hzs = (zs-1)/2
xtmp = np.linspace(-hxs,hxs,xs)
ytmp = np.linspace(-hys,hys,ys)
ztmp = np.linspace(-hzs,hzs,zs)
xyz = np.meshgrid(xtmp,ytmp,ztmp, indexing='ij')
x = xyz[0].flatten()
y = xyz[1].flatten()
z = xyz[2].flatten()
w = np.exp(-(x**2/(2*sx**2) + y**2/(2*sy**2) + z**2/(2*sz**2)))
W = np.diagflat(w)
A = np.dstack((np.ones(x.size), x, y, z, x*x, y*y, z*z, x*y, x*z, y*z)).reshape((x.size,10))
DB = np.linalg.inv(A.T.dot(W).dot(A)).dot(A.T).dot(W)
return DB.reshape((10,xs,ys,zs))
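#
# Illustrative sketch (assumed, not part of the original script): lpa3D_init
# returns one (xs, ys, zs) convolution kernel per coefficient of the 2nd-order
# basis [1, x, y, z, x*x, y*y, z*z, x*y, x*z, y*z], so sconvolve with kernel[i]
# yields coefficient i at every sample, as used in doCompute above.
#
#     kernel = lpa3D_init(3, 3, 3, 0.2)
#     kernel.shape   # -> (10, 3, 3, 3)
#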
#
# Assign the compute function to the attribute
#
xa.doCompute = doCompute
#
# Do it
#
xa.run(sys.argv[1:])
|
waynegm/OpendTect-Plugins
|
bin/python/wmpy/Experimental/LocalPolynomialApproximation/ex_lpa_eigenvals.py
|
Python
|
gpl-3.0
| 2,624
|
[
"Gaussian"
] |
08e4547f163acb4366d1b13d0dd103955c4f65b80e22135159693cea62998d38
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(1, os.path.abspath('../../siman'))
# print(sys.path)
# -- Project information -----------------------------------------------------
project = 'Siman'
copyright = '2018, Dmitry Aksenov'
author = 'Dmitry Aksenov'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.9.5'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
autodoc_mock_imports = ['numpy', 'tabulate', 'pymatgen', 'pandas', 'scipy', 'six', 'matplotlib', 'ase', 'paramiko']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
html_sidebars = { '**': ['globaltoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'], }
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Simandoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Siman.tex', 'Siman Documentation',
'Dmitry Aksenov', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'siman', 'Siman Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Siman', 'Siman Documentation',
author, 'Siman', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
|
dimonaks/siman
|
docs/source/conf.py
|
Python
|
gpl-2.0
| 5,558
|
[
"ASE",
"pymatgen"
] |
e54e0985c3860d5bb3bb3345aa5bd6461ee99dd985c49776591866f01c197590
|
#!/usr/bin/env python
''' script for filtering insertions vs. the human reference GRCh37/hg19 '''
''' may be useful as a template for extension to other species '''
import pysam
import sys
import os
def usage():
return 'usage: %s </path/to/TEBreak directory> <tabular output from resolve.py>' % sys.argv[0]
def ref_filter(chrom, start, end, superfams):
for sf in superfams.split(','):
if sf == 'L1':
for ins in l1_tbx.fetch(chrom, int(start), int(end)): return True
if sf in ('ALU', 'SVA'):
for ins in alu_tbx.fetch(chrom, int(start), int(end)): return True
if sf == 'SVA':
for ins in sva_tbx.fetch(chrom, int(start), int(end)): return True
return False
def len_filter(rec):
telen = int(rec['TE_Align_End']) - int(rec['TE_Align_Start'])
if 'ALU' in rec['Superfamily'] and telen < 250: return True
if 'SVA' in rec['Superfamily'] and telen < 1000: return True
if 'L1' in rec['Superfamily'] and int(rec['TE_Align_End']) < 5950: return True
return False
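# Illustrative sketch (not part of the original script): len_filter() flags
# insertions whose aligned element length is implausibly short for the family
# (Alu < 250 bp, SVA < 1000 bp, or an L1 alignment ending before position 5950).
#
#     rec = {'Superfamily': 'ALU', 'TE_Align_Start': '10', 'TE_Align_End': '120'}
#     len_filter(rec)   # -> True (record is filtered out)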
def avgmap(maptabix, chrom, start, end):
''' return average mappability across chrom:start-end region; maptabix = pysam.Tabixfile'''
scores = []
if None in (start, end): return None
if chrom in maptabix.contigs:
for rec in maptabix.fetch(chrom, int(start), int(end)):
mchrom, mstart, mend, mscore = rec.strip().split()
mstart, mend = int(mstart), int(mend)
mscore = float(mscore)
while mstart < mend and mstart:
mstart += 1
if mstart >= int(start) and mstart <= int(end):
scores.append(mscore)
if len(scores) > 0:
return sum(scores) / float(len(scores))
else:
return 0.0
else:
return 0.0
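# Illustrative sketch (not part of the original script): avgmap() averages the
# per-base scores of the mappability track that fall inside the region, and
# returns 0.0 when the chromosome is absent from the track.
#
#     map_tbx = pysam.Tabixfile('wgEncodeCrgMapabilityAlign50mer.bed.gz')
#     avgmap(map_tbx, 'chr1', 1000000, 1000100)   # -> e.g. 0.87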
if len(sys.argv) == 3:
tebreak_dir = sys.argv[1]
if not os.path.exists(tebreak_dir):
sys.exit(usage())
l1_ref = tebreak_dir + '/lib/mask.L1.hg19.bed.gz'
alu_ref = tebreak_dir + '/lib/mask.Alu.hg19.bed.gz'
sva_ref = tebreak_dir + '/lib/mask.SVA.hg19.bed.gz'
map_ref = tebreak_dir + '/lib/wgEncodeCrgMapabilityAlign50mer.bed.gz'
for fn in (l1_ref, alu_ref, sva_ref):
if not os.path.exists(fn): sys.exit('reference %s not found' % fn)
if not os.path.exists(fn + '.tbi'): sys.exit('index for reference %s not found' %fn)
l1_tbx = pysam.Tabixfile(l1_ref)
alu_tbx = pysam.Tabixfile(alu_ref)
sva_tbx = pysam.Tabixfile(sva_ref)
map_tbx = pysam.Tabixfile(map_ref)
header = []
with open(sys.argv[2], 'r') as tab:
for i, line in enumerate(tab):
if i == 0: # header
header = line.strip().split('\t')
print line.strip()
else:
rec = {}
out = True
for n, field in enumerate(line.strip().split('\t')):
rec[header[n]] = field
if int(rec['3p_Cons_Len']) < 120 and int(rec['5p_Cons_Len']) < 120: out = False
if 'NA' in (rec['TE_Align_Start'], rec['TE_Align_End']): out = False
if ref_filter(rec['Chromosome'], rec['Left_Extreme'], rec['Right_Extreme'], rec['Superfamily']): out = False
if max(float(rec['5p_Elt_Match']), float(rec['3p_Elt_Match'])) < 0.95: out = False
if max(float(rec['5p_Genome_Match']), float(rec['3p_Genome_Match'])) < 0.98: out = False
if avgmap(map_tbx, rec['Chromosome'], rec['Left_Extreme'], rec['Right_Extreme']) < 0.5: out = False
if out and len_filter(rec): out = False
if out: print line.strip()
else:
sys.exit(usage())
|
ValentinaPeona/tebreak
|
scripts/filter_hg19.py
|
Python
|
mit
| 3,719
|
[
"pysam"
] |
c046554ecb2272b76f31e7b30f5be12115821b26e912e4ecc0d3aad67ec4012f
|
filedict = 'c10.dic'
fileweights = 'fig7.weight.dat'
try:
import custom_params
custom_params.filename='fig7'
except:
print 'You can\'t import the params file'
from colors import palette
from misc import Spherical
from math import sqrt, sin, cos, pi
def ConvertDirection(phi, theta, phir, thetar, transf):
def mult(m, v):
u = [ 0 ] * len(m)
for i in range(len(m)):
for j in range(len(m[i])):
u[i] += v[j] * m[i][j]
return u
v = Spherical.xyz(1., phi, theta, [ 0 ] * 3)
if transf:
Rz = [ [ cos(phir), -sin(phir), 0. ],
[ sin(phir), cos(phir), 0. ],
[ 0., 0., 1. ] ]
Ry = [ [ cos(thetar), 0., sin(thetar) ],
[ 0., 1., 0. ],
[ -sin(thetar), 0., cos(thetar) ] ]
u = mult(Ry, v)
u = mult(Rz, u)
else:
Rz = [ [ cos(-phir), -sin(-phir), 0. ],
[ sin(-phir), cos(-phir), 0. ],
[ 0., 0., 1. ] ]
Ry = [ [ cos(-thetar), 0., sin(-thetar) ],
[ 0., 1., 0. ],
[ -sin(-thetar), 0., cos(-thetar) ] ]
u = mult(Rz, v)
u = mult(Ry, u)
_rho, _phi, _theta = Spherical.to(u, [ 0 ] * 3)
return _phi, _theta
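# Illustrative sketch (not part of the original script): with transf=True the
# unit vector at (phi, theta) is rotated by Ry(thetar) then Rz(phir); with
# transf=False the inverse rotation is applied, so the two calls undo each other.
#
#     phi2, theta2 = ConvertDirection(0.3, 1.1, 0.5, 0.2, True)
#     ConvertDirection(phi2, theta2, 0.5, 0.2, False)   # -> (~0.3, ~1.1)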
weights = {}
def loadweights(fweight):
global weights
weights = {}
with open(fweight) as f:
line = f.readline()
while line:
tokens = line.split()
gid = int(tokens[0]); w = float(tokens[1])
weights.update({ gid:w })
line = f.readline()
def gcolors():
gcol = {}
for gid in weights.keys():
if gid % 2 == 1 and gid >= params.gid_granule_begin + granules.ngranule:
ggid = gd.gid_dict[gid + 1][3]
w_new = weights[gid]
if gcol.has_key(ggid):
w_old = gcol[ggid]
if w_old < w_new:
gcol[ggid] = w_new
else:
gcol.update({ ggid:w_new })
return gcol
import bindict as gd
gd.load(filedict)
from mayavi.mlab import figure, text
fig = figure(bgcolor=(0, 0, 0))
from mayavi import mlab
points3d=mlab.points3d
import params
import granules
class GranulesManager:
def __init__(self):
self.gran = set()
self.actor1 = None
self.gran_color = (0., 157 / 255., 157 / 255.)
self.mitrals = None
self.colors = None
self.projected = False
self._gran = None
def setup_colors(self, colors):
self.colors = colors.copy()
def show(self):
if self.actor1:
self.actor1.remove()
from granules import granule_position_orientation as gpo
from params import granule_diam as diam
x = []
y = []
z = []
s = []
for gid in self.gran:
p = gpo(gid)[0]
s.append(self.colors[gid])
r = params.ranstream(gid, 0)
r.uniform(-params.grid_dim * .5, params.grid_dim * .5)
x.append(p[0] + r.repick())
y.append(p[1] + r.repick())
z.append(p[2] + r.repick())
self.actor1 = points3d(x, y, z, s, scale_factor=diam, scale_mode='none', vmin=0, vmax=100)
def add(self, ci):
new = set()
for _ci in ci:
new.add(_ci[3])
self.gran.symmetric_difference_update(new)
self.gran = set(sorted(self.gran))
import misc
from tvtk.api import tvtk
cone_factor = 2.
def vtkCone(p, q):
from math import pi
phi_base, theta_base = misc.Spherical.to(q, p)[1:]
quads = tvtk.CellArray() #vtk.vtkCellArray()
points = tvtk.Points() #vtk.vtkPoints()
for i in range(11):
# rotate
phi, theta = ConvertDirection((i % 10) * 2 * pi / 10, pi * .5, phi_base, theta_base, True)
# generate new points
_p = tuple(misc.Spherical.xyz(p[3] * .5 * cone_factor, phi, theta, p[0:3]))
_q = tuple(misc.Spherical.xyz(q[3] * .5 * cone_factor, phi, theta, q[0:3]))
# insert points
points.append(_p)
points.append(_q)
if i >= 1:
# create a face
quad = tvtk.Quad()
n = points.number_of_points - 1
quad.point_ids.set_id(0, n - 3) # p
quad.point_ids.set_id(1, n - 2) # q
quad.point_ids.set_id(2, n) # q
quad.point_ids.set_id(3, n - 1) # p
# insert the new face
quads.insert_next_cell(quad)
# create the actor
polydata = tvtk.PolyData(points=points, polys=quads)
mapper = tvtk.PolyDataMapper(input=polydata)
actor = tvtk.Actor(mapper=mapper)
return actor
class vtkMitral:
def __vtkconvert(self):
def drawsoma():
pts = self.mitral.soma.points
center = misc.centroid(pts)
# calc. soma radius
radius = 0.
for p in pts:
radius += misc.distance(p, center)
radius /= len(pts)
radius *= cone_factor
# versor
u = tuple(misc.versor(self.mitral.apic.points[0], self.mitral.apic.points[1]))
src = tvtk.ConeSource(center=tuple(center[0:3]), radius=radius, height=radius, direction=u, resolution=20)
mapper = tvtk.PolyDataMapper(input=src.output)
actor = tvtk.Actor(mapper=mapper)
fig.scene.add_actor(actor)
actor.property.color = self.soma_color
return actor
# create a colored segment on the scene
def mksegment(p1, p2, color):
actor = vtkCone(p1, p2)
actor.property.color = color
fig.scene.add_actor(actor)
return actor
def drawsection(pts):
section = []
for i in range(1, len(pts)):
section.append(mksegment(pts[i - 1], pts[i], self.section_color))
return section
#fig.scene.disable_render = True
self.soma = drawsoma()
self.apic = drawsection(self.mitral.apic.points)
for i in range(len(self.mitral.tuft)):
self.tuft.append(drawsection(self.mitral.tuft[i].points))
# gid associated to a segment
self.dend_gids = []
for i in range(len(self.mitral.dend)):
self.dend.append(drawsection(self.mitral.dend[i].points))
aux = []
for i in range(len(self.dend[-1])):
aux.append(set())
self.dend_gids.append(aux)
def __interp_rules(self, tointerp):
def move(key, forward):
newkey = set()
for _key in key:
sectype, isec, iseg = _key
if forward:
iseg += 1
if iseg < len(self.dend[isec]):
newkey.add((sectype, isec, iseg))
elif len(self.mitral.dend[isec].sons) > 0:
for _son in self.mitral.dend[isec].sons:
newkey.add((sectype, self.mitral.dend.index(_son), 0))
else:
iseg -= 1
if iseg >= 0:
newkey.add((sectype, isec, iseg))
elif self.mitral.dend[isec].parent != self.mitral.soma:
isec = self.mitral.dend.index(self.mitral.dend[isec].parent)
iseg = len(self.dend[isec])-1
newkey.add((sectype, isec, iseg))
return newkey
def look(keys, forward=True):
retkeys = set()
while len(keys) > 0:
_keys = set()
for k in move(keys, forward):
if k in tointerp:
_keys.add(k)
else:
retkeys.add(k)
keys = _keys
return retkeys
def look_previous(key): return look(key, False)
def look_forward(key): return look(key)
interprules = {}
for key in tointerp:
interprules.update({key:look_forward(set([key])).union(look_previous(set([key])))})
return interprules
def __init__(self, mgid):
self.mgid = mgid
from getmitral import getmitral
self.mitral = getmitral(mgid)
self.soma = None
self.apic = None
self.dend = []
self.tuft = []
self.soma_color = (250. / 255, 210. / 255, 51. / 255)
self.section_color = (1., 1., 1.)
self.__vtkconvert()
self.conn_info = []
tointerp = set()
for isec in range(len(self.dend)):
for iseg in range(len(self.dend[isec])):
tointerp.add((2, isec, iseg))
for gid in gd.mgid_dict[mgid]:
if gid >= params.gid_granule_begin + granules.ngranule:
if gid % 2 == 0:
self.conn_info.append(gd.gid_dict[gid])
isec, x = gd.gid_dict[gid][1:3]
iseg = int(x*len(self.dend[isec]))
if iseg >= len(self.dend[isec]):
iseg = len(self.dend[isec])-1
tointerp.discard((2, isec, iseg))
self.__tointerp = self.__interp_rules(tointerp)
self.__show_weights = False
def __set_segment(self, info):
for secinfo, ind_color in info.items():
# interpolate
sec_type, isec, iseg = secinfo
if sec_type == -1: # soma
o = self.soma
elif sec_type == 0: # tuft
o = self.tuft[isec][iseg]
elif sec_type == 1: # apical
o = self.apic[iseg]
elif sec_type == 2: # dendrites
o = self.dend[isec][iseg]
# set
o.property.color = palette[ind_color]
# interpolate
for key1, linked in self.__tointerp.items():
ind_color = 0
for key2 in linked:
ind_color += info[key2]
ind_color /= len(linked)
self.dend[key1[1]][key1[2]].property.color = palette[ind_color]
def show_weights(self, excit):
if len(weights) == 0:
return
self.__show_weights = True
self.__excit = excit
w = {}
for gid in gd.mgid_dict[self.mgid]:
if gid >= params.gid_granule_begin + granules.ngranule:
if gid % 2:
continue
isec, x = gd.gid_dict[gid][1:3]
if x >= 1:
iseg = len(self.dend[isec]) - 1
else:
iseg = int(x * len(self.dend[isec]))
if not excit:
gid -= 1
try:
wsym = weights[gid]
try:
w[(2, isec, iseg)].append(wsym)
except KeyError:
w.update({ (2, isec, iseg):[ wsym ] })
except KeyError:
print 'gid %d not found' % gid
max_steps = 100.
for k, x in w.items():
w[k] = int(max(x) / max_steps * (len(palette) - 1))
self.__set_segment(w)
def clean_weight(self):
self.__show_weights = False
# color all black
for dnd in self.dend:
for seg in dnd:
seg.property.opacity = 1.
self.clean()
def __color_section(self, sec, color):
for s in sec:
s.property.color = color
def clean(self):
if self.__show_weights:
self.show_weights(self.__excit)
else:
for sec in self.dend:
self.__color_section(sec, self.section_color)
for sec in self.tuft:
self.__color_section(sec, self.section_color)
self.__color_section(self.apic, self.section_color)
self.soma.property.color = self.soma_color
try:
from enthought.traits.api import HasTraits, Range, String, Button, Int, Bool, Str, Float
from enthought.traits.ui.api import View, Item
except:
from traits.api import HasTraits, Range, String, Button, Int, Bool, Str, Float
from traitsui.api import View, Item
class BulbGUI(HasTraits):
w_excit = Button('Weights Excit.')
w_inhib = Button('Weights Inhib.')
w_clean = Button('Weights Clean')
t_stop = Float
t_win = Float
max_freqs = Float
view = View(Item(name='w_excit'), Item(name='w_inhib'), Item(name='w_clean'))
def __init__(self, mbp):
self.edit_traits()
self.mbp = mbp
def _w_excit_fired(self):
self.mbp.show_weights(True)
fig.scene.render()
def _w_inhib_fired(self):
self.mbp.show_weights(False)
fig.scene.render()
def _w_clean_fired(self):
self.mbp.clean_weights()
fig.scene.render()
class mayaBulbPlot:
def __init__(self):
self.sel_descriptor = None
self.mitrals = {}
self.granules = GranulesManager()
def draw_mitral(self, mgid):
if self.mitrals.has_key(mgid):
return
m = vtkMitral(mgid)
m.sel_color = (0,1,0)
m.granules = self.granules
self.mitrals.update({ mgid:m })
self.granules.add(m.conn_info) # draw granules
self.granules.show()
def show_weights(self, excit):
for m in self.mitrals.values():
m.show_weights(excit)
def clean_weights(self):
for m in self.mitrals.values():
m.clean_weight()
mbp = mayaBulbPlot()
mbp.granules.mitrals = mbp.mitrals
loadweights(fileweights)
mbp.granules.setup_colors(gcolors())
gui = BulbGUI(mbp)
fig.scene.disable_render=True
mbp.draw_mitral(185)
mbp.draw_mitral(202)
mbp.draw_mitral(137)
fig.scene.disable_render=False
import BulbSurf
import odordisp as od
l,val=od.OdorsInput('input-odors.txt')
ii=l.index('Mint')
b=BulbSurf.Bulb3d(fig)
for i in [187/5,137/5,202/5]:
b.real_h[i].property.opacity=0.5
b.real_h[i].property.color = palette[val[ii][i]]
fig.scene.camera.view_up=[ 0.01551967, 0.99458581, 0.10275311]
fig.scene.camera.position=[ 3521.84237552, 621.77602255, 4977.47240287]
|
JustasB/MitralSuite
|
Models/Migliore2014/mayasyn.py
|
Python
|
mit
| 12,745
|
[
"Mayavi",
"VTK"
] |
6514647a3f0ae50666e26da9a755da71cc8ef7b3e9b996bdd960f2b290a1a993
|
# pylint: disable=missing-docstring
from lettuce import world, step
from selenium.webdriver.common.keys import Keys
from xmodule.modulestore.django import modulestore
VIDEO_BUTTONS = {
'CC': '.hide-subtitles',
'volume': '.volume',
'play': '.video_control.play',
'pause': '.video_control.pause',
'handout': '.video-handout.video-download-button a',
}
SELECTORS = {
'spinner': '.video-wrapper .spinner',
'controls': 'section.video-controls',
}
# We should wait 300 ms for event handler invocation + 200 ms for safety.
DELAY = 0.5
@step('youtube stub server (.*) YouTube API')
def configure_youtube_api(_step, action):
action = action.strip()
if action == 'proxies':
world.youtube.config['youtube_api_blocked'] = False
elif action == 'blocks':
world.youtube.config['youtube_api_blocked'] = True
else:
raise ValueError('Parameter `action` should be one of "proxies" or "blocks".')
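# Illustrative sketch (not part of the original steps file): this step is driven
# from a .feature scenario line such as
#
#     Given youtube stub server blocks YouTube API
#
# which routes here with action == 'blocks'.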
@step('I have created a Video component$')
def i_created_a_video_component(step):
step.given('I am in Studio editing a new unit')
world.create_component_instance(
step=step,
category='video',
)
world.wait_for_xmodule()
world.disable_jquery_animations()
world.wait_for_present('.is-initialized')
world.wait(DELAY)
world.wait_for_invisible(SELECTORS['spinner'])
if not world.youtube.config.get('youtube_api_blocked'):
world.wait_for_visible(SELECTORS['controls'])
@step('I have created a Video component with subtitles$')
def i_created_a_video_with_subs(_step):
_step.given('I have created a Video component with subtitles "OEoXaMPEzfM"')
@step('I have created a Video component with subtitles "([^"]*)"$')
def i_created_a_video_with_subs_with_name(_step, sub_id):
_step.given('I have created a Video component')
# Store the current URL so we can return here
video_url = world.browser.url
# Upload subtitles for the video using the upload interface
_step.given('I have uploaded subtitles "{}"'.format(sub_id))
# Return to the video
world.visit(video_url)
world.wait_for_xmodule()
    # update .sub field with proper subs name (which mimics real Studio/XML behavior)
    # this is needed only for videos which are created in acceptance tests.
_step.given('I edit the component')
world.wait_for_ajax_complete()
_step.given('I save changes')
world.disable_jquery_animations()
world.wait_for_present('.is-initialized')
world.wait_for_invisible(SELECTORS['spinner'])
@step('I have uploaded subtitles "([^"]*)"$')
def i_have_uploaded_subtitles(_step, sub_id):
_step.given('I go to the files and uploads page')
_step.given('I upload the test file "subs_{}.srt.sjson"'.format(sub_id.strip()))
@step('when I view the (.*) it does not have autoplay enabled$')
def does_not_autoplay(_step, video_type):
world.wait(DELAY)
world.wait_for_ajax_complete()
actual = world.css_find('.%s' % video_type)[0]['data-autoplay']
expected = [u'False', u'false', False]
assert actual in expected
assert world.css_has_class('.video_control', 'play')
@step('creating a video takes a single click$')
def video_takes_a_single_click(_step):
component_css = '.xmodule_VideoModule'
assert world.is_css_not_present(component_css)
world.css_click("a[data-category='video']")
assert world.is_css_present(component_css)
@step('I edit the component$')
def i_edit_the_component(_step):
world.edit_component()
@step('I have (hidden|toggled) captions$')
def hide_or_show_captions(step, shown):
button_css = 'a.hide-subtitles'
if shown == 'hidden':
world.css_click(button_css)
if shown == 'toggled':
world.css_click(button_css)
# When we click the first time, a tooltip shows up. We want to
# click the button rather than the tooltip, so move the mouse
# away to make it disappear.
button = world.css_find(button_css)
# mouse_out is not implemented on firefox with selenium
if not world.is_firefox:
button.mouse_out()
world.css_click(button_css)
@step('I have created a video with only XML data$')
def xml_only_video(step):
# Create a new video *without* metadata. This requires a certain
# amount of rummaging to make sure all the correct data is present
step.given('I have clicked the new unit button')
# Wait for the new unit to be created and to load the page
world.wait(1)
course = world.scenario_dict['COURSE']
store = modulestore()
parent_location = store.get_items(course.id, qualifiers={'category': 'vertical'})[0].location
youtube_id = 'ABCDEFG'
world.scenario_dict['YOUTUBE_ID'] = youtube_id
# Create a new Video component, but ensure that it doesn't have
# metadata. This allows us to test that we are correctly parsing
# out XML
world.ItemFactory.create(
parent_location=parent_location,
category='video',
data='<video youtube="1.00:%s"></video>' % youtube_id,
modulestore=store,
user_id=world.scenario_dict["USER"].id
)
@step('The correct Youtube video is shown$')
def the_youtube_video_is_shown(_step):
ele = world.css_find('.video').first
assert ele['data-streams'].split(':')[1] == world.scenario_dict['YOUTUBE_ID']
@step('Make sure captions are (.+)$')
def set_captions_visibility_state(_step, captions_state):
SELECTOR = '.closed .subtitles'
world.wait_for_visible('.hide-subtitles')
if captions_state == 'closed':
if world.is_css_not_present(SELECTOR):
world.css_find('.hide-subtitles').click()
else:
if world.is_css_present(SELECTOR):
world.css_find('.hide-subtitles').click()
@step('I hover over button "([^"]*)"$')
def hover_over_button(_step, button):
world.css_find(VIDEO_BUTTONS[button.strip()]).mouse_over()
@step('Captions (?:are|become) "([^"]*)"$')
def check_captions_visibility_state(_step, visibility_state):
if visibility_state == 'visible':
assert world.css_visible('.subtitles')
else:
assert not world.css_visible('.subtitles')
def find_caption_line_by_data_index(index):
SELECTOR = ".subtitles > li[data-index='{index}']".format(index=index)
return world.css_find(SELECTOR).first
@step('I focus on caption line with data-index "([^"]*)"$')
def focus_on_caption_line(_step, index):
world.wait_for_present('.video.is-captions-rendered')
world.wait_for(lambda _: world.css_text('.subtitles'), timeout=30)
find_caption_line_by_data_index(int(index.strip()))._element.send_keys(Keys.TAB)
@step('I press "enter" button on caption line with data-index "([^"]*)"$')
def click_on_the_caption(_step, index):
world.wait_for_present('.video.is-captions-rendered')
world.wait_for(lambda _: world.css_text('.subtitles'), timeout=30)
find_caption_line_by_data_index(int(index.strip()))._element.send_keys(Keys.ENTER)
@step('I see caption line with data-index "([^"]*)" has class "([^"]*)"$')
def caption_line_has_class(_step, index, className):
SELECTOR = ".subtitles > li[data-index='{index}']".format(index=int(index.strip()))
assert world.css_has_class(SELECTOR, className.strip())
@step('I see a range on slider$')
def see_a_range_slider_with_proper_range(_step):
world.wait_for_visible(VIDEO_BUTTONS['pause'])
assert world.css_visible(".slider-range")
@step('I (.*) see video button "([^"]*)"$')
def do_not_see_or_not_button_video(_step, action, button_type):
world.wait(DELAY)
world.wait_for_ajax_complete()
action = action.strip()
button = button_type.strip()
if action == 'do not':
assert not world.is_css_present(VIDEO_BUTTONS[button])
elif action == 'can':
assert world.css_visible(VIDEO_BUTTONS[button])
else:
raise ValueError('Parameter `action` should be one of "do not" or "can".')
@step('I click video button "([^"]*)"$')
def click_button_video(_step, button_type):
world.wait(DELAY)
world.wait_for_ajax_complete()
button = button_type.strip()
world.css_click(VIDEO_BUTTONS[button])
@step('I seek video to "([^"]*)" seconds$')
def seek_video_to_n_seconds(_step, seconds):
time = float(seconds.strip())
jsCode = "$('.video').data('video-player-state').videoPlayer.onSlideSeek({{time: {0:f}}})".format(time)
world.browser.execute_script(jsCode)
@step('I see video starts playing from "([^"]*)" position$')
def start_playing_video_from_n_seconds(_step, position):
world.wait_for(
func=lambda _: world.css_html('.vidtime')[:4] == position.strip(),
timeout=5
)
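# Added illustration (hedged, not part of the original suite): a hypothetical
# Gherkin scenario composing the step definitions above; the step wording
# matches the regexes in this file.
#
#   Scenario: Captions can be toggled
#       Given I have created a Video component with subtitles
#       And I have toggled captions
#       Then Captions become "visible"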
|
olexiim/edx-platform
|
cms/djangoapps/contentstore/features/video.py
|
Python
|
agpl-3.0
| 8,674
|
[
"VisIt"
] |
af4ddda2106e7a5dea7642fa67512ed2c9e535610b1d652260aa4303b58de0c7
|
## NIFTY (Numerical Information Field Theory) has been developed at the
## Max-Planck-Institute for Astrophysics.
##
## Copyright (C) 2014 Max-Planck-Society
##
## Author: Maksim Greiner, Marco Selig
## Project homepage: <http://www.mpa-garching.mpg.de/ift/nifty/>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
#from nifty import *
import numpy as np
from nifty import pi, \
about, \
field, \
sqrt,exp,log
def power_backward_conversion_lm(k_space,p,mean=None):
"""
This function is designed to convert a theoretical/statistical power
spectrum of a log-normal field to the theoretical power spectrum of
the underlying Gaussian field.
The function only works for power spectra defined for lm_spaces
Parameters
----------
    k_space : nifty.lm_space,
        the harmonic space on which the power spectrum is defined
p : np.array,
the power spectrum of the log-normal field.
Needs to have the same number of entries as
`k_space.get_power_indices()[0]`
mean : float, *optional*
specifies the mean of the log-normal field. If `mean` is not
specified the function will use the monopole of the power spectrum.
If it is specified the function will NOT use the monopole of the
spectrum. (default: None)
WARNING: a mean that is too low can violate positive definiteness
of the log-normal field. In this case the function produces an
error.
Returns
-------
mean : float,
the recovered mean of the underlying Gaussian distribution.
p1 : np.array,
the power spectrum of the underlying Gaussian field, where the
monopole has been set to zero. Eventual monopole power has been
shifted to the mean.
References
----------
.. [#] M. Greiner and T.A. Ensslin, "Log-transforming the matter power spectrum";
`arXiv:1312.1354 <http://arxiv.org/abs/1312.1354>`_
"""
p = np.copy(p)
if(mean is not None):
p[0] = 4*pi*mean**2
klen = k_space.get_power_indices()[0]
C_0_Omega = field(k_space,val=0)
C_0_Omega.val[:len(klen)] = p*sqrt(2*klen+1)/sqrt(4*pi)
C_0_Omega = C_0_Omega.transform()
if(np.any(C_0_Omega.val<0.)):
raise ValueError(about._errors.cstring("ERROR: spectrum or mean incompatible with positive definiteness.\n Try increasing the mean."))
lC = log(C_0_Omega)
Z = lC.transform()
spec = Z.val[:len(klen)]
mean = (spec[0]-0.5*sqrt(4*pi)*log((p*(2*klen+1)/(4*pi)).sum()))/sqrt(4*pi)
spec[0] = 0.
spec = spec*sqrt(4*pi)/sqrt(2*klen+1)
spec = np.real(spec)
if(np.any(spec<0.)):
spec = spec*(spec>0.)
about.warnings.cprint("WARNING: negative modes set to zero.")
return mean.real,spec
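# Added illustration (hedged sketch, not part of the original module): typical
# usage of the backward conversion. The `lm_space` constructor call below is an
# assumption about a standard NIFTY 1.x setup; the input spectrum is hypothetical.
def _example_backward_conversion():
    from nifty import lm_space
    k_space = lm_space(95)  # hypothetical band limit lmax
    klen = k_space.get_power_indices()[0]
    p_lognormal = 42. / (klen + 1.)**3  # hypothetical log-normal power spectrum
    mean, p_gauss = power_backward_conversion_lm(k_space, p_lognormal, mean=1.)
    return mean, p_gauss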
def power_forward_conversion_lm(k_space,p,mean=0):
"""
This function is designed to convert a theoretical/statistical power
spectrum of a Gaussian field to the theoretical power spectrum of
the exponentiated field.
The function only works for power spectra defined for lm_spaces
Parameters
----------
    k_space : nifty.lm_space,
        the harmonic space on which the power spectrum is defined
p : np.array,
the power spectrum of the Gaussian field.
Needs to have the same number of entries as
`k_space.get_power_indices()[0]`
    mean : float, *optional*
specifies the mean of the Gaussian field (default: 0).
Returns
-------
p1 : np.array,
the power spectrum of the exponentiated Gaussian field.
References
----------
.. [#] M. Greiner and T.A. Ensslin, "Log-transforming the matter power spectrum";
`arXiv:1312.1354 <http://arxiv.org/abs/1312.1354>`_
"""
m = mean
klen = k_space.get_power_indices()[0]
C_0_Omega = field(k_space,val=0)
C_0_Omega.val[:len(klen)] = p*sqrt(2*klen+1)/sqrt(4*pi)
C_0_Omega = C_0_Omega.transform()
C_0_0 = (p*(2*klen+1)/(4*pi)).sum()
exC = exp(C_0_Omega+C_0_0+2*m)
Z = exC.transform()
spec = Z.val[:len(klen)]
spec = spec*sqrt(4*pi)/sqrt(2*klen+1)
spec = np.real(spec)
if(np.any(spec<0.)):
spec = spec*(spec>0.)
about.warnings.cprint("WARNING: negative modes set to zero.")
return spec
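# Added illustration (hedged sketch, not part of the original module): the
# forward conversion mirrors `power_backward_conversion_lm` above; chaining the
# two should, up to clipped negative modes, recover the input spectrum. The
# `lm_space` call is the same assumption as in the previous sketch.
def _example_forward_conversion():
    from nifty import lm_space
    k_space = lm_space(95)  # hypothetical band limit lmax
    klen = k_space.get_power_indices()[0]
    p_gauss = 42. / (klen + 1.)**3  # hypothetical Gaussian power spectrum
    return power_forward_conversion_lm(k_space, p_gauss, mean=0.)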
|
ultimanet/nifty
|
lm/nifty_power_conversion_lm.py
|
Python
|
gpl-3.0
| 5,320
|
[
"Gaussian"
] |
745bcf6caf828e8f9af242e5bd48da62014d7dcf955f95cd48d77c6f6b3ee05e
|
from __future__ import print_function
import logging
import numpy
import pywt
import SimpleITK as sitk
import six
from six.moves import range
logger = logging.getLogger(__name__)
def getMask(mask, **kwargs):
"""
Function to get the correct mask. Includes enforcing a correct pixel data type (UInt32).
Also supports extracting the mask for a segmentation (stored as SimpleITK Vector image) if necessary.
  In this case, the mask at index ``label_channel`` is extracted. The resulting 3D volume is then treated as if it were a
scalar input volume (i.e. with the region of interest defined by voxels with value matching ``label``).
Finally, checks if the mask volume contains an ROI identified by ``label``. Raises a value error if the label is not
present (including a list of valid labels found).
:param mask: SimpleITK Image object representing the mask. Can be a vector image to allow for overlapping masks.
:param kwargs: keyword arguments. If argument ``label_channel`` is present, this is used to select the channel.
Otherwise label_channel ``0`` is assumed.
:return: SimpleITK.Image with pixel type UInt32 representing the mask volume
"""
global logger
label = kwargs.get('label', 1)
label_channel = kwargs.get('label_channel', 0)
if 'vector' in mask.GetPixelIDTypeAsString().lower():
logger.debug('Mask appears to be a segmentation object (=stored as vector image).')
n_components = mask.GetNumberOfComponentsPerPixel()
assert label_channel < n_components, \
"Mask %i requested, but segmentation object only contains %i objects" % (label_channel, n_components)
logger.info('Extracting mask at index %i', label_channel)
selector = sitk.VectorIndexSelectionCastImageFilter()
selector.SetIndex(label_channel)
mask = selector.Execute(mask)
logger.debug('Force casting mask to UInt32 to ensure correct datatype.')
mask = sitk.Cast(mask, sitk.sitkUInt32)
labels = numpy.unique(sitk.GetArrayFromImage(mask))
if len(labels) == 1 and labels[0] == 0:
raise ValueError('No labels found in this mask (i.e. nothing is segmented)!')
if label not in labels:
raise ValueError('Label (%g) not present in mask. Choose from %s' % (label, labels[labels != 0]))
return mask
def getBinEdges(parameterValues, **kwargs):
r"""
Calculate and return the histogram using parameterValues (1D array of all segmented voxels in the image).
**Fixed bin width:**
  Returns the bin edges, a list of the edges of the calculated bins, length is N(bins) + 1. Bins are defined such that
the bin edges are equally spaced from zero, and that the leftmost edge :math:`\leq \min(X_{gl})`. These bin edges
represent the half-open ranges of each bin :math:`[\text{lower_edge}, \text{upper_edge})` and result in gray value
discretization as follows:
.. math::
X_{b, i} = \lfloor \frac{X_{gl, i}}{W} \rfloor - \lfloor \frac {\min(X_{gl})}{W} \rfloor + 1
Here, :math:`X_{gl, i}` and :math:`X_{b, i}` are gray level intensities before and after discretization, respectively.
  :math:`{W}` is the bin width value (specified in ``binWidth`` parameter). The first part of the formula ensures that
the bins are equally spaced from 0, whereas the second part ensures that the minimum gray level intensity inside the
ROI after binning is always 1.
In the case where the maximum gray level intensity is equally dividable by the binWidth, i.e.
:math:`\max(X_{gl}) \mod W = 0`, this will result in that maximum gray level being assigned to bin
:math:`[\max(X_{gl}), \max(X_{gl}) + W)`, which is consistent with numpy.digitize, but different from the behaviour
of numpy.histogram, where the final bin has a closed range, including the maximum gray level, i.e.
:math:`[\max(X_{gl}) - W, \max(X_{gl})]`.
.. note::
This method is slightly different from the fixed bin size discretization method described by IBSI. The two most
notable differences are 1) that PyRadiomics uses a floor division (and adds 1), as opposed to a ceiling division and
2) that in PyRadiomics, bins are always equally spaced from 0, as opposed to equally spaced from the minimum
gray level intensity.
*Example: for a ROI with values ranging from 54 to 166, and a bin width of 25, the bin edges will be [50, 75, 100,
125, 150, 175].*
This value can be directly passed to ``numpy.histogram`` to generate a histogram or ``numpy.digitize`` to discretize
the ROI gray values. See also :py:func:`binImage()`.
**Fixed bin Count:**
.. math::
X_{b, i} = \left\{ {\begin{array}{lcl}
\lfloor N_b\frac{(X_{gl, i} - \min(X_{gl})}{\max(X_{gl}) - \min(X_{gl})} \rfloor + 1 &
\mbox{for} & X_{gl, i} < \max(X_{gl}) \\
N_b & \mbox{for} & X_{gl, i} = \max(X_{gl}) \end{array}} \right.
Here, :math:`N_b` is the number of bins to use, as defined in ``binCount``.
References
- Leijenaar RTH, Nalbantov G, Carvalho S, et al. The effect of SUV discretization in quantitative FDG-PET Radiomics:
the need for standardized methodology in tumor texture analysis. Sci Rep. 2015;5(August):11075.
"""
global logger
binWidth = kwargs.get('binWidth', 25)
binCount = kwargs.get('binCount')
if binCount is not None:
binEdges = numpy.histogram(parameterValues, binCount)[1]
binEdges[-1] += 1 # Ensures that the maximum value is included in the topmost bin when using numpy.digitize
else:
minimum = min(parameterValues)
maximum = max(parameterValues)
    # Start binning from the first value less than or equal to the minimum value that is evenly dividable by binWidth
lowBound = minimum - (minimum % binWidth)
# Add + 2* binwidth to ensure the maximum value is included in the range generated by numpy.arange, and that values
# equal to highbound are binned into a separate bin by numpy.histogram (This ensures ALL bins are half open, as
# numpy.histogram treats the last bin as a closed interval. Moreover, this ensures consistency with numpy.digitize,
# which will assign len(bins) + 1 to values equal to rightmost bin edge, treating all bins as half-open)
highBound = maximum + 2 * binWidth
binEdges = numpy.arange(lowBound, highBound, binWidth)
# if min(parameterValues) % binWidth = 0 and min(parameterValues) = max(parameterValues), binEdges will only contain
# 1 value. If this is the case (flat region) ensure that numpy.histogram creates 1 bin (requires 2 edges). For
# numpy.histogram, a binCount (1) would also suffice, however, this is not accepted by numpy.digitize, which also uses
# binEdges calculated by this function.
if len(binEdges) == 1: # Flat region, ensure that there is 1 bin
binEdges = [binEdges[0] - .5, binEdges[0] + .5] # Simulates binEdges returned by numpy.histogram if bins = 1
logger.debug('Calculated %d bins for bin width %g with edges: %s)', len(binEdges) - 1, binWidth, binEdges)
return binEdges # numpy.histogram(parameterValues, bins=binedges)
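# Added illustration (hedged sketch, not part of the original module): the
# fixed-bin-width case from the docstring, a ROI with gray values between 54
# and 166 and a bin width of 25. The edges are equally spaced from 0, starting
# at 50 (= 54 - 54 % 25).
def _example_getBinEdges():
  parameterValues = numpy.array([54., 60., 100., 166.])
  return getBinEdges(parameterValues, binWidth=25)  # 50., 75., 100., ... (width 25)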
def binImage(parameterMatrix, parameterMatrixCoordinates=None, **kwargs):
r"""
Discretizes the parameterMatrix (matrix representation of the gray levels in the ROI) using the binEdges calculated
using :py:func:`getBinEdges`. Only voxels defined by parameterMatrixCoordinates (defining the segmentation) are used
for calculation of histogram and subsequently discretized. Voxels outside segmentation are left unchanged.
"""
global logger
logger.debug('Discretizing gray levels inside ROI')
discretizedParameterMatrix = numpy.zeros(parameterMatrix.shape, dtype='int')
if parameterMatrixCoordinates is None:
binEdges = getBinEdges(parameterMatrix.flatten(), **kwargs)
discretizedParameterMatrix = numpy.digitize(parameterMatrix, binEdges)
else:
binEdges = getBinEdges(parameterMatrix[parameterMatrixCoordinates], **kwargs)
discretizedParameterMatrix[parameterMatrixCoordinates] = numpy.digitize(parameterMatrix[parameterMatrixCoordinates], binEdges)
return discretizedParameterMatrix, binEdges
def checkMask(imageNode, maskNode, **kwargs):
"""
Checks whether the Region of Interest (ROI) defined in the mask size and dimensions match constraints, specified in
settings. The following checks are performed.
1. Check whether the mask corresponds to the image (i.e. has a similar size, spacing, direction and origin). **N.B.
This check is performed by SimpleITK, if it fails, an error is logged, with additional error information from
SimpleITK logged with level DEBUG (i.e. logging-level has to be set to debug to store this information in the log
file).** The tolerance can be increased using the ``geometryTolerance`` parameter. Alternatively, if the
``correctMask`` parameter is ``True``, PyRadiomics will check if the mask contains a valid ROI (inside image
physical area) and if so, resample the mask to image geometry. See :ref:`radiomics-settings-label` for more info.
2. Check if the label is present in the mask
3. Count the number of dimensions in which the size of the ROI > 1 (i.e. does the ROI represent a single voxel (0), a
line (1), a surface (2) or a volume (3)) and compare this to the minimum number of dimension required (specified in
``minimumROIDimensions``).
4. Optional. Check if there are at least N voxels in the ROI. N is defined in ``minimumROISize``, this test is skipped
if ``minimumROISize = None``.
This function returns a tuple of two items. The first item is the bounding box of the mask. The second item is the
mask that has been corrected by resampling to the input image geometry (if that resampling was successful).
If a check fails, a ValueError is raised. No features will be extracted for this mask.
If the mask passes all tests, this function returns the bounding box, which is used in the :py:func:`cropToTumorMask`
function.
The bounding box is calculated during (1.) and used for the subsequent checks. The bounding box is
calculated by SimpleITK.LabelStatisticsImageFilter() and returned as a tuple of indices: (L_x, U_x, L_y, U_y, L_z,
U_z), where 'L' and 'U' are lower and upper bound, respectively, and 'x', 'y' and 'z' the three image dimensions.
By reusing the bounding box calculated here, calls to SimpleITK.LabelStatisticsImageFilter() are reduced, improving
performance.
Uses the following settings:
- minimumROIDimensions [1]: Integer, range 1-3, specifies the minimum dimensions (1D, 2D or 3D, respectively).
Single-voxel segmentations are always excluded.
- minimumROISize [None]: Integer, > 0, specifies the minimum number of voxels required. Test is skipped if
this parameter is set to None.
.. note::
If the first check fails there are generally 2 possible causes:
1. The image and mask are matched, but there is a slight difference in origin, direction or spacing. The exact
cause, difference and used tolerance are stored with level DEBUG in a log (if enabled). For more information on
setting up logging, see ":ref:`setting up logging <radiomics-logging-label>`" and the helloRadiomics examples
(located in the ``pyradiomics/examples`` folder). This problem can be fixed by changing the global tolerance
(``geometryTolerance`` parameter) or enabling mask correction (``correctMask`` parameter).
2. The image and mask do not match, but the ROI contained within the mask does represent a physical volume
contained within the image. If this is the case, resampling is needed to ensure matching geometry between image
and mask before features can be extracted. This can be achieved by enabling mask correction using the
``correctMask`` parameter.
"""
global logger
correctedMask = None
label = kwargs.get('label', 1)
minDims = kwargs.get('minimumROIDimensions', 2)
minSize = kwargs.get('minimumROISize', None)
logger.debug('Checking mask with label %d', label)
logger.debug('Calculating bounding box')
# Determine bounds
lsif = sitk.LabelStatisticsImageFilter()
try:
lsif.Execute(imageNode, maskNode)
# If lsif fails, and mask is corrected, it includes a check whether the label is present. Therefore, perform
# this test here only if lsif does not fail on the first attempt.
if label not in lsif.GetLabels():
raise ValueError('Label (%g) not present in mask' % label)
except RuntimeError as e:
    # If correctMask = True, try to resample the mask to the image geometry, otherwise raise a ValueError ("fail")
if not kwargs.get('correctMask', False):
if "Both images for LabelStatisticsImageFilter don't match type or dimension!" in e.args[0]:
logger.debug('Additional information on error.', exc_info=True)
raise ValueError('Image/Mask datatype or size mismatch. Potential fix: enable correctMask, see '
'Documentation:Usage:Customizing the Extraction:Settings:correctMask for more information')
elif "Inputs do not occupy the same physical space!" in e.args[0]:
logger.debug('Additional information on error.', exc_info=True)
raise ValueError('Image/Mask geometry mismatch. Potential fix: increase tolerance using geometryTolerance, '
'see Documentation:Usage:Customizing the Extraction:Settings:geometryTolerance for more '
'information')
else:
raise e # unhandled error
logger.warning('Image/Mask geometry mismatch, attempting to correct Mask')
correctedMask = _correctMask(imageNode, maskNode, **kwargs) # Raises Value error if ROI outside image physical space
# Resampling successful, try to calculate boundingbox
try:
lsif.Execute(imageNode, correctedMask)
except RuntimeError:
logger.debug('Bounding box calculation with resampled mask failed', exc_info=True)
raise ValueError('Calculation of bounding box failed, for more information run with DEBUG logging and check log')
# LBound and UBound of the bounding box, as (L_X, U_X, L_Y, U_Y, L_Z, U_Z)
boundingBox = numpy.array(lsif.GetBoundingBox(label))
logger.debug('Checking minimum number of dimensions requirements (%d)', minDims)
ndims = numpy.sum((boundingBox[1::2] - boundingBox[0::2] + 1) > 1) # UBound - LBound + 1 = Size
if ndims == 0:
raise ValueError('mask only contains 1 segmented voxel! Cannot extract features for a single voxel.')
elif ndims < minDims:
raise ValueError('mask has too few dimensions (number of dimensions %d, minimum required %d)' % (ndims, minDims))
if minSize is not None:
logger.debug('Checking minimum size requirements (minimum size: %d)', minSize)
roiSize = lsif.GetCount(label)
if roiSize <= minSize:
raise ValueError('Size of the ROI is too small (minimum size: %g, ROI size: %g' % (minSize, roiSize))
return boundingBox, correctedMask
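# Added illustration (hedged sketch, not part of the original module): a
# typical validate-and-crop call chain. The file names are hypothetical
# placeholders.
def _example_checkMask():
  image = sitk.ReadImage('image.nrrd')
  mask = getMask(sitk.ReadImage('mask.nrrd'), label=1)
  boundingBox, correctedMask = checkMask(image, mask, label=1, correctMask=True)
  if correctedMask is not None:  # mask was resampled to image geometry
    mask = correctedMask
  return cropToTumorMask(image, mask, boundingBox, label=1)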
def _correctMask(imageNode, maskNode, **kwargs):
"""
If the mask geometry does not match the image geometry, this function can be used to resample the mask to the image
physical space.
First, the mask is checked for a valid ROI (i.e. maskNode contains an ROI with the given label value, which does not
include areas outside of the physical image bounds).
If the ROI is valid, the maskNode is resampled using the imageNode as a reference image and a nearest neighbor
interpolation.
  If the ROI is valid, the resampled mask is returned; otherwise a ValueError is raised.
"""
global logger
logger.debug('Resampling mask to image geometry')
_checkROI(imageNode, maskNode, **kwargs) # Raises a value error if ROI is invalid
rif = sitk.ResampleImageFilter()
rif.SetReferenceImage(imageNode)
rif.SetInterpolator(sitk.sitkNearestNeighbor)
logger.debug('Resampling...')
return rif.Execute(maskNode)
def _checkROI(imageNode, maskNode, **kwargs):
"""
Check whether maskNode contains a valid ROI defined by label:
1. Check whether the label value is present in the maskNode.
2. Check whether the ROI defined by the label does not include an area outside the physical area of the image.
For the second check, a tolerance of 1e-3 is allowed.
  If the ROI is valid, the bounding box (lower bounds, followed by size in all dimensions (X, Y, Z ordered)) is
returned. Otherwise, a ValueError is raised.
"""
global logger
label = kwargs.get('label', 1)
logger.debug('Checking ROI validity')
# Determine bounds of cropped volume in terms of original Index coordinate space
lssif = sitk.LabelShapeStatisticsImageFilter()
lssif.Execute(maskNode)
  logger.debug('Checking if label %d is present in the mask', label)
if label not in lssif.GetLabels():
    raise ValueError('Label (%d) not present in mask' % label)
# LBound and size of the bounding box, as (L_X, L_Y, [L_Z], S_X, S_Y, [S_Z])
bb = numpy.array(lssif.GetBoundingBox(label))
Nd = maskNode.GetDimension()
# Determine if the ROI is within the physical space of the image
logger.debug('Comparing physical space of bounding box to physical space of image')
# Step 1: Get the origin and UBound corners of the bounding box in physical space
# The additional 0.5 represents the difference between the voxel center and the voxel corner
# Upper bound index of ROI = bb[:Nd] + bb[Nd:] - 1 (LBound + Size - 1), .5 is added to get corner
ROIBounds = (maskNode.TransformContinuousIndexToPhysicalPoint(bb[:Nd] - .5), # Origin
maskNode.TransformContinuousIndexToPhysicalPoint(bb[:Nd] + bb[Nd:] - 0.5)) # UBound
# Step 2: Translate the ROI physical bounds to the image coordinate space
ROIBounds = (imageNode.TransformPhysicalPointToContinuousIndex(ROIBounds[0]), # Origin
imageNode.TransformPhysicalPointToContinuousIndex(ROIBounds[1]))
logger.debug('ROI bounds (image coordinate space): %s', ROIBounds)
# Check if any of the ROI bounds are outside the image indices (i.e. -0.5 < ROI < Im.Size -0.5)
# The additional 0.5 is to allow for different spacings (defines the edges, not the centers of the edge-voxels
tolerance = 1e-3 # Define a tolerance to correct for machine precision errors
if numpy.any(numpy.min(ROIBounds, axis=0) < (- .5 - tolerance)) or \
numpy.any(numpy.max(ROIBounds, axis=0) > (numpy.array(imageNode.GetSize()) - .5 + tolerance)):
raise ValueError('Bounding box of ROI is larger than image space:\n\t'
'ROI bounds (x, y, z image coordinate space) %s\n\tImage Size %s' %
(ROIBounds, imageNode.GetSize()))
logger.debug('ROI valid, calculating resampling grid')
return bb
def cropToTumorMask(imageNode, maskNode, boundingBox, **kwargs):
"""
Create a sitkImage of the segmented region of the image based on the input label.
Create a sitkImage of the labelled region of the image, cropped to have a
cuboid shape equal to the ijk boundaries of the label.
:param boundingBox: The bounding box used to crop the image. This is the bounding box as returned by
:py:func:`checkMask`.
:param label: [1], value of the label, onto which the image and mask must be cropped.
:return: Cropped image and mask (SimpleITK image instances).
"""
global logger
padDistance = kwargs.get('padDistance', 0)
size = numpy.array(maskNode.GetSize())
ijkMinBounds = boundingBox[0::2] - padDistance
ijkMaxBounds = size - boundingBox[1::2] - padDistance - 1
# Ensure cropped area is not outside original image bounds
ijkMinBounds = numpy.maximum(ijkMinBounds, 0)
ijkMaxBounds = numpy.maximum(ijkMaxBounds, 0)
# Crop Image
logger.debug('Cropping to size %s', (boundingBox[1::2] - boundingBox[0::2]) + 1)
cif = sitk.CropImageFilter()
try:
cif.SetLowerBoundaryCropSize(ijkMinBounds)
cif.SetUpperBoundaryCropSize(ijkMaxBounds)
except TypeError:
# newer versions of SITK/python want a tuple or list
cif.SetLowerBoundaryCropSize(ijkMinBounds.tolist())
cif.SetUpperBoundaryCropSize(ijkMaxBounds.tolist())
croppedImageNode = cif.Execute(imageNode)
croppedMaskNode = cif.Execute(maskNode)
return croppedImageNode, croppedMaskNode
def resampleImage(imageNode, maskNode, **kwargs):
"""
Resamples image and mask to the specified pixel spacing (The default interpolator is Bspline).
Resampling can be enabled using the settings 'interpolator' and 'resampledPixelSpacing' in the parameter file or as
part of the settings passed to the feature extractor. See also
:ref:`feature extractor <radiomics-featureextractor-label>`.
'imageNode' and 'maskNode' are SimpleITK Objects, and 'resampledPixelSpacing' is the output pixel spacing (sequence of
3 elements).
If only in-plane resampling is required, set the output pixel spacing for the out-of-plane dimension (usually the last
dimension) to 0. Spacings with a value of 0 are replaced by the spacing as it is in the original mask.
Only part of the image and labelmap are resampled. The resampling grid is aligned to the input origin, but only voxels
covering the area of the image ROI (defined by the bounding box) and the padDistance are resampled. This results in a
resampled and partially cropped image and mask. Additional padding is required as some filters also sample voxels
outside of segmentation boundaries. For feature calculation, image and mask are cropped to the bounding box without
any additional padding, as the feature classes do not need the gray level values outside the segmentation.
The resampling grid is calculated using only the input mask. Even when image and mask have different directions, both
the cropped image and mask will have the same direction (equal to direction of the mask). Spacing and size are
determined by settings and bounding box of the ROI.
.. note::
Before resampling the bounds of the non-padded ROI are compared to the bounds. If the ROI bounding box includes
areas outside of the physical space of the image, an error is logged and (None, None) is returned. No features will
be extracted. This enables the input image and mask to have different geometry, so long as the ROI defines an area
within the image.
.. note::
The additional padding is adjusted, so that only the physical space within the mask is resampled. This is done to
    prevent resampling outside of the image. Please note that this assumes that the image and mask occupy the same physical
space. If this is not the case, it is possible that voxels outside the image are included in the resampling grid,
these will be assigned a value of 0. It is therefore recommended, but not enforced, to use an input mask which has
the same or a smaller physical space than the image.
"""
global logger
resampledPixelSpacing = kwargs['resampledPixelSpacing']
interpolator = kwargs.get('interpolator', sitk.sitkBSpline)
padDistance = kwargs.get('padDistance', 5)
label = kwargs.get('label', 1)
logger.debug('Resampling image and mask')
if imageNode is None or maskNode is None:
raise ValueError('Requires both image and mask to resample')
maskSpacing = numpy.array(maskNode.GetSpacing())
imageSpacing = numpy.array(imageNode.GetSpacing())
Nd_resampled = len(resampledPixelSpacing)
Nd_mask = len(maskSpacing)
assert Nd_resampled == Nd_mask, \
'Wrong dimensionality (%i-D) of resampledPixelSpacing!, %i-D required' % (Nd_resampled, Nd_mask)
# If spacing for a direction is set to 0, use the original spacing (enables "only in-slice" resampling)
logger.debug('Where resampled spacing is set to 0, set it to the original spacing (mask)')
resampledPixelSpacing = numpy.array(resampledPixelSpacing)
resampledPixelSpacing = numpy.where(resampledPixelSpacing == 0, maskSpacing, resampledPixelSpacing)
# Check if the maskNode contains a valid ROI. If ROI is valid, the bounding box needed to calculate the resampling
# grid is returned.
bb = _checkROI(imageNode, maskNode, **kwargs)
# Do not resample in those directions where labelmap spans only one slice.
maskSize = numpy.array(maskNode.GetSize())
resampledPixelSpacing = numpy.where(bb[Nd_mask:] != 1, resampledPixelSpacing, maskSpacing)
# If current spacing is equal to resampledPixelSpacing, no interpolation is needed
# Tolerance = 1e-5 + 1e-8*abs(resampledSpacing)
  logger.debug('Comparing resampled spacing to original spacing (image)')
if numpy.allclose(imageSpacing, resampledPixelSpacing):
logger.info('New spacing equal to original image spacing, just resampling the mask')
# Ensure that image and mask geometry match
rif = sitk.ResampleImageFilter()
rif.SetReferenceImage(imageNode)
rif.SetInterpolator(sitk.sitkNearestNeighbor)
maskNode = rif.Execute(maskNode)
# re-calculate the bounding box of the mask
lssif = sitk.LabelShapeStatisticsImageFilter()
lssif.Execute(maskNode)
bb = numpy.array(lssif.GetBoundingBox(label))
low_up_bb = numpy.empty(Nd_mask * 2, dtype=int)
low_up_bb[::2] = bb[:Nd_mask]
low_up_bb[1::2] = bb[:Nd_mask] + bb[Nd_mask:] - 1
return cropToTumorMask(imageNode, maskNode, low_up_bb, **kwargs)
spacingRatio = maskSpacing / resampledPixelSpacing
# Determine bounds of cropped volume in terms of new Index coordinate space,
# round down for lowerbound and up for upperbound to ensure entire segmentation is captured (prevent data loss)
# Pad with an extra .5 to prevent data loss in case of upsampling. For Ubound this is (-1 + 0.5 = -0.5)
bbNewLBound = numpy.floor((bb[:Nd_mask] - 0.5) * spacingRatio - padDistance)
bbNewUBound = numpy.ceil((bb[:Nd_mask] + bb[Nd_mask:] - 0.5) * spacingRatio + padDistance)
# Ensure resampling is not performed outside bounds of original image
maxUbound = numpy.ceil(maskSize * spacingRatio) - 1
bbNewLBound = numpy.where(bbNewLBound < 0, 0, bbNewLBound)
bbNewUBound = numpy.where(bbNewUBound > maxUbound, maxUbound, bbNewUBound)
# Calculate the new size. Cast to int to prevent error in sitk.
newSize = numpy.array(bbNewUBound - bbNewLBound + 1, dtype='int').tolist()
# Determine continuous index of bbNewLBound in terms of the original Index coordinate space
bbOriginalLBound = bbNewLBound / spacingRatio
  # The origin is located in the center of the first voxel, i.e. 1/2 of the spacing
  # from the corner, which corresponds to 0 in the original Index coordinate space.
  # Here we use a continuous index to calculate where the 0 of the new Index coordinate
  # space lies in terms of the original Index coordinate space (i.e. in terms of the
  # original spacing), and add the minimum bounds of the cropped area to get the origin
  # of the cropped, resampled volume in terms of the original Index coordinate space.
  # Then use the ITK functionality to bring the continuous index into the physical space (mm)
newOriginIndex = numpy.array(.5 * (resampledPixelSpacing - maskSpacing) / maskSpacing)
newCroppedOriginIndex = newOriginIndex + bbOriginalLBound
newOrigin = maskNode.TransformContinuousIndexToPhysicalPoint(newCroppedOriginIndex)
imagePixelType = imageNode.GetPixelID()
maskPixelType = maskNode.GetPixelID()
direction = numpy.array(maskNode.GetDirection())
logger.info('Applying resampling from spacing %s and size %s to spacing %s and size %s',
maskSpacing, maskSize, resampledPixelSpacing, newSize)
try:
if isinstance(interpolator, six.string_types):
interpolator = getattr(sitk, interpolator)
except Exception:
logger.warning('interpolator "%s" not recognized, using sitkBSpline', interpolator)
interpolator = sitk.sitkBSpline
rif = sitk.ResampleImageFilter()
rif.SetOutputSpacing(resampledPixelSpacing)
rif.SetOutputDirection(direction)
rif.SetSize(newSize)
rif.SetOutputOrigin(newOrigin)
logger.debug('Resampling image')
rif.SetOutputPixelType(imagePixelType)
rif.SetInterpolator(interpolator)
resampledImageNode = rif.Execute(imageNode)
logger.debug('Resampling mask')
rif.SetOutputPixelType(maskPixelType)
rif.SetInterpolator(sitk.sitkNearestNeighbor)
resampledMaskNode = rif.Execute(maskNode)
return resampledImageNode, resampledMaskNode
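# Added illustration (hedged sketch, not part of the original module):
# resampling an image/mask pair to isotropic 2 mm spacing with B-spline
# interpolation. File names and spacing are hypothetical.
def _example_resampleImage():
  image = sitk.ReadImage('image.nrrd')
  mask = getMask(sitk.ReadImage('mask.nrrd'))
  return resampleImage(image, mask, resampledPixelSpacing=[2., 2., 2.],
                       interpolator=sitk.sitkBSpline, padDistance=5, label=1)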
def normalizeImage(image, **kwargs):
r"""
Normalizes the image by centering it at the mean with standard deviation. Normalization is based on all gray values in
the image, not just those inside the segmentation.
:math:`f(x) = \frac{s(x - \mu_x)}{\sigma_x}`
Where:
- :math:`x` and :math:`f(x)` are the original and normalized intensity, respectively.
  - :math:`\mu_x` and :math:`\sigma_x` are the mean and standard deviation of the image intensity values.
- :math:`s` is an optional scaling defined by ``scale``. By default, it is set to 1.
Optionally, outliers can be removed, in which case values for which :math:`x > \mu_x + n\sigma_x` or
:math:`x < \mu_x - n\sigma_x` are set to :math:`\mu_x + n\sigma_x` and :math:`\mu_x - n\sigma_x`, respectively.
Here, :math:`n>0` and defined by ``outliers``. This, in turn, is controlled by the ``removeOutliers`` parameter.
Removal of outliers is done after the values of the image are normalized, but before ``scale`` is applied.
"""
global logger
scale = kwargs.get('normalizeScale', 1)
outliers = kwargs.get('removeOutliers')
logger.debug('Normalizing image with scale %d', scale)
image = sitk.Normalize(image)
if outliers is not None:
logger.debug('Removing outliers > %g standard deviations', outliers)
imageArr = sitk.GetArrayFromImage(image)
imageArr[imageArr > outliers] = outliers
imageArr[imageArr < -outliers] = -outliers
newImage = sitk.GetImageFromArray(imageArr)
newImage.CopyInformation(image)
image = newImage
image *= scale
return image
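# Added illustration (hedged sketch, not part of the original module):
# z-score normalization of a synthetic volume, clipping outliers at 3 standard
# deviations and scaling the result by 100.
def _example_normalizeImage():
  arr = numpy.random.RandomState(42).normal(100., 20., (5, 64, 64))
  image = sitk.GetImageFromArray(arr)
  return normalizeImage(image, normalizeScale=100, removeOutliers=3)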
def resegmentMask(imageNode, maskNode, **kwargs):
r"""
Resegment the Mask based on the range specified by the threshold(s) in ``resegmentRange``. Either 1 or 2 thresholds
can be defined. In case of 1 threshold, all values equal to or higher than that threshold are included. If there are
  2 thresholds, all voxels with a value inside the closed range defined by these thresholds are included
  (i.e. a voxel is included if :math:`T_{lower} \leq X_{gl} \leq T_{upper}`).
The resegmented mask is therefore always equal or smaller in size than the original mask.
  In the case where either resegmentRange or resegmentMode contains illegal values, a ValueError is raised.
There are 3 modes for defining the threshold:
  1. absolute (default): The values in resegmentRange are interpreted as absolute values (i.e. corresponding to the gray
  values in the image)
2. relative: The values in resegmentRange define the threshold as relative to the maximum value found in the ROI.
(e.g. 0.5 indicates a threshold at 50% of maximum gray value)
3. sigma: The threshold is defined as the number of sigma from the mean. (e.g. resegmentRange [-3, 3] will include
all voxels that have a value that differs 3 or less standard deviations from the mean).
"""
global logger
resegmentRange = kwargs['resegmentRange']
resegmentMode = kwargs.get('resegmentMode', 'absolute')
label = kwargs.get('label', 1)
if resegmentRange is None:
raise ValueError('resegmentRange is None.')
if len(resegmentRange) == 0 or len(resegmentRange) > 2:
raise ValueError('Length %i is not allowed for resegmentRange' % len(resegmentRange))
logger.debug('Resegmenting mask (range %s, mode %s)', resegmentRange, resegmentMode)
im_arr = sitk.GetArrayFromImage(imageNode)
ma_arr = (sitk.GetArrayFromImage(maskNode) == label) # boolean array
oldSize = numpy.sum(ma_arr)
if resegmentMode == 'absolute':
logger.debug('Resegmenting in absolute mode')
thresholds = sorted(resegmentRange)
elif resegmentMode == 'relative':
max_gl = numpy.max(im_arr[ma_arr])
logger.debug('Resegmenting in relative mode, max %g', max_gl)
thresholds = [max_gl * th for th in sorted(resegmentRange)]
elif resegmentMode == 'sigma':
mean_gl = numpy.mean(im_arr[ma_arr])
sd_gl = numpy.std(im_arr[ma_arr])
logger.debug('Resegmenting in sigma mode, mean %g, std %g', mean_gl, sd_gl)
thresholds = [mean_gl + sd_gl * th for th in sorted(resegmentRange)]
else:
raise ValueError('Resegment mode %s not recognized.' % resegmentMode)
# Apply lower threshold
logger.debug('Applying lower threshold (%g)', thresholds[0])
ma_arr[ma_arr] = im_arr[ma_arr] >= thresholds[0]
# If 2 thresholds are defined, also apply an upper threshold
if len(thresholds) == 2:
logger.debug('Applying upper threshold (%g)', thresholds[1])
ma_arr[ma_arr] = im_arr[ma_arr] <= thresholds[1]
roiSize = numpy.sum(ma_arr)
if roiSize <= 1:
raise ValueError("Resegmentation excluded too many voxels with label %i (retained %i voxel(s))! "
"Cannot extract features" % (label, roiSize))
# Transform the boolean array back to an image with the correct voxels set to the label value
newMask_arr = numpy.zeros(ma_arr.shape, dtype='int')
newMask_arr[ma_arr] = label
newMask = sitk.GetImageFromArray(newMask_arr)
newMask.CopyInformation(maskNode)
logger.debug('Resegmentation complete, new size: %d voxels (excluded %d voxels)', roiSize, oldSize - roiSize)
return newMask
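# Added illustration (hedged sketch, not part of the original module):
# restricting a CT ROI to a soft-tissue window by resegmenting with an
# absolute range. File names and thresholds are hypothetical.
def _example_resegmentMask():
  image = sitk.ReadImage('ct.nrrd')
  mask = getMask(sitk.ReadImage('mask.nrrd'))
  return resegmentMask(image, mask, resegmentRange=[-50, 200],
                       resegmentMode='absolute', label=1)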
def getOriginalImage(inputImage, inputMask, **kwargs):
"""
This function does not apply any filter, but returns the original image. This function is needed to
dynamically expose the original image as a valid image type.
:return: Yields original image, 'original' and ``kwargs``
"""
global logger
logger.debug('Yielding original image')
yield inputImage, 'original', kwargs
def getLoGImage(inputImage, inputMask, **kwargs):
r"""
Applies a Laplacian of Gaussian filter to the input image and yields a derived image for each sigma value specified.
A Laplacian of Gaussian image is obtained by convolving the image with the second derivative (Laplacian) of a Gaussian
kernel.
The Gaussian kernel is used to smooth the image and is defined as
.. math::
G(x, y, z, \sigma) = \frac{1}{(\sigma \sqrt{2 \pi})^3}e^{-\frac{x^2 + y^2 + z^2}{2\sigma^2}}
The Gaussian kernel is convolved by the laplacian kernel :math:`\nabla^2G(x, y, z)`, which is sensitive to areas with
rapidly changing intensities, enhancing edges. The width of the filter in the Gaussian kernel is determined by
:math:`\sigma` and can be used to emphasize more fine (low :math:`\sigma` values) or coarse (high :math:`\sigma`
values) textures.
.. warning::
The LoG filter implemented in PyRadiomics is a 3D LoG filter, and therefore requires 3D input. Features using a
single slice (2D) segmentation can still be extracted, but the input image *must* be a 3D image, with a minimum size
in all dimensions :math:`\geq \sigma`. If input image is too small, a warning is logged and :math:`\sigma` value is
skipped. Moreover, the image size *must* be at least 4 voxels in each dimensions, if this constraint is not met, no
LoG derived images can be generated.
Following settings are possible:
- sigma: List of floats or integers, must be greater than 0. Filter width (mm) to use for the Gaussian kernel
(determines coarseness).
.. warning::
Setting for sigma must be provided. If omitted, no LoG image features are calculated and the function
will return an empty dictionary.
Returned filter name reflects LoG settings:
  log-sigma-<sigmaValue>-mm-3D.
References:
- `SimpleITK Doxygen documentation
<https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1LaplacianRecursiveGaussianImageFilter.html>`_
- `ITK Doxygen documentation <https://itk.org/Doxygen/html/classitk_1_1LaplacianRecursiveGaussianImageFilter.html>`_
- `<https://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian>`_
:return: Yields log filtered image for each specified sigma, corresponding image type name and ``kwargs`` (customized
settings).
"""
global logger
logger.debug('Generating LoG images')
# Check if size of image is > 4 in all 3D directions (otherwise, LoG filter will fail)
size = numpy.array(inputImage.GetSize())
spacing = numpy.array(inputImage.GetSpacing())
if numpy.min(size) < 4:
logger.warning('Image too small to apply LoG filter, size: %s', size)
return
sigmaValues = kwargs.get('sigma', [])
for sigma in sigmaValues:
logger.info('Computing LoG with sigma %g', sigma)
if sigma > 0.0:
if numpy.all(size >= numpy.ceil(sigma / spacing) + 1):
lrgif = sitk.LaplacianRecursiveGaussianImageFilter()
lrgif.SetNormalizeAcrossScale(True)
lrgif.SetSigma(sigma)
inputImageName = 'log-sigma-%s-mm-3D' % (str(sigma).replace('.', '-'))
logger.debug('Yielding %s image', inputImageName)
yield lrgif.Execute(inputImage), inputImageName, kwargs
else:
        logger.warning('applyLoG: the size (%s) of the inputImage must be at least ceil(sigma (%g) / spacing (%s)) + 1',
                       size,
                       sigma,
                       spacing)
else:
logger.warning('applyLoG: sigma must be greater than 0.0: %g', sigma)
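# Added illustration (hedged sketch, not part of the original module):
# consuming the LoG generator for two sigma values. Nothing is computed until
# the generator is iterated. The input file name is hypothetical.
def _example_getLoGImage():
  image = sitk.ReadImage('image.nrrd')  # assumed 3D, at least 4 voxels per dimension
  return [(name, im) for im, name, _ in getLoGImage(image, None, sigma=[1.0, 3.0])]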
def getWaveletImage(inputImage, inputMask, **kwargs):
"""
Applies wavelet filter to the input image and yields the decompositions and the approximation.
Following settings are possible:
- start_level [0]: integer, 0 based level of wavelet which should be used as first set of decompositions
from which a signature is calculated
- level [1]: integer, number of levels of wavelet decompositions from which a signature is calculated.
- wavelet ["coif1"]: string, type of wavelet decomposition. Enumerated value, validated against possible values
present in the ``pyWavelet.wavelist()``. Current possible values (pywavelet version 0.4.0) (where an
    additional number is needed, the range of values is indicated in []):
- haar
- dmey
- sym[2-20]
- db[1-20]
- coif[1-5]
- bior[1.1, 1.3, 1.5, 2.2, 2.4, 2.6, 2.8, 3.1, 3.3, 3.5, 3.7, 3.9, 4.4, 5.5, 6.8]
- rbio[1.1, 1.3, 1.5, 2.2, 2.4, 2.6, 2.8, 3.1, 3.3, 3.5, 3.7, 3.9, 4.4, 5.5, 6.8]
Returned filter name reflects wavelet type:
wavelet[level]-<decompositionName>
N.B. only levels greater than the first level are entered into the name.
  :return: Yields each wavelet decomposition and final approximation, corresponding image type name and ``kwargs``
(customized settings).
"""
global logger
logger.debug('Generating Wavelet images')
Nd = inputImage.GetDimension()
axes = list(range(Nd - 1, -1, -1))
if kwargs.get('force2D', False):
axes.remove(kwargs.get('force2Ddimension', 0))
approx, ret = _swt3(inputImage, tuple(axes), **kwargs)
for idx, wl in enumerate(ret, start=1):
for decompositionName, decompositionImage in wl.items():
logger.info('Computing Wavelet %s', decompositionName)
if idx == 1:
inputImageName = 'wavelet-%s' % (decompositionName)
else:
inputImageName = 'wavelet%s-%s' % (idx, decompositionName)
logger.debug('Yielding %s image', inputImageName)
yield decompositionImage, inputImageName, kwargs
if len(ret) == 1:
inputImageName = 'wavelet-%s' % ('L' * len(axes))
else:
inputImageName = 'wavelet%s-%s' % (len(ret), ('L' * len(axes)))
logger.debug('Yielding approximation (%s) image', inputImageName)
yield approx, inputImageName, kwargs
def _swt3(inputImage, axes, **kwargs): # Stationary Wavelet Transform 3D
wavelet = kwargs.get('wavelet', 'coif1')
level = kwargs.get('level', 1)
start_level = kwargs.get('start_level', 0)
matrix = sitk.GetArrayFromImage(inputImage) # This function gets a numpy array from the SimpleITK Image "inputImage"
matrix = numpy.asarray(matrix) # The function np.asarray converts "matrix" (which could be also a tuple) into an array.
original_shape = matrix.shape
# original_shape becomes a tuple (?,?,?) containing the number of rows, columns, and slices of the image
# this is of course dependent on the number of dimensions, but the same principle holds
padding = tuple([(0, 1 if dim % 2 != 0 else 0) for dim in original_shape])
# padding is necessary because of pywt.swtn (see function Notes)
data = matrix.copy() # creates a modifiable copy of "matrix" and we call it "data"
data = numpy.pad(data, padding, 'wrap') # padding the tuple "padding" previously computed
if not isinstance(wavelet, pywt.Wavelet):
wavelet = pywt.Wavelet(wavelet)
for i in range(0, start_level): # if start_level = 0 (default) this for loop never gets executed
# compute all decompositions and saves them in "dec" dict
dec = pywt.swtn(data, wavelet, level=1, start_level=0, axes=axes)[0]
# copies in "data" just the "aaa" decomposition (i.e. approximation; No of consecutive 'a's = len(axes))
data = dec['a' * len(axes)].copy()
ret = [] # initialize empty list
for i in range(start_level, start_level + level):
# compute the n-dimensional stationary wavelet transform
dec = pywt.swtn(data, wavelet, level=1, start_level=0, axes=axes)[0]
# Copy the approximation into data (approximation in output / input for next levels)
data = dec['a' * len(axes)].copy()
dec_im = {} # initialize empty dict
for decName, decImage in six.iteritems(dec):
      # Returning the approximation is done only for the last loop,
      # and is handled separately below (by building it from `data`).
      # Therefore, skip it here.
if decName == 'a' * len(axes):
continue
decTemp = decImage.copy()
decTemp = decTemp[tuple(slice(None, -1 if dim % 2 != 0 else None) for dim in original_shape)]
sitkImage = sitk.GetImageFromArray(decTemp)
sitkImage.CopyInformation(inputImage)
dec_im[str(decName).replace('a', 'L').replace('d', 'H')] = sitkImage
# modifies 'a' with 'L' (Low-pass filter) and 'd' with 'H' (High-pass filter)
ret.append(dec_im) # appending all the filtered sitk images (stored in "dec_im") to the "ret" list
data = data[tuple(slice(None, -1 if dim % 2 != 0 else None) for dim in original_shape)]
approximation = sitk.GetImageFromArray(data)
approximation.CopyInformation(inputImage)
return approximation, ret # returns the approximation and the detail (ret) coefficients of the stationary wavelet decomposition
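# Added illustration (hedged sketch, not part of the original module):
# collecting all level-1 'coif1' decompositions (e.g. wavelet-LLH, ..., plus
# the wavelet-LLL approximation) from the generator above. File name is a
# hypothetical placeholder.
def _example_getWaveletImage():
  image = sitk.ReadImage('image.nrrd')
  return dict((name, im) for im, name, _ in
              getWaveletImage(image, None, wavelet='coif1', level=1))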
def getSquareImage(inputImage, inputMask, **kwargs):
r"""
Computes the square of the image intensities.
  Resulting values are rescaled on the range of the initial original image; because of the squaring, all
  intensities in the resultant filtered image are non-negative.
:math:`f(x) = (cx)^2,\text{ where } c=\displaystyle\frac{1}{\sqrt{\max(|x|)}}`
Where :math:`x` and :math:`f(x)` are the original and filtered intensity, respectively.
:return: Yields square filtered image, 'square' and ``kwargs`` (customized settings).
"""
global logger
im = sitk.GetArrayFromImage(inputImage)
im = im.astype('float64')
coeff = 1 / numpy.sqrt(numpy.max(numpy.abs(im)))
im = (coeff * im) ** 2
im = sitk.GetImageFromArray(im)
im.CopyInformation(inputImage)
logger.debug('Yielding square image')
yield im, 'square', kwargs
def getSquareRootImage(inputImage, inputMask, **kwargs):
r"""
Computes the square root of the absolute value of image intensities.
Resulting values are rescaled on the range of the initial original image and negative intensities are made
negative in resultant filtered image.
:math:`f(x) = \left\{ {\begin{array}{lcl}
\sqrt{cx} & \mbox{for} & x \ge 0 \\
-\sqrt{-cx} & \mbox{for} & x < 0\end{array}} \right.,\text{ where } c=\max(|x|)`
Where :math:`x` and :math:`f(x)` are the original and filtered intensity, respectively.
:return: Yields square root filtered image, 'squareroot' and ``kwargs`` (customized settings).
"""
global logger
im = sitk.GetArrayFromImage(inputImage)
im = im.astype('float64')
coeff = numpy.max(numpy.abs(im))
im[im > 0] = numpy.sqrt(im[im > 0] * coeff)
im[im < 0] = - numpy.sqrt(-im[im < 0] * coeff)
im = sitk.GetImageFromArray(im)
im.CopyInformation(inputImage)
logger.debug('Yielding squareroot image')
yield im, 'squareroot', kwargs
def getLogarithmImage(inputImage, inputMask, **kwargs):
r"""
Computes the logarithm of the absolute value of the original image + 1.
Resulting values are rescaled on the range of the initial original image and negative intensities are made
negative in resultant filtered image.
:math:`f(x) = \left\{ {\begin{array}{lcl}
c\log{(x + 1)} & \mbox{for} & x \ge 0 \\
-c\log{(-x + 1)} & \mbox{for} & x < 0\end{array}} \right. \text{, where } c=\frac{\max(|x|)}{\log(\max(|x|) + 1)}`
Where :math:`x` and :math:`f(x)` are the original and filtered intensity, respectively.
:return: Yields logarithm filtered image, 'logarithm' and ``kwargs`` (customized settings)
"""
global logger
im = sitk.GetArrayFromImage(inputImage)
im = im.astype('float64')
im_max = numpy.max(numpy.abs(im))
im[im > 0] = numpy.log(im[im > 0] + 1)
im[im < 0] = - numpy.log(- (im[im < 0] - 1))
im = im * (im_max / numpy.max(numpy.abs(im)))
im = sitk.GetImageFromArray(im)
im.CopyInformation(inputImage)
logger.debug('Yielding logarithm image')
yield im, 'logarithm', kwargs
def getExponentialImage(inputImage, inputMask, **kwargs):
r"""
Computes the exponential of the original image.
Resulting values are rescaled on the range of the initial original image.
:math:`f(x) = e^{cx},\text{ where } c=\displaystyle\frac{\log(\max(|x|))}{\max(|x|)}`
Where :math:`x` and :math:`f(x)` are the original and filtered intensity, respectively.
:return: Yields exponential filtered image, 'exponential' and ``kwargs`` (customized settings)
"""
global logger
im = sitk.GetArrayFromImage(inputImage)
im = im.astype('float64')
im_max = numpy.max(numpy.abs(im))
coeff = numpy.log(im_max) / im_max
im = numpy.exp(coeff * im)
im = sitk.GetImageFromArray(im)
im.CopyInformation(inputImage)
logger.debug('Yielding exponential image')
yield im, 'exponential', kwargs
def getGradientImage(inputImage, inputMask, **kwargs):
r"""
Compute and return the Gradient Magnitude in the image.
By default, takes into account the image spacing, this can be switched off by specifying
``gradientUseSpacing = False``.
References:
- `SimpleITK documentation
<https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1GradientMagnitudeImageFilter.html>`_
- `<https://en.wikipedia.org/wiki/Image_gradient>`_
"""
gmif = sitk.GradientMagnitudeImageFilter()
gmif.SetUseImageSpacing(kwargs.get('gradientUseSpacing', True))
im = gmif.Execute(inputImage)
yield im, 'gradient', kwargs
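# Added illustration (hedged sketch, not part of the original module): gradient
# magnitude computed without taking voxel spacing into account. File name is a
# hypothetical placeholder.
def _example_getGradientImage():
  image = sitk.ReadImage('image.nrrd')
  im, name, _ = next(getGradientImage(image, None, gradientUseSpacing=False))
  return im, name  # name == 'gradient'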
def getLBP2DImage(inputImage, inputMask, **kwargs):
"""
Compute and return the Local Binary Pattern (LBP) in 2D. If ``force2D`` is set to false (= feature extraction in 3D) a
warning is logged, as this filter processes the image in a by-slice operation. The plane in which the LBP is
applied can be controlled by the ``force2Ddimension`` parameter (see also :py:func:`generateAngles`).
Following settings are possible (in addition to ``force2Ddimension``):
- ``lbp2DRadius`` [1]: Float, specifies the radius in which the neighbours should be sampled
- ``lbp2DSamples`` [9]: Integer, specifies the number of samples to use
- ``lbp2DMethod`` ['uniform']: String, specifies the method for computing the LBP to use.
For more information see `scikit documentation
<http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.local_binary_pattern>`_
:return: Yields LBP filtered image, 'lbp-2D' and ``kwargs`` (customized settings)
.. note::
LBP can often return only a very small number of different gray levels. A customized bin width is often needed.
.. warning::
Requires package ``scikit-image`` to function. If not available, this filter logs a warning and does not yield an image.
References:
- T. Ojala, M. Pietikainen, and D. Harwood (1994), "Performance evaluation of texture measures with classification
based on Kullback discrimination of distributions", Proceedings of the 12th IAPR International Conference on Pattern
Recognition (ICPR 1994), vol. 1, pp. 582 - 585.
- T. Ojala, M. Pietikainen, and D. Harwood (1996), "A Comparative Study of Texture Measures with Classification Based
on Feature Distributions", Pattern Recognition, vol. 29, pp. 51-59.
"""
global logger
try:
from skimage.feature import local_binary_pattern
except ImportError:
logger.warning('Could not load required package "skimage", cannot implement filter LBP 2D')
return
lbp_radius = kwargs.get('lbp2DRadius', 1)
  lbp_samples = kwargs.get('lbp2DSamples', 9)
lbp_method = kwargs.get('lbp2DMethod', 'uniform')
im_arr = sitk.GetArrayFromImage(inputImage)
Nd = inputImage.GetDimension()
if Nd == 3:
# Warn the user if features are extracted in 3D, as this function calculates LBP in 2D
if not kwargs.get('force2D', False):
logger.warning('Calculating Local Binary Pattern in 2D, but extracting features in 3D. Use with caution!')
lbp_axis = kwargs.get('force2Ddimension', 0)
im_arr = im_arr.swapaxes(0, lbp_axis)
for idx in range(im_arr.shape[0]):
im_arr[idx, ...] = local_binary_pattern(im_arr[idx, ...], P=lbp_samples, R=lbp_radius, method=lbp_method)
im_arr = im_arr.swapaxes(0, lbp_axis)
elif Nd == 2:
im_arr = local_binary_pattern(im_arr, P=lbp_samples, R=lbp_radius, method=lbp_method)
else:
logger.warning('LBP 2D is only available for 2D or 3D with forced 2D extraction')
return
im = sitk.GetImageFromArray(im_arr)
im.CopyInformation(inputImage)
yield im, 'lbp-2D', kwargs
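# NOTE: hypothetical usage sketch, not part of the original module. The filter
# functions in this file are generators, so callers iterate over
# (image, name, settings) triplets; requires scikit-image, as noted above.
def _demoLBP2D(image, mask):
  for filtered, name, _ in getLBP2DImage(image, mask, lbp2DRadius=2, lbp2DSamples=16):
    logger.info('%s: size %s', name, str(filtered.GetSize()))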
def getLBP3DImage(inputImage, inputMask, **kwargs):
"""
Compute and return the Local Binary Pattern (LBP) in 3D using spherical harmonics.
If ``force2D`` is set to true (= feature extraction in 2D) a warning is logged.
LBP is only calculated for voxels segmented in the mask
Following settings are possible:
  - ``lbp3DLevels`` [2]: Integer, specifies the number of levels in spherical harmonics to use.
- ``lbp3DIcosphereRadius`` [1]: Float, specifies the radius in which the neighbours should be sampled
- ``lbp3DIcosphereSubdivision`` [1]: Integer, specifies the number of subdivisions to apply in the icosphere
:return: Yields LBP filtered image for each level, 'lbp-3D-m<level>' and ``kwargs`` (customized settings).
Additionally yields the kurtosis image, 'lbp-3D-k' and ``kwargs``.
.. note::
LBP can often return only a very small number of different gray levels. A customized bin width is often needed.
.. warning::
  Requires packages ``scipy`` and ``trimesh`` to function. If not available, this filter logs a warning and does
  not yield an image.
References:
- Banerjee, J, Moelker, A, Niessen, W.J, & van Walsum, T.W. (2013), "3D LBP-based rotationally invariant region
description." In: Park JI., Kim J. (eds) Computer Vision - ACCV 2012 Workshops. ACCV 2012. Lecture Notes in Computer
Science, vol 7728. Springer, Berlin, Heidelberg. doi:10.1007/978-3-642-37410-4_3
"""
global logger
Nd = inputImage.GetDimension()
if Nd != 3:
logger.warning('LBP 3D only available for 3 dimensional images, found %i dimensions', Nd)
return
try:
from scipy.stats import kurtosis
from scipy.ndimage.interpolation import map_coordinates
from scipy.special import sph_harm
from trimesh.creation import icosphere
except ImportError:
logger.warning('Could not load required package "scipy" or "trimesh", cannot implement filter LBP 3D')
return
# Warn the user if features are extracted in 2D, as this function calculates LBP in 3D
if kwargs.get('force2D', False):
logger.warning('Calculating Local Binary Pattern in 3D, but extracting features in 2D. Use with caution!')
label = kwargs.get('label', 1)
lbp_levels = kwargs.get('lbp3DLevels', 2)
lbp_icosphereRadius = kwargs.get('lbp3DIcosphereRadius', 1)
lbp_icosphereSubdivision = kwargs.get('lbp3DIcosphereSubdivision', 1)
im_arr = sitk.GetArrayFromImage(inputImage)
ma_arr = sitk.GetArrayFromImage(inputMask)
# Variables used in the shape comments:
# Np Number of voxels
# Nv Number of vertices
# Vertices icosahedron for spherical sampling
coords_icosahedron = numpy.array(icosphere(lbp_icosphereSubdivision, lbp_icosphereRadius).vertices) # shape(Nv, 3)
# Corresponding polar coordinates
theta = numpy.arccos(numpy.true_divide(coords_icosahedron[:, 2], lbp_icosphereRadius))
phi = numpy.arctan2(coords_icosahedron[:, 1], coords_icosahedron[:, 0])
# Corresponding spherical harmonics coefficients Y_{m, n, theta, phi}
Y = sph_harm(0, 0, theta, phi) # shape(Nv,)
n_ix = numpy.array(0)
for n in range(1, lbp_levels):
for m in range(-n, n + 1):
n_ix = numpy.append(n_ix, n)
Y = numpy.column_stack((Y, sph_harm(m, n, theta, phi)))
# shape (Nv, x) where x is the number of iterations in the above loops + 1
# Get labelled coordinates
ROI_coords = numpy.where(ma_arr == label) # shape(3, Np)
# Interpolate f (samples on the spheres across the entire volume)
coords = numpy.array(ROI_coords).T[None, :, :] + coords_icosahedron[:, None, :] # shape(Nv, Np, 3)
f = map_coordinates(im_arr, coords.T, order=3) # Shape(Np, Nv) Note that 'Np' and 'Nv' are swapped due to .T
# Compute spherical Kurtosis
k = kurtosis(f, axis=1) # shape(Np,)
# Apply sign function
f_centroids = im_arr[ROI_coords] # Shape(Np,)
f = numpy.greater_equal(f, f_centroids[:, None]).astype(int) # Shape(Np, Nv)
# Compute c_{m,n} coefficients
c = numpy.multiply(f[:, :, None], Y[None, :, :]) # Shape(Np, Nv, x)
c = c.sum(axis=1) # Shape(Np, x)
# Integrate over m
f = numpy.multiply(c[:, None, n_ix == 0], Y[None, :, n_ix == 0]) # Shape (Np, Nv, 1)
for n in range(1, lbp_levels):
f = numpy.concatenate((f,
numpy.sum(numpy.multiply(c[:, None, n_ix == n], Y[None, :, n_ix == n]),
axis=2, keepdims=True)
),
axis=2)
# Shape f (Np, Nv, levels)
# Compute L2-Norm
f = numpy.sqrt(numpy.sum(f ** 2, axis=1)) # shape(Np, levels)
# Keep only Real Part
f = numpy.real(f) # shape(Np, levels)
k = numpy.real(k) # shape(Np,)
# Yield the derived images for each level
result = numpy.ndarray(im_arr.shape)
for l_idx in range(lbp_levels):
result[ROI_coords] = f[:, l_idx]
# Create a SimpleITK image
im = sitk.GetImageFromArray(result)
im.CopyInformation(inputImage)
yield im, 'lbp-3D-m%d' % (l_idx + 1), kwargs
# Yield Kurtosis
result[ROI_coords] = k
# Create a SimpleITK image
im = sitk.GetImageFromArray(result)
im.CopyInformation(inputImage)
yield im, 'lbp-3D-k', kwargs
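# NOTE: hypothetical usage sketch, not part of the original module.
# getLBP3DImage yields one filtered image per spherical harmonics level
# ('lbp-3D-m1', 'lbp-3D-m2', ...) followed by the kurtosis image 'lbp-3D-k',
# so a caller can collect all derived images into a dictionary:
def _demoLBP3D(image, mask):
  return {name: im for im, name, _ in getLBP3DImage(image, mask, lbp3DLevels=2)}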
|
Radiomics/pyradiomics
|
radiomics/imageoperations.py
|
Python
|
bsd-3-clause
| 54,849
|
[
"Gaussian"
] |
3cfa92549925878c7f2a0940f9dfdcc26b5095e2a6bfa81aaa00228904669b24
|
import re
class Compiler(object):
RE_INTERPOLATE = re.compile(r'(\\)?([#!]){(.*?)}')
doctypes = {
'5': '<!DOCTYPE html>'
, 'xml': '<?xml version="1.0" encoding="utf-8" ?>'
, 'default': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
, 'transitional': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
, 'strict': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">'
, 'frameset': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">'
, '1.1': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">'
, 'basic': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML Basic 1.1//EN" "http://www.w3.org/TR/xhtml-basic/xhtml-basic11.dtd">'
, 'mobile': '<!DOCTYPE html PUBLIC "-//WAPFORUM//DTD XHTML Mobile 1.2//EN" "http://www.openmobilealliance.org/tech/DTD/xhtml-mobile12.dtd">'
}
inlineTags = [
'a'
, 'abbr'
, 'acronym'
, 'b'
, 'br'
, 'code'
, 'em'
, 'font'
, 'i'
, 'img'
, 'ins'
, 'kbd'
, 'map'
, 'samp'
, 'small'
, 'span'
, 'strong'
, 'sub'
, 'sup'
, 'textarea'
]
selfClosing = [
'meta'
, 'img'
, 'link'
, 'input'
, 'area'
, 'base'
, 'col'
, 'br'
, 'hr'
]
autocloseCode = 'if,for,block,filter,autoescape,with,trans,spaceless,comment,cache,macro,localize,compress'.split(',')
filters = {
'cdata':lambda x,y:'<![CDATA[\n%s\n]]>'%x
}
def __init__(self,node,**options):
self.options = options
self.node = node
self.hasCompiledDoctype = False
self.hasCompiledTag = False
self.pp = options.get('pretty',True)
self.debug = options.get('compileDebug',False)!=False
self.filters.update(options.get('filters',{}))
self.doctypes.update(options.get('doctypes',{}))
self.selfClosing.extend(options.get('selfClosing',[]))
self.autocloseCode.extend(options.get('autocloseCode',[]))
self.inlineTags.extend(options.get('inlineTags',[]))
self.indents = 0
self.doctype = None
self.terse = False
self.xml = False
if 'doctype' in self.options: self.setDoctype(options['doctype'])
def compile_top(self):
return ''
def compile(self):
self.buf = [self.compile_top()]
self.lastBufferedIdx = -1
self.visit(self.node)
return unicode(u''.join(self.buf))
def setDoctype(self,name):
self.doctype = self.doctypes.get(name or 'default','<!DOCTYPE %s>'%name)
self.terse = name in ['5','html']
self.xml = self.doctype.startswith('<?xml')
def buffer (self,str):
if self.lastBufferedIdx == len(self.buf):
self.lastBuffered += str
self.buf[self.lastBufferedIdx-1] = self.lastBuffered
else:
self.buf.append(str)
            self.lastBuffered = str
self.lastBufferedIdx = len(self.buf)
def visit(self,node,*args,**kwargs):
# debug = self.debug
# if debug:
# self.buf.append('__jade.unshift({ lineno: %d, filename: %s });' % (node.line,('"%s"'%node.filename) if node.filename else '__jade[0].filename'));
# if node.debug==False and self.debug:
# self.buf.pop()
# self.buf.pop()
self.visitNode(node,*args,**kwargs)
# if debug: self.buf.append('__jade.shift();')
def visitNode (self,node,*args,**kwargs):
name = node.__class__.__name__
# print name, node
return getattr(self,'visit%s'%name)(node,*args,**kwargs)
def visitLiteral(self,node):
self.buffer(node.str)
def visitBlock(self,block):
for node in block.nodes:
self.visit(node)
def visitCodeBlock(self,block):
self.buffer('{%% block %s %%}'%block.name)
if block.mode=='prepend': self.buffer('{{super()}}')
self.visitBlock(block)
if block.mode=='append': self.buffer('{{super()}}')
self.buffer('{% endblock %}')
def visitDoctype(self,doctype=None):
if doctype and (doctype.val or not self.doctype):
self.setDoctype(doctype.val or 'default')
if self.doctype: self.buffer(self.doctype)
self.hasCompiledDoctype = True
def visitMixin(self,mixin):
if mixin.block:
self.buffer('{%% macro %s(%s) %%}'%(mixin.name,mixin.args))
self.visitBlock(mixin.block)
self.buffer('{% endmacro %}')
else:
self.buffer('{{%s(%s)}}'%(mixin.name,mixin.args))
def visitTag(self,tag):
self.indents += 1
name = tag.name
if not self.hasCompiledTag:
if not self.hasCompiledDoctype and 'html'==name:
self.visitDoctype()
self.hasCompiledTag = True
if self.pp and name not in self.inlineTags:
self.buffer('\n'+' '*(self.indents-1))
closed = name in self.selfClosing and not self.xml
self.buffer('<%s'%name)
self.visitAttributes(tag.attrs)
self.buffer('/>' if not self.terse and closed else '>')
if not closed:
if tag.code: self.visitCode(tag.code)
if tag.text: self.buffer(self.interpolate(tag.text.nodes[0].lstrip()))
self.escape = 'pre' == tag.name
self.visit(tag.block)
if self.pp and not name in self.inlineTags and not tag.textOnly:
self.buffer('\n')
if self.pp and (not name in self.inlineTags):
self.buffer(' '*(self.indents-1))
self.buffer('</%s>'%name)
self.indents -= 1
def visitFilter(self,filter):
if filter.name not in self.filters:
if filter.isASTFilter:
raise Exception('unknown ast filter "%s:"'%filter.name)
else:
raise Exception('unknown filter "%s:"'%filter.name)
fn = self.filters.get(filter.name)
if filter.isASTFilter:
self.buf.append(fn(filter.block,self,filter.attrs))
else:
text = ''.join(filter.block.nodes)
text = self.interpolate(text)
filter.attrs = filter.attrs or {}
filter.attrs['filename'] = self.options.get('filename',None)
self.buffer(fn(text,filter.attrs))
def _interpolate(self,attr,repl):
return self.RE_INTERPOLATE.sub(lambda matchobj:repl(matchobj.group(3)),attr)
def interpolate(self,text):
return self._interpolate(text,lambda x:'{{%s}}'%x)
def visitText(self,text):
text = ''.join(text.nodes)
text = self.interpolate(text)
self.buffer(text)
self.buffer('\n')
def visitComment(self,comment):
if not comment.buffer: return
if self.pp: self.buffer('\n'+' '*(self.indents))
self.buffer('<!--%s-->'%comment.val)
def visitAssignment(self,assignment):
self.buffer('{%% set %s = %s %%}'%(assignment.name,assignment.val))
def visitExtends(self,node):
self.buffer('{%% extends "%s" %%}'%(node.path))
def visitInclude(self,node):
self.buffer('{%% include "%s" %%}'%(node.path))
def visitBlockComment(self,comment):
if not comment.buffer: return
isConditional = comment.val.strip().startswith('if')
self.buffer('<!--[%s]>'%comment.val.strip() if isConditional else '<!--%s'%comment.val)
self.visit(comment.block)
self.buffer('<![endif]-->' if isConditional else '-->')
def visitConditional(self,conditional):
TYPE_CODE = {
'if': lambda x: 'if %s'%x,
'unless': lambda x: 'if not %s'%x,
'elif': lambda x: 'elif %s'%x,
'else': lambda x: 'else'
}
self.buf.append('{%% %s %%}'%TYPE_CODE[conditional.type](conditional.sentence))
if conditional.block:
self.visit(conditional.block)
for next in conditional.next:
self.visitConditional(next)
if conditional.type in ['if','unless']: self.buf.append('{% endif %}')
def visitCode(self,code):
if code.buffer:
val = code.val.lstrip()
self.buf.append('{{%s%s}}'%(val,'|escape' if code.escape else ''))
else:
self.buf.append('{%% %s %%}'%code.val)
if code.block:
# if not code.buffer: self.buf.append('{')
self.visit(code.block)
# if not code.buffer: self.buf.append('}')
if not code.buffer:
codeTag = code.val.strip().split(' ',1)[0]
if codeTag in self.autocloseCode:
self.buf.append('{%% end%s %%}'%codeTag)
def visitEach(self,each):
self.buf.append('{%% for %s in %s %%}'%(','.join(each.keys),each.obj))
self.visit(each.block)
self.buf.append('{% endfor %}')
def attributes(self,attrs):
return "{{__pyjade_attrs(%s)}}"%attrs
def visitDynamicAttributes(self,attrs):
buf,classes,params = [],[],{}
terse='terse=True' if self.terse else ''
for attr in attrs:
if attr['name'] == 'class':
classes.append('(%s)'%attr['val'])
else:
pair = "('%s',(%s))"%(attr['name'],attr['val'])
buf.append(pair)
if classes:
classes = " , ".join(classes)
buf.append("('class', (%s))"%classes)
buf = ', '.join(buf)
if self.terse: params['terse'] = 'True'
if buf: params['attrs'] = '[%s]'%buf
param_string = ', '.join(['%s=%s'%(n,v) for n,v in params.iteritems()])
if buf or terse:
self.buf.append(self.attributes(param_string))
def visitAttributes(self,attrs):
temp_attrs = []
for attr in attrs:
if attr['static']:
if temp_attrs:
self.visitDynamicAttributes(temp_attrs)
temp_attrs = []
self.buf.append(' %s=%s'%(attr['name'],attr['val']))
else:
temp_attrs.append(attr)
if temp_attrs: self.visitDynamicAttributes(temp_attrs)
try:
import coffeescript
Compiler.filters['coffeescript'] = lambda x, y: '<script>%s</script>' % coffeescript.compile(x)
except ImportError:
pass
try:
import markdown
Compiler.filters['markdown'] = lambda x,y: markdown.markdown(x, output_format='html5')
except ImportError:
pass
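# NOTE: hypothetical sketch, not part of the original module. The #{...}
# interpolation handled above can be exercised directly through the
# class-level regex, without building a full node tree:
def _demo_interpolation():
    text = 'Hello #{user.name}, you have #{count} messages'
    return Compiler.RE_INTERPOLATE.sub(lambda m: '{{%s}}' % m.group(3), text)
    # -> 'Hello {{user.name}}, you have {{count}} messages'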
|
xlk521/cloudguantou
|
pyjade/compiler.py
|
Python
|
bsd-3-clause
| 10,790
|
[
"VisIt"
] |
8206bf1b9f969d691ca4f0dfe8b66521f956949bb077f394160ae9c9383c5343
|
#!/bin/env python
"""
Script to call the DataRecoveryAgent functionality by hand.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DIRAC import S_OK, gLogger
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
__RCSID__ = "$Id$"
class Params(object):
"""Collection of Parameters set via CLI switches."""
def __init__(self):
self.enabled = False
self.transID = 0
def setEnabled(self, _):
self.enabled = True
return S_OK()
def setTransID(self, transID):
self.transID = int(transID)
return S_OK()
def registerSwitches(self):
Script.registerSwitch("T:", "TransID=", "TransID to Check/Fix", self.setTransID)
Script.registerSwitch("X", "Enabled", "Enable the changes", self.setEnabled)
@Script()
def main():
PARAMS = Params()
PARAMS.registerSwitches()
Script.parseCommandLine(ignoreErrors=False)
# Create Data Recovery Agent and run over single transformation.
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC.TransformationSystem.Agent.DataRecoveryAgent import DataRecoveryAgent
DRA = DataRecoveryAgent("Transformation/DataRecoveryAgent", "Transformation/DataRecoveryAgent")
DRA.jobStatus = ["Done", "Failed"]
DRA.enabled = PARAMS.enabled
TRANSFORMATION = TransformationClient().getTransformations(condDict={"TransformationID": PARAMS.transID})
if not TRANSFORMATION["OK"]:
gLogger.error("Failed to find transformation: %s" % TRANSFORMATION["Message"])
exit(1)
if not TRANSFORMATION["Value"]:
gLogger.error("Did not find any transformations")
exit(1)
TRANS_INFO_DICT = TRANSFORMATION["Value"][0]
TRANS_INFO_DICT.pop("Body", None)
gLogger.notice("Found transformation: %s" % TRANS_INFO_DICT)
DRA.treatTransformation(PARAMS.transID, TRANS_INFO_DICT)
exit(0)
if __name__ == "__main__":
main()
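# Hypothetical invocation examples (not part of the original script); the
# switch names come from Params.registerSwitches above:
#
#   dirac-transformation-recover-data -T 1234       # dry run for transformation 1234
#   dirac-transformation-recover-data -T 1234 -X    # actually apply the changes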
|
ic-hep/DIRAC
|
src/DIRAC/TransformationSystem/scripts/dirac_transformation_recover_data.py
|
Python
|
gpl-3.0
| 2,021
|
[
"DIRAC"
] |
c53877cf1eed8b7f7fe7e1f7b09fba8c1014c45574ca904a7545174313f6c58b
|
"""hiPhive force constants calculator interface."""
# Copyright (C) 2018 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
def phonopy_atoms_to_ase(atoms_phonopy):
"""Convert PhonopyAtoms to Atoms."""
try:
from ase.atoms import Atoms
except ImportError:
raise ImportError("ASE python module was not found.")
ase_atoms = Atoms(
cell=atoms_phonopy.cell,
scaled_positions=atoms_phonopy.scaled_positions,
numbers=atoms_phonopy.numbers,
pbc=True,
)
return ase_atoms
def get_fc2(
supercell,
primitive,
displacements,
forces,
symprec,
atom_list=None,
options=None,
log_level=0,
):
"""Calculate fc2 using hiPhive."""
if log_level:
msg = [
"-------------------------------"
" hiPhive start "
"------------------------------",
"hiPhive is a non-trivial force constants calculator. "
"Please cite the paper:",
'"The Hiphive Package for the Extraction of High‐Order Force ' "Constants",
' by Machine Learning"',
"by Fredrik Eriksson, Erik Fransson, and Paul Erhart,",
"Advanced Theory and Simulations, DOI:10.1002/adts.201800184 " "(2019)",
"",
]
print("\n".join(msg))
fc2 = run_hiphive(
supercell=supercell,
primitive=primitive,
displacements=displacements,
forces=forces,
options=options,
symprec=symprec,
log_level=log_level,
)
p2s_map = primitive.p2s_map
is_compact_fc = atom_list is not None and (atom_list == p2s_map).all()
if is_compact_fc:
fc2 = np.array(fc2[p2s_map], dtype="double", order="C")
elif atom_list is not None:
fc2 = np.array(fc2[atom_list], dtype="double", order="C")
if log_level:
print(
"--------------------------------"
" hiPhive end "
"-------------------------------"
)
return fc2
def run_hiphive(
supercell, primitive, displacements, forces, options, symprec, log_level
):
"""Run hiphive.
supercell : Supercell
Perfect supercell.
primitive : Primitive
Primitive cell.
displacements : ndarray
Displacements of atoms in supercell.
shape=(supercells, natom, 3)
forces : ndarray
Forces on atoms in supercell.
shape=(supercells, natom, 3)
options : str
Force constants calculation options.
log_level : int
        Log control. 0: quiet, 1: normal, 2: verbose, 3: debug
"""
try:
from hiphive import ClusterSpace, ForceConstantPotential, StructureContainer
from hiphive.cutoffs import estimate_maximum_cutoff
from hiphive.fitting import Optimizer
from hiphive.input_output.logging_tools import set_config
except ImportError:
raise ImportError("hiPhive python module was not found.")
set_config(level=30)
ase_supercell = phonopy_atoms_to_ase(supercell)
ase_prim = phonopy_atoms_to_ase(primitive)
# setup training structures
structures = []
for d, f in zip(displacements, forces):
structure = ase_supercell.copy()
structure.new_array("displacements", d)
structure.new_array("forces", f)
structures.append(structure)
# parse options
if options is None:
options_dict = {}
else:
options_dict = _decode_options(options)
# select cutoff
max_cutoff = estimate_maximum_cutoff(ase_supercell) - 1e-5
if "cutoff" in options_dict:
cutoff = options_dict["cutoff"]
if cutoff > max_cutoff:
raise ValueError(
"Cutoff {:.4f} is larger than maximum allowed "
"cutoff, {:.4f}, for the given supercell."
"\nDecrease cutoff or provide larger supercells.".format(
cutoff, max_cutoff
)
)
else:
cutoff = max_cutoff
# setup ClusterSpace
cutoffs = [cutoff]
cs = ClusterSpace(ase_prim, cutoffs, symprec=symprec)
cs.print_orbits()
sc = StructureContainer(cs)
for structure in structures:
sc.add_structure(structure)
n_rows, n_cols = sc.data_shape
if n_rows < n_cols:
raise ValueError(
"Fitting problem is under-determined."
"\nProvide more structures or decrease cutoff."
)
# Estimate error
opt = Optimizer(sc.get_fit_data(), train_size=0.75)
opt.train()
print(opt)
print("RMSE train : {:.4f}".format(opt.rmse_train))
print("RMSE test : {:.4f}".format(opt.rmse_test))
# Final train
opt = Optimizer(sc.get_fit_data(), train_size=1.0)
opt.train()
# get force constants
fcp = ForceConstantPotential(cs, opt.parameters)
fcs = fcp.get_force_constants(ase_supercell)
fc2 = fcs.get_fc_array(order=2)
return fc2
def _decode_options(options):
"""Parse options given in str.
When options = 'cutoff = 4.0', options is converted to {'cutoff': 4.0}.
In this implementation (can be modified), using phonopy command line
options, ``options`` is passed by --fc-calc-opt such as::
phonopy --hiphiveph --fc-calc-opt "cutoff = 4" ...
"""
option_dict = {}
for pair in options.split(","):
key, value = [x.strip() for x in pair.split("=")]
if key == "cutoff":
option_dict[key] = float(value)
return option_dict
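# NOTE: hypothetical sketch, not part of the original module. _decode_options
# turns the comma-separated key=value string passed on the phonopy command
# line into a dict:
def _demo_decode_options():
    options = _decode_options("cutoff = 4.0")
    assert options == {"cutoff": 4.0}
    return options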
|
atztogo/phonopy
|
phonopy/interface/hiphive_interface.py
|
Python
|
bsd-3-clause
| 7,020
|
[
"ASE",
"phonopy"
] |
d8ff69c8e3da13fbc6211acc6e5c9447f0362c65f3ae71a4d4c7efbb2530bff8
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkImageImport(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkImageImport(), 'Processing.',
(), ('vtkImageData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkImageImport.py
|
Python
|
bsd-3-clause
| 470
|
[
"VTK"
] |
43627b7d0558cfa77e3c2ccebc86298052af3e8e896688a753905f9b46dd4e1e
|
__author__ = 'tylin'
__version__ = '1.0.1'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into a Python dictionary.
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# segToMask - Convert polygon segmentation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>segToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import datetime
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
from skimage.draw import polygon
import urllib
import copy
import itertools
import mask
import os
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset = {}
self.anns = []
self.imgToAnns = {}
self.catToImgs = {}
self.imgs = {}
self.cats = {}
if not annotation_file == None:
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
print('Done (t=%0.2fs)'%(time.time()- tic))
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print('creating index...')
anns = {}
imgToAnns = {}
catToImgs = {}
cats = {}
imgs = {}
if 'annotations' in self.dataset:
imgToAnns = {ann['image_id']: [] for ann in self.dataset['annotations']}
anns = {ann['id']: [] for ann in self.dataset['annotations']}
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']] += [ann]
anns[ann['id']] = ann
if 'images' in self.dataset:
imgs = {im['id']: {} for im in self.dataset['images']}
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
cats = {cat['id']: [] for cat in self.dataset['categories']}
for cat in self.dataset['categories']:
cats[cat['id']] = cat
catToImgs = {cat['id']: [] for cat in self.dataset['categories']}
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']] += [ann['image_id']]
print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('%s: %s'%(key, value))
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
# this can be changed by defaultdict
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
        Get cat ids that satisfy given filter conditions. Default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
        Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
if datasetType == 'instances':
ax = plt.gca()
polygons = []
color = []
for ann in anns:
c = np.random.random((1, 3)).tolist()[0]
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
                        poly = np.array(seg).reshape((len(seg)//2, 2))
polygons.append(Polygon(poly, True,alpha=0.4))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = mask.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
p = PatchCollection(polygons, facecolors=color, edgecolors=(0,0,0,1), linewidths=3, alpha=0.4)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print(ann['caption'])
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
# res.dataset['info'] = copy.deepcopy(self.dataset['info'])
# res.dataset['licenses'] = copy.deepcopy(self.dataset['licenses'])
print('Loading and preparing results... ')
tic = time.time()
anns = json.load(open(resFile))
        assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = mask.area([ann['segmentation']])[0]
if not 'bbox' in ann:
ann['bbox'] = mask.toBbox([ann['segmentation']])[0]
ann['id'] = id+1
ann['iscrowd'] = 0
print('DONE (t=%0.2fs)'%(time.time()- tic))
res.dataset['annotations'] = anns
res.createIndex()
return res
def download( self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print('Please specify target directory')
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urllib.urlretrieve(img['coco_url'], fname)
print('downloaded %d/%d images (t=%.1fs)'%(i, N, time.time()- tic))
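# NOTE: hypothetical usage sketch, not part of the original module. Typical
# read-only use of the API defined above (the annotation file path is a
# placeholder):
def _demo_coco_usage(annotation_file):
    coco = COCO(annotation_file)
    cat_ids = coco.getCatIds(catNms=['person'])
    img_ids = coco.getImgIds(catIds=cat_ids)
    ann_ids = coco.getAnnIds(imgIds=img_ids[:1], catIds=cat_ids, iscrowd=None)
    anns = coco.loadAnns(ann_ids)
    coco.showAnns(anns)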
|
CivilNet/Gemfield
|
dockerfiles/py-faster-rcnn/files/gemfield/py-faster-rcnn/lib/pycocotools/coco.py
|
Python
|
gpl-3.0
| 14,891
|
[
"VisIt"
] |
6ee13da76ab022a328af1d1d1e61fe634e63627088bc9b77f7e80dcf9861604b
|
import sys
import unittest
sys.path.append("..")
from NeuralNet import *
class NeuronTest(unittest.TestCase):
def testWeights(self):
_weights = 10
Neuron = neuron(_weights)
self.assertEqual(len(Neuron.weights),_weights+1)
self.assertEqual(Neuron.numinputs,_weights+1)
self.assertNotEqual(Neuron.weights,None)
class neuronLayerTest(unittest.TestCase):
def testconstructor(self):
nrons = 10
inpts = 3
nlayer = neuronLayer(nrons,inpts)
self.assertEqual(nlayer.numNeurons,nrons)
self.assertEqual(len(nlayer.nhiddenlayer),nrons)
self.assertNotEqual(nlayer.nhiddenlayer,None)
def testhiddenlayer(self):
nrons = 10
inpts = 3
nlayer = neuronLayer(nrons,inpts)
for i in xrange(nrons):
self.assertTrue(nlayer.gethiddenlayer(i))
def testnumneurons(self):
nrons = 10
inpts = 3
nlayer = neuronLayer(nrons,inpts)
self.assertTrue(nlayer.getnumNeurons(),nrons)
class nnetTest(unittest.TestCase):
def testconstructor(self):
nettest = nnet()
self.assertEqual(nettest.numinputs,0)
self.assertEqual(nettest.numoutputs,0)
self.assertEqual(nettest.hiddenlayers,0)
self.assertEqual(nettest.neuronsperhidden,0)
        # if there are no hidden layers, nnet creates one output layer
self.assertEqual(len(nettest.neuronlayerlist),1)
self.assertEqual(nettest.bias,0.5)
def testgetweights(self):
nettest = nnet()
self.assertEqual(len(nettest.getWeights()),0)
if __name__ == '__main__':
unittest.main()
|
washt/NeuralNetPy
|
tests/nnetTest.py
|
Python
|
mit
| 1,445
|
[
"NEURON"
] |
f4fb1bf000eb1a7c35371e71e15207e495fe3cf548e6e571eac55c43c714dc1f
|
# Copyright (C) 2015-2022: The University of Edinburgh
# Authors: Craig Warren and Antonis Giannopoulos
#
# This file is part of gprMax.
#
# gprMax is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gprMax is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gprMax. If not, see <http://www.gnu.org/licenses/>.
from collections import OrderedDict
import datetime
from importlib import import_module
import os
import pickle
import sys
from time import perf_counter
from colorama import init, Fore, Style
init()
import numpy as np
from gprMax.constants import floattype
from gprMax.exceptions import CmdInputError
from gprMax.gprMax import run_std_sim
from gprMax.gprMax import run_mpi_sim
from gprMax.utilities import get_terminal_width
from gprMax.utilities import open_path_file
def run_opt_sim(args, inputfile, usernamespace):
"""Run a simulation using Taguchi's optmisation process.
Args:
args (dict): Namespace with command line arguments
inputfile (object): File object for the input file.
usernamespace (dict): Namespace that can be accessed by user
in any Python code blocks in input file.
"""
tsimstart = perf_counter()
if args.n > 1:
raise CmdInputError('When a Taguchi optimisation is being carried out the number of model runs argument is not required')
inputfileparts = os.path.splitext(inputfile.name)
# Default maximum number of iterations of optimisation to perform (used
# if the stopping criterion is not achieved)
maxiterations = 20
# Process Taguchi code blocks in the input file; pass in ordered
# dictionary to hold parameters to optimise
tmp = usernamespace.copy()
tmp.update({'optparams': OrderedDict()})
taguchinamespace = taguchi_code_blocks(inputfile, tmp)
# Extract dictionaries and variables containing initialisation parameters
optparams = taguchinamespace['optparams']
fitness = taguchinamespace['fitness']
if 'maxiterations' in taguchinamespace:
maxiterations = taguchinamespace['maxiterations']
# Store initial parameter ranges
optparamsinit = list(optparams.items())
    # Dictionary to hold history of optimised values of parameters
optparamshist = OrderedDict((key, list()) for key in optparams)
# Import specified fitness function
fitness_metric = getattr(import_module('user_libs.optimisation_taguchi.fitness_functions'), fitness['name'])
# Select OA
OA, N, cols, k, s, t = construct_OA(optparams)
taguchistr = '\n--- Taguchi optimisation'
print('{} {}\n'.format(taguchistr, '-' * (get_terminal_width() - 1 - len(taguchistr))))
print('Orthogonal array: {:g} experiments per iteration, {:g} parameters ({:g} will be used), {:g} levels, and strength {:g}'.format(N, cols, k, s, t))
tmp = [(k, v) for k, v in optparams.items()]
print('Parameters to optimise with ranges: {}'.format(str(tmp).strip('[]')))
print('Output name(s) from model: {}'.format(fitness['args']['outputs']))
print('Fitness function "{}" with stopping criterion {:g}'.format(fitness['name'], fitness['stop']))
print('Maximum iterations: {:g}'.format(maxiterations))
# Initialise arrays and lists to store parameters required throughout optimisation
# Lower, central, and upper values for each parameter
levels = np.zeros((s, k), dtype=floattype)
# Optimal lower, central, or upper value for each parameter
levelsopt = np.zeros(k, dtype=np.uint8)
# Difference used to set values for levels
levelsdiff = np.zeros(k, dtype=floattype)
# History of fitness values from each confirmation experiment
fitnessvalueshist = []
iteration = 0
while iteration < maxiterations:
# Reset number of model runs to number of experiments
args.n = N
usernamespace['number_model_runs'] = N
# Fitness values for each experiment
fitnessvalues = []
# Set parameter ranges and define experiments
optparams, levels, levelsdiff = calculate_ranges_experiments(optparams, optparamsinit, levels, levelsopt, levelsdiff, OA, N, k, s, iteration)
# Run model for each experiment
# Mixed mode MPI with OpenMP or CUDA - MPI task farm for models with
# each model parallelised with OpenMP (CPU) or CUDA (GPU)
if args.mpi:
run_mpi_sim(args, inputfile, usernamespace, optparams)
# Standard behaviour - models run serially with each model parallelised
# with OpenMP (CPU) or CUDA (GPU)
else:
run_std_sim(args, inputfile, usernamespace, optparams)
# Calculate fitness value for each experiment
for experiment in range(1, N + 1):
outputfile = inputfileparts[0] + str(experiment) + '.out'
fitnessvalues.append(fitness_metric(outputfile, fitness['args']))
os.remove(outputfile)
taguchistr = '\n--- Taguchi optimisation, iteration {}: {} initial experiments with fitness values {}.'.format(iteration + 1, N, fitnessvalues)
print('{} {}\n'.format(taguchistr, '-' * (get_terminal_width() - 1 - len(taguchistr))))
# Calculate optimal levels from fitness values by building a response
# table; update dictionary of parameters with optimal values
optparams, levelsopt = calculate_optimal_levels(optparams, levels, levelsopt, fitnessvalues, OA, N, k)
# Update dictionary with history of parameters with optimal values
for key, value in optparams.items():
optparamshist[key].append(value[0])
# Run a confirmation experiment with optimal values
args.n = 1
usernamespace['number_model_runs'] = 1
# Mixed mode MPI with OpenMP or CUDA - MPI task farm for models with
# each model parallelised with OpenMP (CPU) or CUDA (GPU)
if args.mpi:
run_mpi_sim(args, inputfile, usernamespace, optparams)
# Standard behaviour - models run serially with each model parallelised
# with OpenMP (CPU) or CUDA (GPU)
else:
run_std_sim(args, inputfile, usernamespace, optparams)
# Calculate fitness value for confirmation experiment
outputfile = inputfileparts[0] + '.out'
fitnessvalueshist.append(fitness_metric(outputfile, fitness['args']))
        # Rename confirmation experiment output file so that it is retained for each iteration
os.rename(outputfile, os.path.splitext(outputfile)[0] + '_final' + str(iteration + 1) + '.out')
taguchistr = '\n--- Taguchi optimisation, iteration {} completed. History of optimal parameter values {} and of fitness values {}'.format(iteration + 1, dict(optparamshist), fitnessvalueshist)
print('{} {}\n'.format(taguchistr, '-' * (get_terminal_width() - 1 - len(taguchistr))))
iteration += 1
# Stop optimisation if stopping criterion has been reached
if fitnessvalueshist[iteration - 1] > fitness['stop']:
taguchistr = '\n--- Taguchi optimisation stopped as fitness criteria reached: {:g} > {:g}'.format(fitnessvalueshist[iteration - 1], fitness['stop'])
print('{} {}\n'.format(taguchistr, '-' * (get_terminal_width() - 1 - len(taguchistr))))
break
# Stop optimisation if successive fitness values are within a percentage threshold
fitnessvaluesthres = 0.1
if iteration > 2:
fitnessvaluesclose = (np.abs(fitnessvalueshist[iteration - 2] - fitnessvalueshist[iteration - 1]) / fitnessvalueshist[iteration - 1]) * 100
if fitnessvaluesclose < fitnessvaluesthres:
taguchistr = '\n--- Taguchi optimisation stopped as successive fitness values within {}%'.format(fitnessvaluesthres)
print('{} {}\n'.format(taguchistr, '-' * (get_terminal_width() - 1 - len(taguchistr))))
break
tsimend = perf_counter()
# Save optimisation parameters history and fitness values history to file
opthistfile = inputfileparts[0] + '_hist.pickle'
with open(opthistfile, 'wb') as f:
pickle.dump(optparamshist, f)
pickle.dump(fitnessvalueshist, f)
pickle.dump(optparamsinit, f)
taguchistr = '\n=== Taguchi optimisation completed in [HH:MM:SS]: {} after {} iteration(s)'.format(datetime.timedelta(seconds=int(tsimend - tsimstart)), iteration)
print('{} {}\n'.format(taguchistr, '=' * (get_terminal_width() - 1 - len(taguchistr))))
print('History of optimal parameter values {} and of fitness values {}\n'.format(dict(optparamshist), fitnessvalueshist))
def taguchi_code_blocks(inputfile, taguchinamespace):
"""
Looks for and processes a Taguchi code block (containing Python code) in
the input file. It will ignore any lines that are comments, i.e. begin
with a double hash (##), and any blank lines.
Args:
inputfile (object): File object for the input file.
taguchinamespace (dict): Namespace that can be accessed by user a
Taguchi code block in input file.
Returns:
processedlines (list): Input commands after Python processing.
"""
# Strip out any newline characters and comments that must begin with double hashes
inputlines = [line.rstrip() for line in inputfile if(not line.startswith('##') and line.rstrip('\n'))]
# Rewind input file in preparation for passing to standard command reading function
inputfile.seek(0)
# Store length of dict
taglength = len(taguchinamespace)
x = 0
while(x < len(inputlines)):
if(inputlines[x].startswith('#taguchi:')):
# String to hold Python code to be executed
taguchicode = ''
x += 1
while not inputlines[x].startswith('#end_taguchi:'):
# Add all code in current code block to string
taguchicode += inputlines[x] + '\n'
x += 1
if x == len(inputlines):
raise CmdInputError('Cannot find the end of the Taguchi code block, i.e. missing #end_taguchi: command.')
# Compile code for faster execution
taguchicompiledcode = compile(taguchicode, '<string>', 'exec')
            # Execute the code block, exposing only the Taguchi namespace
exec(taguchicompiledcode, taguchinamespace)
x += 1
# Check if any Taguchi code blocks were found
if len(taguchinamespace) == taglength:
raise CmdInputError('No #taguchi and #end_taguchi code blocks found.')
return taguchinamespace
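# NOTE: hypothetical example, not part of the original module. A minimal
# #taguchi code block in an input file defines the names read above
# (``optparams``, ``fitness``, optionally ``maxiterations``); the fitness
# function name must match one defined in
# user_libs.optimisation_taguchi.fitness_functions, and the parameter and
# output names below are placeholders:
#
#   #taguchi:
#   optparams['resistance'] = [0.1, 1000]
#   fitness = {'name': 'min_max_value',
#              'args': {'outputs': ['Ex60mm']},
#              'stop': 50}
#   maxiterations = 5
#   #end_taguchi: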
def construct_OA(optparams):
"""
Load an orthogonal array (OA) from a numpy file. Configure and
return OA and properties of OA.
Args:
optparams (dict): Dictionary containing name of parameters to
optimise and their initial ranges
Returns:
OA (array): Orthogonal array
N (int): Number of experiments in OA
cols (int): Number of columns in OA
k (int): Number of columns in OA cut down to number of parameters to optimise
s (int): Number of levels in OA
t (int): Strength of OA
"""
oadirectory = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, 'user_libs', 'optimisation_taguchi')
oadirectory = os.path.abspath(oadirectory)
# Properties of the orthogonal array (OA)
# Strength
t = 2
# Number of levels
s = 3
# Number of parameters to optimise
k = len(optparams)
# Load the appropriate OA
if k <= 4:
OA = np.load(os.path.join(oadirectory, 'OA_9_4_3_2.npy'))
# Number of experiments
N = OA.shape[0]
# Number of columns of OA before cut down
cols = OA.shape[1]
# Cut down OA columns to number of parameters to optimise
OA = OA[:, 0:k]
elif k <= 7:
OA = np.load(os.path.join(oadirectory, 'OA_18_7_3_2.npy'))
# Number of experiments
N = OA.shape[0]
# Number of columns of OA before cut down
cols = OA.shape[1]
# Cut down OA columns to number of parameters to optimise
OA = OA[:, 0:k]
else:
# THIS CASE NEEDS FURTHER TESTING
print(Fore.RED + 'WARNING: Optimising more than 7 parameters is currently an experimental feature!' + Style.RESET_ALL)
p = int(np.ceil(np.log(k * (s - 1) + 1) / np.log(s)))
# Number of experiments
N = s**p
# Number of columns
cols = int((N - 1) / (s - 1))
# Algorithm to construct OA from:
# http://ieeexplore.ieee.org/xpl/articleDetails.jsp?reload=true&arnumber=6812898
OA = np.zeros((N + 1, cols + 1), dtype=np.int8)
# Construct basic columns
for ii in range(1, p + 1):
col = int((s**(ii - 1) - 1) / (s - 1) + 1)
for row in range(1, N + 1):
OA[row, col] = np.mod(np.floor((row - 1) / (s**(p - ii))), s)
# Construct non-basic columns
for ii in range(2, p + 1):
col = int((s**(ii - 1) - 1) / (s - 1) + 1)
for jj in range(1, col):
for kk in range(1, s):
OA[:, col + (jj - 1) * (s - 1) + kk] = np.mod(OA[:, jj] * kk + OA[:, col], s)
# First row and first columns are unneccessary, only there to
# match algorithm, and cut down columns to number of parameters to optimise
OA = OA[1:, 1:k + 1]
return OA, N, cols, k, s, t
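# NOTE: hypothetical sketch, not part of the original module. A strength-2
# orthogonal array with s levels has the property that, in every pair of
# columns, each ordered pair of levels occurs equally often; this can be
# checked directly on the array returned by construct_OA:
def _check_OA_strength2(OA, s=3):
    from itertools import combinations, product
    for c1, c2 in combinations(range(OA.shape[1]), 2):
        counts = {pair: 0 for pair in product(range(s), repeat=2)}
        for row in range(OA.shape[0]):
            counts[(int(OA[row, c1]), int(OA[row, c2]))] += 1
        assert len(set(counts.values())) == 1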
def calculate_ranges_experiments(optparams, optparamsinit, levels, levelsopt, levelsdiff, OA, N, k, s, i):
"""Calculate values for parameters to optimise for a set of experiments.
Args:
optparams (dict): Ordered dictionary containing name of parameters to optimise and their values
optparamsinit (list): Initial ranges for parameters to optimise
levels (array): Lower, central, and upper values for each parameter
levelsopt (array): Optimal level for each parameter from previous iteration
levelsdiff (array): Difference used to set values in levels array
OA (array): Orthogonal array
N (int): Number of experiments in OA
k (int): Number of parameters to optimise in OA
s (int): Number of levels in OA
i (int): Iteration number
Returns:
optparams (dict): Ordered dictionary containing name of parameters to optimise and their values
levels (array): Lower, central, and upper values for each parameter
levelsdiff (array): Difference used to set values in levels array
"""
# Gaussian reduction function used for calculating levels
T = 18 # Usually values between 15 - 20
RR = np.exp(-(i / T)**2)
# Calculate levels for each parameter
for p in range(k):
# Set central level for first iteration to midpoint of initial range and don't use RR
if i == 0:
levels[1, p] = ((optparamsinit[p][1][1] - optparamsinit[p][1][0]) / 2) + optparamsinit[p][1][0]
levelsdiff[p] = (optparamsinit[p][1][1] - optparamsinit[p][1][0]) / (s + 1)
# Set central level to optimum from previous iteration
else:
levels[1, p] = levels[levelsopt[p], p]
levelsdiff[p] = RR * levelsdiff[p]
# Set levels if below initial range
if levels[1, p] - levelsdiff[p] < optparamsinit[p][1][0]:
levels[0, p] = optparamsinit[p][1][0]
levels[1, p] = optparamsinit[p][1][0] + levelsdiff[p]
levels[2, p] = optparamsinit[p][1][0] + 2 * levelsdiff[p]
# Set levels if above initial range
elif levels[1, p] + levelsdiff[p] > optparamsinit[p][1][1]:
levels[0, p] = optparamsinit[p][1][1] - 2 * levelsdiff[p]
levels[1, p] = optparamsinit[p][1][1] - levelsdiff[p]
levels[2, p] = optparamsinit[p][1][1]
# Set levels normally
else:
levels[0, p] = levels[1, p] - levelsdiff[p]
levels[2, p] = levels[1, p] + levelsdiff[p]
# Update dictionary of parameters to optimise with lists of new values; clear dictionary first
optparams = OrderedDict((key, list()) for key in optparams)
p = 0
for key, value in optparams.items():
for exp in range(N):
if OA[exp, p] == 0:
optparams[key].append(levels[0, p])
elif OA[exp, p] == 1:
optparams[key].append(levels[1, p])
elif OA[exp, p] == 2:
optparams[key].append(levels[2, p])
p += 1
return optparams, levels, levelsdiff
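# NOTE: hypothetical sketch, not part of the original module. The Gaussian
# reduction function above is the per-iteration multiplier applied to the
# level spacing; with T = 18 it only drops to about 0.5 around iteration 15,
# so the search range contracts slowly at first:
def _demo_gaussian_reduction(T=18, iterations=20):
    return [np.exp(-(i / T)**2) for i in range(iterations)]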
def calculate_optimal_levels(optparams, levels, levelsopt, fitnessvalues, OA, N, k):
"""Calculate optimal levels from results of fitness metric by building a response table.
Args:
optparams (dict): Ordered dictionary containing name of parameters to optimise and their values
levels (array): Lower, central, and upper values for each parameter
levelsopt (array): Optimal level for each parameter from previous iteration
fitnessvalues (list): Values from results of fitness metric
OA (array): Orthogonal array
N (int): Number of experiments in OA
k (int): Number of parameters to optimise in OA
Returns:
optparams (dict): Ordered dictionary containing name of parameters to optimise and their values
levelsopt (array): Optimal level for each parameter from previous iteration
"""
# Build a table of responses based on the results of the fitness metric
for p in range(k):
responses = np.zeros(3, dtype=floattype)
cnt1 = 0
cnt2 = 0
cnt3 = 0
for exp in range(N):
if OA[exp, p] == 0:
responses[0] += fitnessvalues[exp]
cnt1 += 1
elif OA[exp, p] == 1:
responses[1] += fitnessvalues[exp]
cnt2 += 1
elif OA[exp, p] == 2:
responses[2] += fitnessvalues[exp]
cnt3 += 1
responses[0] /= cnt1
responses[1] /= cnt2
responses[2] /= cnt3
# Calculate optimal level from table of responses
optlevel = np.where(responses == np.amax(responses))[0]
# If 2 experiments produce the same fitness value pick first level
# (this shouldn't happen if the fitness function is designed correctly)
if len(optlevel) > 1:
optlevel = optlevel[0]
levelsopt[p] = optlevel
# Update dictionary of parameters to optimise with lists of new values; clear dictionary first
optparams = OrderedDict((key, list()) for key in optparams)
p = 0
for key, value in optparams.items():
optparams[key].append(levels[levelsopt[p], p])
p += 1
return optparams, levelsopt
def plot_optimisation_history(fitnessvalueshist, optparamshist, optparamsinit):
"""Plot the history of fitness values and each optimised parameter values for the optimisation.
Args:
fitnessvalueshist (list): History of fitness values
optparamshist (dict): Name of parameters to optimise and history of their values
"""
import matplotlib.pyplot as plt
# Plot history of fitness values
fig, ax = plt.subplots(subplot_kw=dict(xlabel='Iterations', ylabel='Fitness value'), num='History of fitness values', figsize=(20, 10), facecolor='w', edgecolor='w')
iterations = np.arange(1, len(fitnessvalueshist) + 1)
ax.plot(iterations, fitnessvalueshist, 'r', marker='.', ms=15, lw=1)
ax.set_xlim(1, len(fitnessvalueshist))
ax.grid()
# Plot history of optimisation parameters
p = 0
for key, value in optparamshist.items():
fig, ax = plt.subplots(subplot_kw=dict(xlabel='Iterations', ylabel='Parameter value'), num='History of ' + key + ' parameter', figsize=(20, 10), facecolor='w', edgecolor='w')
ax.plot(iterations, optparamshist[key], 'r', marker='.', ms=15, lw=1)
ax.set_xlim(1, len(fitnessvalueshist))
ax.set_ylim(optparamsinit[p][1][0], optparamsinit[p][1][1])
ax.grid()
p += 1
plt.show()
|
gprMax/gprMax
|
gprMax/optimisation_taguchi.py
|
Python
|
gpl-3.0
| 20,476
|
[
"Gaussian"
] |
90bd868a111fdbd358d393eeee665ed24de583b3aa8d94b59365a334e869ab15
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pyscf import lib
from pyscf.pbc import gto, scf
from pyscf.pbc.tools import k2gamma
cell = gto.Cell()
cell.a = '''
1.755000 1.755000 -1.755000
1.755000 -1.755000 1.755000
-1.755000 1.755000 1.755000'''
cell.atom = '''Li 0.00000 0.00000 0.00000'''
#same type of basis for different elements
cell.basis = 'gth-szv'
cell.pseudo = {'Li': 'GTH-PBE-q3'}
cell.mesh = [20]*3
cell.verbose = 6
cell.output = '/dev/null'
cell.build()
kpts = cell.make_kpts([2,2,2])
mf = scf.KUKS(cell, kpts)
mf.xc = 'lda,vwn'
mf.kernel()
def tearDownModule():
global cell, mf
del cell, mf
class KnownValues(unittest.TestCase):
def test_k2gamma(self):
popa, popb = mf.mulliken_meta()[0]
self.assertAlmostEqual(lib.finger(popa).sum(), 1.5403023058, 7)
self.assertAlmostEqual(lib.finger(popb).sum(), 1.5403023058, 7)
popa, popb = k2gamma.k2gamma(mf).mulliken_meta()[0]
self.assertAlmostEqual(lib.finger(popa), 0.8007278745, 7)
self.assertAlmostEqual(lib.finger(popb), 0.8007278745, 7)
if __name__ == '__main__':
print("Full Tests for pbc.tools.k2gamma")
unittest.main()
|
gkc1000/pyscf
|
pyscf/pbc/tools/test/test_k2gamma.py
|
Python
|
apache-2.0
| 1,797
|
[
"PySCF"
] |
da030a092e00136fed42aaf512f1f97c388259ebcfc8a17bb82a1f74c2fe2561
|
# -*- coding: UTF-8 -*-
import numpy as np
from numpy.fft import fft
class fgn( object ) :
"""A class to generate fractional Gaussian process of fixed length using
a circulant matrix method suggested by Dietrich and Newsam (1997). For the
best performance N-1 should be a power of two."""
## For better performance N-1 should be a power of two.
def __init__( self, N, H = .5, sigma = 1.0, cache = None ) :
## The circulant embedding method actually generates a pair of independent long-range
## dependent processes.
self.__cache = list( ) if cache is None else cache
## Remember the sample size
self.__N = N
## Setup a local rng
self.__np_rand = None
## Save other parameters
self.__H, self.__sigma = H, sigma
## Reset the internal state of the generator
def reset( self ) :
del self.__cache[:]
## A separate procedure for lazy initialization.
def initialize( self, numpy_random_state ) :
## Reset the internal state and initialize the random number generator
self.reset( )
self.__np_rand = numpy_random_state
## Preallocate arrays
self.__acf_ft = np.empty( 2 * self.__N - 2, dtype = np.float64 )
self.__cplx_output = np.empty( 2 * self.__N - 2, dtype = np.complex128 )
self.__cplx_input = np.empty( 2 * self.__N - 2, dtype = np.complex128 )
## The autocorrelation structure for the fBM is constant provided the Hurst exponent
        ## and the sample size are fixed. "Synthese de la covariance du fGn": synthesise
## the covariance of the fractional Gaussian noise. This autocorrelation function
## models long range (epochal) dependence.
R = np.arange( self.__N, dtype = np.float64 )
## The noise autocorrelation structure is directly derivable from the autocorrelation
## of the time-continuous fBM:
## r(s,t) = .5 * ( |s|^{2H}+|t|^{2H}-|s-t|^{2H} )
        ## If the noise is generated for an equally spaced sampling of an fBM-like process,
## then the autocorrelation function must be multiplied by ∆^{2H}. Since Fourier
## Transform is linear (even the discrete one), this routine can just generate a unit
## variance fractional Gaussian noise.
R = self.__sigma * self.__sigma * .5 * (
np.abs( R - 1 ) ** ( 2.0 * self.__H )
+ np.abs( R + 1 ) ** ( 2.0 * self.__H )
- 2 * np.abs( R ) ** ( 2.0 * self.__H ) )
## Generate the first row of the 2Mx2M Toeplitz matrix, where 2M = N + N-2: it should
## be [ r_0, ..., r_{N-1}, r_{N-2}, ..., r_1 ]
self.__cplx_input[:] = np.append( R, R[::-1][1:-1] ) + 1j * 0
del R
## The circulant matrix, defined by the autocorrelation structure above is necessarily
## positive definite, which is equivalent to the FFT of any its row being non-negative.
self.__cplx_output[:] = fft( self.__cplx_input )
## Due to numerical round-off errors we truncate close to zero negative real Fourier
## coefficients.
self.__acf_ft[:] = np.sqrt( np.maximum( np.real( self.__cplx_output ), 0.0 ) / ( 2 * self.__N - 2 ) )
## fGn generator via circulant embedding method
def __gen( self ) :
## Basically the idea is to utilize the convolution property of the Fourier Transform
## and multiply the transform of the autocorrelation function by the independent
## Gaussian white noise in the frequency domain and then get back to the time domain.
## cf. \url{ http://www.thefouriertransform.com/transform/properties.php }
## Begin with generation of the Gaussian white noise with unit variance and zero mean.
self.__cplx_input[:] = self.__np_rand.randn( 2 * self.__N - 2 ) + 1j * self.__np_rand.randn( 2 * self.__N - 2 )
## Compute the convolution of the circulant row (of autocorrelations) with the noise.
self.__cplx_input *= self.__acf_ft
## "%% ATTENTION: ne pas utiliser ifft, qui utilise une normalisation differente"
## Compute this (see p.~1091 [Dietrich, Newsam; 1997]) :
## F \times (\frac{1}{2M}\Lambda)^\frac{1}{2} \times w
self.__cplx_output[:] = fft( self.__cplx_input )
## [Dietrich, Newsam; 1997] write : "In our case the real and imaginary parts of any N
## consecutive entries yield two independent realizations of \mathcal{N}_N(0,R) where
## $R$ is the autocorrelation structure of an fBM."
## Therefore take the first N complex draws to get a pair of independent realizations.
return ( np.real( self.__cplx_output[ :self.__N ] ), np.imag( self.__cplx_output[ :self.__N ] ) )
def __del__( self ) :
self.reset( )
    ## A visible function, to generate the sample
def __call__( self ) :
## Generate the next sample only if needed.
if not self.__cache :
self.__cache.extend( self.__gen( ) )
## Return a pregenerated sample
return self.__cache.pop( )
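# Illustrative usage sketch (not part of the original module): draw two
# independent fractional Gaussian noise samples of length N = 2**10 + 1
# (so that N - 1 is a power of two) with Hurst exponent H = 0.7. The
# generator must be initialized with a numpy RandomState before sampling.
def _example_usage( ) :
    gen = fgn( N = 1025, H = 0.7, sigma = 1.0 )
    gen.initialize( np.random.RandomState( 42 ) )
    ## Each FFT pass yields a pair of independent realizations; successive
    ## calls first drain the cached one, then generate a fresh pair.
    return gen( ), gen( )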
|
ivannz/study_notes
|
year_14_15/course_project/release/processes/fgn_numpy.py
|
Python
|
mit
| 4,560
|
[
"Gaussian"
] |
192c7419e52a8e9218bb86012fc9363415663253a94e7832840c4a3c1efdfcbe
|
# $Id$
#
# Copyright (C) 2004-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" unit testing code for MOE-type descriptors with EStates
"""
import os
import unittest
from rdkit import Chem
from rdkit.Chem.EState import EState_VSA
class TestCase(unittest.TestCase):
@staticmethod
def referenceData():
filename = os.sep.join(
[os.path.dirname(os.path.abspath(__file__)), 'test_data', 'EState_VSA.csv'])
with open(filename) as fin:
header = fin.readline()
header = [s.strip() for s in header.split(',')][1:]
funcEstates = dict((k, getattr(EState_VSA, k)) for k in header)
yield funcEstates
for line in fin:
line = [s.strip() for s in line.split(',')]
smiles = line.pop(0)
mol = Chem.MolFromSmiles(smiles)
data = dict((k, float(v)) for k, v in zip(header, line))
yield smiles, mol, data
def test1(self):
referenceData = self.referenceData()
funcEstates = next(referenceData)
for smiles, mol, data in referenceData:
for name in funcEstates:
calc = funcEstates[name](mol)
exp = data[name]
self.assertAlmostEqual(calc, exp, delta=1e-4,
msg='{0}: {1:.4f}!={2:.4f}'.format(smiles, calc, exp))
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
bp-kelley/rdkit
|
rdkit/Chem/EState/UnitTestVSA.py
|
Python
|
bsd-3-clause
| 1,529
|
[
"MOE",
"RDKit"
] |
174e5271a22811e5e9f02075bc9733561e4b03867fdedd898eb2f69737786d7c
|
#!/usr/bin/env python
"""Copyright 2008 Phidgets Inc.
This work is licensed under the Creative Commons Attribution 2.5 Canada License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/2.5/ca/
"""
__author__ = 'Adam Stelmack'
__version__ = '2.1.4'
__date__ = 'May 02 2008'
#Basic imports
from ctypes import *
import sys
#Phidget specific imports
from Phidgets.PhidgetException import *
from Phidgets.Events.Events import *
from Phidgets.Devices.LED import *
from time import sleep
#Create an LED object
led = LED()
#Information Display Function
def displayDeviceInfo():
print "|------------|----------------------------------|--------------|------------|"
print "|- Attached -|- Type -|- Serial No. -|- Version -|"
print "|------------|----------------------------------|--------------|------------|"
print "|- %8s -|- %30s -|- %10d -|- %8d -|" % (led.isAttached(), led.getDeviceType(), led.getSerialNum(), led.getDeviceVersion())
print "|------------|----------------------------------|--------------|------------|"
return 0
#Event Handler Callback Functions
def ledAttached(e):
attached = e.device
print "LED %i Attached!" % (attached.getSerialNum())
return 0
def ledDetached(e):
detached = e.device
print "LED %i Detached!" % (detached.getSerialNum())
return 0
def ledError(e):
print "Phidget Error %i: %s" % (e.eCode, e.description)
return 0
#Main Program Code
try:
led.setOnAttachHandler(ledAttached)
led.setOnDetachHandler(ledDetached)
led.setOnErrorhandler(ledError)
except PhidgetException, e:
print "Phidget Exception %i: %s" % (e.code, e.message)
exit(1)
print "Opening phidget object..."
try:
led.openPhidget()
except PhidgetException, e:
print "Phidget Exception %i: %s" % (e.code, e.message)
exit(1)
print "Waiting for attach...."
try:
led.waitForAttach(10000)
except PhidgetException, e:
print "Phidget Exception %i: %s" % (e.code, e.message)
try:
led.closePhidget()
except PhidgetException, e:
print "Phidget Exception %i: %s" % (e.code, e.message)
exit(1)
exit(1)
else:
displayDeviceInfo()
#This example assumes that there are LED's plugged into locations 0-7
print "Turning on LED's 0 - 9"
for i in range(8):
if i < 7:
print i,
else:
print i
led.setDiscreteLED(i, 100)
sleep(1)
print "Turning off LED's 0 - 9"
for i in range(8):
if i < 7:
print i,
else:
print i
led.setDiscreteLED(i, 0)
sleep(1)
print "Press Enter to quit...."
chr = sys.stdin.read(1)
print "Closing..."
try:
led.closePhidget()
except PhidgetException, e:
print "Phidget Exception %i: %s" % (e.code, e.message)
print "Exiting..."
exit(1)
print "Done."
exit(0)
|
jantman/tuxostat
|
fs_backup/home/tuxostat/devel/Python/LED-simple.py
|
Python
|
gpl-3.0
| 2,952
|
[
"VisIt"
] |
c6a5cb35fcf9a829a85d3b79571e514fcb764fe42ef5ff0374745b727f4b3de2
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
# Further (to handle cases like ../cwd), make it relative to cwd)
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
@memoize
def RelativePath(path, relative_to):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# Convert to absolute (and therefore normalized paths).
path = os.path.abspath(path)
relative_to = os.path.abspath(relative_to)
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer:
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
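# Illustrative usage sketch (not part of gyp): WriteOnDiff leaves the target
# untouched (preserving its mtime) when the regenerated content is identical
# to what is already on disk; the path below is hypothetical.
def _ExampleWriteOnDiff():
  out = WriteOnDiff('/tmp/example.gypi')
  out.write('contents\n')
  out.close()  # renames the temp file onto the target only if it differs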
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
}
if 'flavor' in params:
return params['flavor']
if sys.platform in flavors:
return flavors[sys.platform]
if sys.platform.startswith('sunos'):
return 'solaris'
if sys.platform.startswith('freebsd'):
return 'freebsd'
return 'linux'
def CopyTool(flavor, out_path):
"""Finds (mac|sun|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
prefix = { 'solaris': 'sun', 'mac': 'mac', 'win': 'win' }.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
A list containing all of the node in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
      return re.findall(r'\$\(([^)]*)\)', graph[node])
print TopologicallySorted(graph.keys(), GetEdges)
==>
    ['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
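# Illustrative sketch (not part of gyp): encoding a command line for a POSIX
# shell with the helpers above; arguments containing whitespace or shell
# metacharacters are double-quoted, $VAR expansions are left intact, and
# double quotes themselves are backslash-escaped.
def _ExampleEncode():
  args = ['echo', 'hello world', '$HOME', 'a"b']
  return EncodePOSIXShellList(args)
  # -> echo "hello world" "$HOME" a\"b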
|
lianliuwei/gyp
|
pylib/gyp/common.py
|
Python
|
bsd-3-clause
| 15,816
|
[
"VisIt"
] |
757cf8bdd0ba5156fb28d49f06961e189de692f7afc9f27b41ccd39abdbbdc0e
|
#!/usr/bin/env python
'A model of the earth and its magnetic field, with earth center as origin.'
from __future__ import division # avoid integer division
import numpy as np
import mayavi
from mayavi.mlab import *
def makeRcoordinates(x,y,z):
'''Makes r length and r^hat from coordinates'''
r = np.sqrt(x**2 + y**2 + z**2)
x_hat = x / r
y_hat = y / r
z_hat = z / r
return r, x_hat, y_hat, z_hat
def calcMdotRhat(m, x_hat, y_hat, z_hat):
'''Calculates m dot r^hat'''
mr = m[0]*x_hat + m[1]*y_hat + m[2]*z_hat
return mr
def calcBfield(r, x_hat, y_hat, z_hat, m, mr):
'''Calculate B-field from r^-3*(3*(m dot r^hat)*r^hat - m)'''
bx = r**-3*(3*mr*x_hat-m[0])
by = r**-3*(3*mr*y_hat-m[1])
bz = r**-3*(3*mr*z_hat-m[2])
# where r=0, B=0 (origin)
bx[np.isnan(bx)] = 0
by[np.isnan(by)] = 0
bz[np.isnan(bz)] = 0
return bx, by, bz
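# Illustrative sanity check (not part of the original script): for a unit
# dipole moment along +z the on-axis field should be (0, 0, 2*m_z/r**3),
# which the expression above reproduces at r = 2.
def _check_on_axis_field():
    r_, xh, yh, zh = makeRcoordinates(np.array([0.]), np.array([0.]), np.array([2.]))
    m_ = np.array([0., 0., 1.])
    mr_ = calcMdotRhat(m_, xh, yh, zh)
    return calcBfield(r_, xh, yh, zh, m_, mr_)  # -> (array([0.]), array([0.]), array([0.25]))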
# Constants
earthRadius = 10 # outer core about 5e3km below surface -> ratio m/r~0.2
m = np.array([-2*np.sin(13./180*np.pi),0,2*np.cos(13./180*np.pi)]) # rot tilt ~23deg, mag tilt ~10deg from rot -> ~13deg from z-axis
# Create grid
n = 13 # 0.5x number of steps
steps = earthRadius / n
x,y,z = np.mgrid[-2*earthRadius:2*earthRadius:steps,-2*earthRadius:2*earthRadius:steps,-2*earthRadius:2*earthRadius:steps]
r,x_hat,y_hat,z_hat = makeRcoordinates(x,y,z)
# create earth
theta, phi = np.mgrid[0:np.pi:11j, 0:np.pi*2:21j]
ex = earthRadius * np.sin(theta) * np.cos(phi)
ey = earthRadius * np.sin(theta) * np.sin(phi)
ez = earthRadius * np.cos(theta)
# Calculate B-field
mr = calcMdotRhat(m, x_hat, y_hat, z_hat)
bx, by, bz = calcBfield(r, x_hat, y_hat, z_hat, m, mr)
# Remove data no longer in use
del m, mr, x_hat, y_hat, z_hat
# Plot
fig = figure(size=(720,720))
# B-field
streamline = flow(x, y, z, bx, by, bz)
streamline.stream_tracer.start_position = np.array([ 0., 0., 0.])
streamline.stream_tracer.progress = 1.0
streamline.stream_tracer.integration_direction = 'both'
# earth
mesh(ex, ey, ez, color=(0, 0, 0))
# viewing
fig.scene.background = (1,1,1) # white background
fig.scene.y_plus_view() # see from Y-axis
fig.scene.camera.roll(90) # roll north to point upwards
fig.scene.show_axes = True
fig.scene.camera.zoom(1.3)
# prevent segfault (malloc too large) on osx
vectors = fig.children[0].children[0].children[0]
vectors.glyph.mask_points.maximum_number_of_points = 1800
# make prettier
vectors.glyph.glyph.scaling = True
vectors.glyph.glyph.range = np.array([ 1.00000000e-05, 1.00000000e-03])
vectors.glyph.glyph.scale_factor = 3.0
vectors.glyph.mask_points.proportional_maximum_number_of_points = True
vectors.glyph.mask_points.generate_vertices = True
vectors.glyph.mask_input_points = True
# save pictures for animation; don't run by default
def createAnimation():
for i in range(360):
fig.scene.camera.azimuth(1)
        filename = str(i) + '.png'
savefig(filename, size=(720,720))
def createAnimation2():
for i in range(360,720):
fig.scene.camera.elevation(1)
fig.scene.camera.orthogonalize_view_up() # http://public.kitware.com/pipermail/vtkusers/2003-July/018794.html
        filename = str(i) + '.png'
savefig(filename, size=(720,720))
|
arve0/TFY4240-Semester-project
|
src/mag_field_flow.py
|
Python
|
mit
| 3,215
|
[
"Mayavi"
] |
7b832762db67e19503a8114f591a51f29eaa2791df12a0732dfb408f38bd55b2
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import sys
import os
from rogerthat import translations
from rogerthat.utils import get_full_language_string
import mc_unittest
from mcfw.properties import azzert
from rogerthat.consts import OFFICIALLY_SUPPORTED_LANGUAGES
from rogerthat.utils.languages import OFFICIALLY_SUPPORTED_ISO_LANGUAGES, WEB_TO_ISO_LANGUAGES
PATH_END = os.path.join('src-test', 'rogerthat_tests', 'sourcechecks')
class Test(mc_unittest.TestCase):
def get_src_dir(self):
src_is_ok = os.path.split(__file__)[0].endswith(PATH_END)
self.assert_(src_is_ok, 'Wrong path: %s' % os.path.split(__file__)[0])
src_dir = os.path.normpath(os.path.join(__file__, '..', '..', '..', '..', 'src'))
self.assert_(os.path.isdir(src_dir), "Couldn't find src dir - tried: %s" % src_dir)
return src_dir
def test_get_server_settings(self):
src_dir = self.get_src_dir()
for root, _, files in os.walk(src_dir):
for pfile in (f for f in files if f.endswith('.py')):
filen_name = os.path.join(root, pfile)
with open(filen_name, 'r') as fin:
for i, line in enumerate(fin.readlines()):
self.assert_(not ('get_server_settings()' in line and line[:4] == " " and line[:4] != "def "),
"Do not use get_server_settings at module level in %s at line %s!" % (filen_name,
i + 1))
def test_python_baseclasses(self):
if sys.platform == "win32":
return
import ast
testself = self
class MyVisitor(ast.NodeVisitor):
def visit_ClassDef(self, node):
baseclasses = []
for base in node.bases:
if hasattr(base, 'id'):
parentclassname = base.id
elif hasattr(base, 'value') and hasattr(base.value, 'id') and hasattr(base, 'attr'):
parentclassname = base.value.id + '.' + base.attr
                    testself.assert_(parentclassname, "Could not interpret base class for node %s\nfile=%s" % (ast.dump(node), self.current_python_file))
baseclasses.append(parentclassname)
testself.assert_(len(baseclasses) > 0, "Could not find baseclasses for node %s\nfile=%s" % (ast.dump(node), self.current_python_file))
testself.assert_('CachedModelMixIn' not in baseclasses or baseclasses[0] == 'CachedModelMixIn', 'CachedModelMixIn MUST be first parent class!!\n--> class %s\n file %s' % (node.name, self.current_python_file))
ast.NodeVisitor.generic_visit(self, node)
m = MyVisitor()
src_dir = self.get_src_dir()
filenames = os.popen('find "%s" -type d -name lib -prune -o -name "*.py"' % src_dir).read().splitlines()
self.assert_(len(filenames) > 0, "Error: couldn't find src files.\nIs the dir correct: %s" % src_dir)
for filename in filenames:
if filename.endswith('rogerthat/bizz/service/mfd/gen.py') or 'lib' in filename:
continue
f = open(filename, 'r')
body = f.read()
f.close()
m.current_python_file = filename
try:
m.visit(ast.parse(body))
except:
print filename
raise
def test_language_dicts(self):
for D in (OFFICIALLY_SUPPORTED_LANGUAGES, WEB_TO_ISO_LANGUAGES, OFFICIALLY_SUPPORTED_ISO_LANGUAGES):
for (a, b) in D.iteritems():
self.assertEqual(unicode, type(a))
self.assertEqual(unicode, type(b))
def test_full_languages(self):
for s in translations.SUPPORTED_LANGUAGES:
azzert(s in OFFICIALLY_SUPPORTED_ISO_LANGUAGES)
azzert(get_full_language_string(s))
|
rogerthat-platform/rogerthat-backend
|
src-test/rogerthat_tests/sourcechecks/test_sourcetest.py
|
Python
|
apache-2.0
| 4,576
|
[
"VisIt"
] |
fb896d70be887c2c0d4e7be19965f5fb466e6670218f35fe5ca7fc2ffc2e2afb
|
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import unittest
import shutil
import time
import mooseutils
import chigger
class TestExodusReader(unittest.TestCase):
"""
Test use of MooseDataFrame for loading/reloading csv files.
"""
@classmethod
def setUpClass(cls):
"""
Copy test files.
"""
cls.single = "{}_single.e".format(cls.__name__)
shutil.copyfile(os.path.abspath('../input/mug_blocks_out.e'), cls.single)
cls.vector = "{}_vector.e".format(cls.__name__)
shutil.copyfile(os.path.abspath('../input/vector_out.e'), cls.vector)
cls.multiple = "{}_multiple".format(cls.__name__)
cls.testfiles = chigger.utils.copy_adaptive_exodus_test_files(cls.multiple)
cls.multiple += '.e'
@classmethod
def tearDownClass(cls):
"""
Remove test files.
"""
for fname in cls.testfiles:
if os.path.exists(fname):
os.remove(fname)
if os.path.exists(cls.single):
os.remove(cls.single)
if os.path.exists(cls.vector):
os.remove(cls.vector)
def testSingle(self):
"""
Test reading of a single Exodus file.
"""
reader = chigger.exodus.ExodusReader(self.single)
reader.update()
# Times
times = reader.getTimes()
self.assertEqual(len(times), 21)
self.assertEqual(times[0], 0)
self.assertAlmostEqual(times[-1], 2)
# Current Time
reader.setOptions(timestep=None, time=1.01)
if reader.needsUpdate():
reader.update()
tdata = reader.getTimeData()
self.assertAlmostEqual(tdata.time, 1)
self.assertEqual(tdata.timestep, 10)
self.assertEqual(tdata.index, 10)
self.assertEqual(tdata.filename, self.single)
# Blocks
blockinfo = reader.getBlockInformation()
self.assertEqual(list(blockinfo[reader.BLOCK].keys()), ['1', '76'])
self.assertEqual(list(blockinfo[reader.NODESET].keys()), ['1', '2'])
self.assertEqual(list(blockinfo[reader.SIDESET].keys()), ['1', '2'])
self.assertEqual(blockinfo[reader.SIDESET]['2'].name, 'top')
self.assertEqual(blockinfo[reader.SIDESET]['2'].object_type, 3)
self.assertEqual(blockinfo[reader.SIDESET]['2'].object_index, 1)
self.assertEqual(blockinfo[reader.SIDESET]['2'].multiblock_index, 9)
# Variable Info
varinfo = reader.getVariableInformation()
self.assertEqual(list(varinfo.keys()), ['aux_elem', 'convected', 'diffused', 'func_pp'])
# Elemental Variables
elemental = reader.getVariableInformation(var_types=[reader.ELEMENTAL])
self.assertEqual(list(elemental.keys()), ['aux_elem'])
self.assertEqual(elemental['aux_elem'].num_components, 1)
# Nodal Variables
elemental = reader.getVariableInformation(var_types=[reader.NODAL])
self.assertEqual(list(elemental.keys()), ['convected', 'diffused'])
self.assertEqual(elemental['diffused'].num_components, 1)
# Global Variables
gvars = reader.getVariableInformation(var_types=[reader.GLOBAL])
self.assertEqual(list(gvars.keys()), ['func_pp'])
self.assertEqual(gvars['func_pp'].num_components, 1)
def testSingleFieldData(self):
"""
Test that field data can be accessed.
"""
reader = chigger.exodus.ExodusReader(self.single, variables=['func_pp'])
for i, r in enumerate(range(0,21,2)):
reader.update(timestep=i)
self.assertAlmostEqual(reader.getGlobalData('func_pp'), r/10.)
def testVector(self):
"""
Test that vector data can be read.
"""
reader = chigger.exodus.ExodusReader(self.vector)
reader.update()
variables = reader.getVariableInformation()
self.assertEqual(list(variables.keys()), ['u', 'vel_'])
self.assertEqual(variables['vel_'].num_components, 2)
def testAdaptivity(self):
"""
Test that adaptive timestep files load correctly.
"""
reader = chigger.exodus.ExodusReader(self.multiple)
reader.update()
# Times
self.assertEqual(reader.getTimes(), [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5])
# Time
reader.setOptions(timestep=None, time=1.01)
if reader.needsUpdate():
reader.update()
tdata = reader.getTimeData()
self.assertAlmostEqual(tdata.time, 1)
self.assertEqual(tdata.timestep, 2)
self.assertEqual(tdata.index, 0)
self.assertEqual(tdata.filename, self.multiple + '-s002')
# Wait and the "update" the first few files
time.sleep(1.5)
for i in range(6):
mooseutils.touch(self.testfiles[i])
reader.setOptions(time=None, timestep=-1)
if reader.needsUpdate():
reader.update()
tdata = reader.getTimeData()
self.assertEqual(reader.getTimes(), [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
self.assertAlmostEqual(tdata.time, 3.0)
self.assertEqual(tdata.timestep, 6)
self.assertEqual(tdata.index, 0)
self.assertEqual(tdata.filename, self.multiple + '-s006')
def testExceptions(self):
"""
Test for error messages.
"""
# Invalid filename
        with self.assertRaisesRegex(IOError, 'The file foo.e is not a valid filename.'):
chigger.exodus.ExodusReader('foo.e')
reader = chigger.exodus.ExodusReader(self.single, variables=['convected', 'func_pp'])
        with self.assertRaisesRegex(mooseutils.MooseException, 'The variable "convected" must be a global variable.'):
reader.getGlobalData('convected')
def testReload(self):
"""
Test the file reloading is working.
"""
filenames = ['../input/diffusion_1.e', '../input/diffusion_2.e']
common = 'common.e'
shutil.copy(filenames[0], common)
reader = chigger.exodus.ExodusReader(common)
reader.update()
self.assertEqual(reader.getVTKReader().GetNumberOfTimeSteps(), 2)
shutil.copy(filenames[1], common)
reader.update()
self.assertEqual(reader.getVTKReader().GetNumberOfTimeSteps(), 3)
shutil.copy(filenames[0], common)
reader.update()
self.assertEqual(reader.getVTKReader().GetNumberOfTimeSteps(), 2)
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2)
|
nuclear-wizard/moose
|
python/chigger/tests/exodus/test_ExodusReader.py
|
Python
|
lgpl-2.1
| 6,862
|
[
"MOOSE"
] |
70bd3260a5edc942d966e17cdf2980d3d8a6116fddaa2b64415d03da29ba65fe
|
from schema_transformer.helpers import compose, single_result
TEST_XML_DOC = b'''
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:dcq="http://purl.org/dc/terms/">
<records count="97" morepages="true" start="1" end="10">
<record rownumber="1">
<dc:title>Test</dc:title>
<dc:creator>
Raveh-Sadka, Tali; Thomas, Brian C; Singh, Andrea; Firek, Brian; Brooks,
Brandon; Castelle, Cindy J; Sharon, Itai; Baker, Robyn; Good, Misty; Morowitz,
Michael J; Banfield, Jillian F
</dc:creator>
<dc:subject/>
<dc:subjectRelated/>
<dc:description/>
<dcq:publisher>
eLife Sciences Publications, Ltd.
</dcq:publisher>
<dcq:publisherAvailability/>
<dcq:publisherResearch>
None
</dcq:publisherResearch>
<dcq:publisherSponsor>
USDOE
</dcq:publisherSponsor>
<dcq:publisherCountry>
Country unknown/Code not available
</dcq:publisherCountry>
<dc:date>
2015-03-03
</dc:date>
<dc:language>
English
</dc:language>
<dc:type>
Journal Article
</dc:type>
<dcq:typeQualifier/>
<dc:relation>
Journal Name: eLife; Journal Volume: 4
</dc:relation>
<dc:coverage/>
<dc:format>
Medium: X
</dc:format>
<dc:identifier>
OSTI ID: 1171761, Legacy ID: OSTI ID: 1171761
</dc:identifier>
<dc:identifierReport>
None
</dc:identifierReport>
<dcq:identifierDOEcontract>
5R01AI092531; Long term fellowship; SC0004918; ER65561; APSF-2012-10-05
</dcq:identifierDOEcontract>
<dc:identifierOther>Journal ID: ISSN 2050-084X</dc:identifierOther>
<dc:doi>10.7554/eLife.05477</dc:doi><dc:rights/>
<dc:dateEntry>2015-03-05</dc:dateEntry>
<dc:dateAdded>2015-03-05</dc:dateAdded>
<dc:ostiId>1171761</dc:ostiId>
<dcq:identifier-purl type=""/>
<dcq:identifier-citation>
http://www.osti.gov/pages/biblio/1171761
</dcq:identifier-citation>
</record>
</records>
</rdf:RDF>
'''
TEST_SCHEMA = {
"description": ('//dc:description/node()', compose(lambda x: x.strip(), single_result)),
"contributors": ('//dc:creator/node()', compose(lambda x: x.split(';'), single_result)),
"title": ("//dc:title/node()", lambda x: "Title overwritten"),
"providerUpdatedDateTime": ('//dc:dateEntry/node()', single_result),
"uris": {
"canonicalUri": ('//dcq:identifier-citation/node()', compose(lambda x: x.strip(), single_result)),
"objectUris": [('//dc:doi/node()', compose(lambda x: 'http://dx.doi.org/' + x, single_result))]
},
"languages": ("//dc:language/text()", single_result),
"publisher": {
"name": ("//dcq:publisher/node()", single_result)
},
"sponsorships": [{
"sponsor": {
"sponsorName": ("//dcq:publisherSponsor/node()", single_result)
}
}]
}
TEST_NAMESPACES = {
'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'dc': 'http://purl.org/dc/elements/1.1/',
'dcq': 'http://purl.org/dc/terms/'
}
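# Illustrative sketch (not part of this test module): each schema entry pairs
# an XPath expression with a post-processing callable; applying one entry to
# the test document looks roughly like this.
def _example_apply_entry():
    from lxml import etree
    doc = etree.XML(TEST_XML_DOC)
    xpath, transform = TEST_SCHEMA['title']
    return transform(doc.xpath(xpath, namespaces=TEST_NAMESPACES))  # -> 'Title overwritten'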
|
fabianvf/schema-transformer
|
tests/utils.py
|
Python
|
apache-2.0
| 3,811
|
[
"Brian"
] |
95ac82709477c5a99d490cb7e0eeab78d37ad92b8aeba36bd9a8f1749461bc4c
|
# sybase/base.py
# Copyright (C) 2010-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
# get_select_precolumns(), limit_clause() implementation
# copyright (C) 2007 Fisch Asset Management
# AG http://www.fam.ch, with coding by Alexander Houben
# alexander.houben@thor-solutions.ch
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase
:name: Sybase
.. note::
The Sybase dialect within SQLAlchemy **is not currently supported**. The
dialect is not tested within continuous integration and is likely to have
many issues and caveats not currently handled.
"""
import re
from sqlalchemy import exc
from sqlalchemy import schema as sa_schema
from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.engine import default
from sqlalchemy.engine import reflection
from sqlalchemy.sql import compiler
from sqlalchemy.sql import text
from sqlalchemy.types import BIGINT
from sqlalchemy.types import BINARY
from sqlalchemy.types import CHAR
from sqlalchemy.types import DATE
from sqlalchemy.types import DATETIME
from sqlalchemy.types import DECIMAL
from sqlalchemy.types import FLOAT
from sqlalchemy.types import INT # noqa
from sqlalchemy.types import INTEGER
from sqlalchemy.types import NCHAR
from sqlalchemy.types import NUMERIC
from sqlalchemy.types import NVARCHAR
from sqlalchemy.types import REAL
from sqlalchemy.types import SMALLINT
from sqlalchemy.types import TEXT
from sqlalchemy.types import TIME
from sqlalchemy.types import TIMESTAMP
from sqlalchemy.types import Unicode
from sqlalchemy.types import VARBINARY
from sqlalchemy.types import VARCHAR
RESERVED_WORDS = set(
[
"add",
"all",
"alter",
"and",
"any",
"as",
"asc",
"backup",
"begin",
"between",
"bigint",
"binary",
"bit",
"bottom",
"break",
"by",
"call",
"capability",
"cascade",
"case",
"cast",
"char",
"char_convert",
"character",
"check",
"checkpoint",
"close",
"comment",
"commit",
"connect",
"constraint",
"contains",
"continue",
"convert",
"create",
"cross",
"cube",
"current",
"current_timestamp",
"current_user",
"cursor",
"date",
"dbspace",
"deallocate",
"dec",
"decimal",
"declare",
"default",
"delete",
"deleting",
"desc",
"distinct",
"do",
"double",
"drop",
"dynamic",
"else",
"elseif",
"encrypted",
"end",
"endif",
"escape",
"except",
"exception",
"exec",
"execute",
"existing",
"exists",
"externlogin",
"fetch",
"first",
"float",
"for",
"force",
"foreign",
"forward",
"from",
"full",
"goto",
"grant",
"group",
"having",
"holdlock",
"identified",
"if",
"in",
"index",
"index_lparen",
"inner",
"inout",
"insensitive",
"insert",
"inserting",
"install",
"instead",
"int",
"integer",
"integrated",
"intersect",
"into",
"iq",
"is",
"isolation",
"join",
"key",
"lateral",
"left",
"like",
"lock",
"login",
"long",
"match",
"membership",
"message",
"mode",
"modify",
"natural",
"new",
"no",
"noholdlock",
"not",
"notify",
"null",
"numeric",
"of",
"off",
"on",
"open",
"option",
"options",
"or",
"order",
"others",
"out",
"outer",
"over",
"passthrough",
"precision",
"prepare",
"primary",
"print",
"privileges",
"proc",
"procedure",
"publication",
"raiserror",
"readtext",
"real",
"reference",
"references",
"release",
"remote",
"remove",
"rename",
"reorganize",
"resource",
"restore",
"restrict",
"return",
"revoke",
"right",
"rollback",
"rollup",
"save",
"savepoint",
"scroll",
"select",
"sensitive",
"session",
"set",
"setuser",
"share",
"smallint",
"some",
"sqlcode",
"sqlstate",
"start",
"stop",
"subtrans",
"subtransaction",
"synchronize",
"syntax_error",
"table",
"temporary",
"then",
"time",
"timestamp",
"tinyint",
"to",
"top",
"tran",
"trigger",
"truncate",
"tsequal",
"unbounded",
"union",
"unique",
"unknown",
"unsigned",
"update",
"updating",
"user",
"using",
"validate",
"values",
"varbinary",
"varchar",
"variable",
"varying",
"view",
"wait",
"waitfor",
"when",
"where",
"while",
"window",
"with",
"with_cube",
"with_lparen",
"with_rollup",
"within",
"work",
"writetext",
]
)
class _SybaseUnitypeMixin(object):
"""these types appear to return a buffer object."""
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
return str(value) # decode("ucs-2")
else:
return None
return process
class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
__visit_name__ = "UNICHAR"
class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
__visit_name__ = "UNIVARCHAR"
class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText):
__visit_name__ = "UNITEXT"
class TINYINT(sqltypes.Integer):
__visit_name__ = "TINYINT"
class BIT(sqltypes.TypeEngine):
__visit_name__ = "BIT"
class MONEY(sqltypes.TypeEngine):
__visit_name__ = "MONEY"
class SMALLMONEY(sqltypes.TypeEngine):
__visit_name__ = "SMALLMONEY"
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
__visit_name__ = "UNIQUEIDENTIFIER"
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = "IMAGE"
class SybaseTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_, **kw):
return self.visit_IMAGE(type_)
def visit_boolean(self, type_, **kw):
return self.visit_BIT(type_)
def visit_unicode(self, type_, **kw):
return self.visit_NVARCHAR(type_)
def visit_UNICHAR(self, type_, **kw):
return "UNICHAR(%d)" % type_.length
def visit_UNIVARCHAR(self, type_, **kw):
return "UNIVARCHAR(%d)" % type_.length
def visit_UNITEXT(self, type_, **kw):
return "UNITEXT"
def visit_TINYINT(self, type_, **kw):
return "TINYINT"
def visit_IMAGE(self, type_, **kw):
return "IMAGE"
def visit_BIT(self, type_, **kw):
return "BIT"
def visit_MONEY(self, type_, **kw):
return "MONEY"
def visit_SMALLMONEY(self, type_, **kw):
return "SMALLMONEY"
def visit_UNIQUEIDENTIFIER(self, type_, **kw):
return "UNIQUEIDENTIFIER"
ischema_names = {
"bigint": BIGINT,
"int": INTEGER,
"integer": INTEGER,
"smallint": SMALLINT,
"tinyint": TINYINT,
"unsigned bigint": BIGINT, # TODO: unsigned flags
"unsigned int": INTEGER, # TODO: unsigned flags
"unsigned smallint": SMALLINT, # TODO: unsigned flags
"numeric": NUMERIC,
"decimal": DECIMAL,
"dec": DECIMAL,
"float": FLOAT,
"double": NUMERIC, # TODO
"double precision": NUMERIC, # TODO
"real": REAL,
"smallmoney": SMALLMONEY,
"money": MONEY,
"smalldatetime": DATETIME,
"datetime": DATETIME,
"date": DATE,
"time": TIME,
"char": CHAR,
"character": CHAR,
"varchar": VARCHAR,
"character varying": VARCHAR,
"char varying": VARCHAR,
"unichar": UNICHAR,
"unicode character": UNIVARCHAR,
"nchar": NCHAR,
"national char": NCHAR,
"national character": NCHAR,
"nvarchar": NVARCHAR,
"nchar varying": NVARCHAR,
"national char varying": NVARCHAR,
"national character varying": NVARCHAR,
"text": TEXT,
"unitext": UNITEXT,
"binary": BINARY,
"varbinary": VARBINARY,
"image": IMAGE,
"bit": BIT,
# not in documentation for ASE 15.7
"long varchar": TEXT, # TODO
"timestamp": TIMESTAMP,
"uniqueidentifier": UNIQUEIDENTIFIER,
}
class SybaseInspector(reflection.Inspector):
def __init__(self, conn):
reflection.Inspector.__init__(self, conn)
def get_table_id(self, table_name, schema=None):
"""Return the table id from `table_name` and `schema`."""
return self.dialect.get_table_id(
self.bind, table_name, schema, info_cache=self.info_cache
)
class SybaseExecutionContext(default.DefaultExecutionContext):
_enable_identity_insert = False
def set_ddl_autocommit(self, connection, value):
"""Must be implemented by subclasses to accommodate DDL executions.
"connection" is the raw unwrapped DBAPI connection. "value"
is True or False. when True, the connection should be configured
such that a DDL can take place subsequently. when False,
a DDL has taken place and the connection should be resumed
into non-autocommit mode.
"""
raise NotImplementedError()
def pre_exec(self):
if self.isinsert:
tbl = self.compiled.statement.table
seq_column = tbl._autoincrement_column
insert_has_sequence = seq_column is not None
if insert_has_sequence:
self._enable_identity_insert = (
seq_column.key in self.compiled_parameters[0]
)
else:
self._enable_identity_insert = False
if self._enable_identity_insert:
self.cursor.execute(
"SET IDENTITY_INSERT %s ON"
% self.dialect.identifier_preparer.format_table(tbl)
)
if self.isddl:
# TODO: to enhance this, we can detect "ddl in tran" on the
# database settings. this error message should be improved to
# include a note about that.
if not self.should_autocommit:
raise exc.InvalidRequestError(
"The Sybase dialect only supports "
"DDL in 'autocommit' mode at this time."
)
self.root_connection.engine.logger.info(
"AUTOCOMMIT (Assuming no Sybase 'ddl in tran')"
)
self.set_ddl_autocommit(
self.root_connection.connection.connection, True
)
def post_exec(self):
if self.isddl:
self.set_ddl_autocommit(self.root_connection, False)
if self._enable_identity_insert:
self.cursor.execute(
"SET IDENTITY_INSERT %s OFF"
% self.dialect.identifier_preparer.format_table(
self.compiled.statement.table
)
)
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT @@identity AS lastrowid")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
class SybaseSQLCompiler(compiler.SQLCompiler):
ansi_bind_rules = True
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{"doy": "dayofyear", "dow": "weekday", "milliseconds": "millisecond"},
)
def get_select_precolumns(self, select, **kw):
s = select._distinct and "DISTINCT " or ""
# TODO: don't think Sybase supports
# bind params for FIRST / TOP
limit = select._limit
if limit:
# if select._limit == 1:
# s += "FIRST "
# else:
# s += "TOP %s " % (select._limit,)
s += "TOP %s " % (limit,)
offset = select._offset
if offset:
raise NotImplementedError("Sybase ASE does not support OFFSET")
return s
def get_from_hint_text(self, table, text):
return text
def limit_clause(self, select, **kw):
# Limit in sybase is after the select keyword
return ""
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
return 'DATEPART("%s", %s)' % (field, self.process(extract.expr, **kw))
def visit_now_func(self, fn, **kw):
return "GETDATE()"
def for_update_clause(self, select):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR"
# which SQLAlchemy doesn't use
return ""
def order_by_clause(self, select, **kw):
kw["literal_binds"] = True
order_by = self.process(select._order_by_clause, **kw)
# SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT
if order_by and (not self.is_subquery() or select._limit):
return " ORDER BY " + order_by
else:
return ""
def delete_table_clause(self, delete_stmt, from_table, extra_froms):
"""If we have extra froms make sure we render any alias as hint."""
ashint = False
if extra_froms:
ashint = True
return from_table._compiler_dispatch(
self, asfrom=True, iscrud=True, ashint=ashint
)
def delete_extra_from_clause(
self, delete_stmt, from_table, extra_froms, from_hints, **kw
):
"""Render the DELETE .. FROM clause specific to Sybase."""
return "FROM " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in [from_table] + extra_froms
)
class SybaseDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = (
self.preparer.format_column(column)
+ " "
+ self.dialect.type_compiler.process(
column.type, type_expression=column
)
)
if column.table is None:
raise exc.CompileError(
"The Sybase dialect requires Table-bound "
"columns in order to generate DDL"
)
seq_col = column.table._autoincrement_column
# install a IDENTITY Sequence if we have an implicit IDENTITY column
if seq_col is column:
sequence = (
isinstance(column.default, sa_schema.Sequence)
and column.default
)
if sequence:
start, increment = sequence.start or 1, sequence.increment or 1
else:
start, increment = 1, 1
if (start, increment) == (1, 1):
colspec += " IDENTITY"
else:
# TODO: need correct syntax for this
colspec += " IDENTITY(%s,%s)" % (start, increment)
else:
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if column.nullable is not None:
if not column.nullable or column.primary_key:
colspec += " NOT NULL"
else:
colspec += " NULL"
return colspec
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX %s.%s" % (
self.preparer.quote_identifier(index.table.name),
self._prepared_index_name(drop.element, include_schema=False),
)
class SybaseIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
class SybaseDialect(default.DefaultDialect):
name = "sybase"
supports_unicode_statements = False
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
supports_native_boolean = False
supports_unicode_binds = False
postfetch_lastrowid = True
colspecs = {}
ischema_names = ischema_names
type_compiler = SybaseTypeCompiler
statement_compiler = SybaseSQLCompiler
ddl_compiler = SybaseDDLCompiler
preparer = SybaseIdentifierPreparer
inspector = SybaseInspector
construct_arguments = []
def _get_default_schema_name(self, connection):
return connection.scalar(
text("SELECT user_name() as user_name").columns(username=Unicode)
)
def initialize(self, connection):
super(SybaseDialect, self).initialize(connection)
if (
self.server_version_info is not None
and self.server_version_info < (15,)
):
self.max_identifier_length = 30
else:
self.max_identifier_length = 255
def get_table_id(self, connection, table_name, schema=None, **kw):
"""Fetch the id for schema.table_name.
Several reflection methods require the table id. The idea for using
this method is that it can be fetched one time and cached for
subsequent calls.
"""
table_id = None
if schema is None:
schema = self.default_schema_name
TABLEID_SQL = text(
"""
SELECT o.id AS id
FROM sysobjects o JOIN sysusers u ON o.uid=u.uid
WHERE u.name = :schema_name
AND o.name = :table_name
AND o.type in ('U', 'V')
"""
)
if util.py2k:
if isinstance(schema, unicode): # noqa
schema = schema.encode("ascii")
if isinstance(table_name, unicode): # noqa
table_name = table_name.encode("ascii")
result = connection.execute(
TABLEID_SQL, schema_name=schema, table_name=table_name
)
table_id = result.scalar()
if table_id is None:
raise exc.NoSuchTableError(table_name)
return table_id
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
COLUMN_SQL = text(
"""
SELECT col.name AS name,
t.name AS type,
(col.status & 8) AS nullable,
(col.status & 128) AS autoincrement,
com.text AS 'default',
col.prec AS precision,
col.scale AS scale,
col.length AS length
FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON
col.cdefault = com.id
WHERE col.usertype = t.usertype
AND col.id = :table_id
ORDER BY col.colid
"""
)
results = connection.execute(COLUMN_SQL, table_id=table_id)
columns = []
for (
name,
type_,
nullable,
autoincrement,
default_,
precision,
scale,
length,
) in results:
col_info = self._get_column_info(
name,
type_,
bool(nullable),
bool(autoincrement),
default_,
precision,
scale,
length,
)
columns.append(col_info)
return columns
def _get_column_info(
self,
name,
type_,
nullable,
autoincrement,
default,
precision,
scale,
length,
):
coltype = self.ischema_names.get(type_, None)
kwargs = {}
if coltype in (NUMERIC, DECIMAL):
args = (precision, scale)
elif coltype == FLOAT:
args = (precision,)
elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
args = (length,)
else:
args = ()
if coltype:
coltype = coltype(*args, **kwargs)
# is this necessary
# if is_array:
# coltype = ARRAY(coltype)
else:
util.warn(
"Did not recognize type '%s' of column '%s'" % (type_, name)
)
coltype = sqltypes.NULLTYPE
if default:
default = default.replace("DEFAULT", "").strip()
default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
else:
default = None
column_info = dict(
name=name,
type=coltype,
nullable=nullable,
default=default,
autoincrement=autoincrement,
)
return column_info
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
table_cache = {}
column_cache = {}
foreign_keys = []
table_cache[table_id] = {"name": table_name, "schema": schema}
COLUMN_SQL = text(
"""
SELECT c.colid AS id, c.name AS name
FROM syscolumns c
WHERE c.id = :table_id
"""
)
results = connection.execute(COLUMN_SQL, table_id=table_id)
columns = {}
for col in results:
columns[col["id"]] = col["name"]
column_cache[table_id] = columns
REFCONSTRAINT_SQL = text(
"""
SELECT o.name AS name, r.reftabid AS reftable_id,
r.keycnt AS 'count',
r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3,
r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6,
        r.fokey7 AS fokey7, r.fokey8 AS fokey8, r.fokey9 AS fokey9,
r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12,
r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15,
r.fokey16 AS fokey16,
r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3,
r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6,
        r.refkey7 AS refkey7, r.refkey8 AS refkey8, r.refkey9 AS refkey9,
r.refkey10 AS refkey10, r.refkey11 AS refkey11,
r.refkey12 AS refkey12, r.refkey13 AS refkey13,
r.refkey14 AS refkey14, r.refkey15 AS refkey15,
r.refkey16 AS refkey16
FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
WHERE r.tableid = :table_id
"""
)
referential_constraints = connection.execute(
REFCONSTRAINT_SQL, table_id=table_id
).fetchall()
REFTABLE_SQL = text(
"""
SELECT o.name AS name, u.name AS 'schema'
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE o.id = :table_id
"""
)
for r in referential_constraints:
reftable_id = r["reftable_id"]
if reftable_id not in table_cache:
c = connection.execute(REFTABLE_SQL, table_id=reftable_id)
reftable = c.fetchone()
c.close()
table_info = {"name": reftable["name"], "schema": None}
if (
schema is not None
or reftable["schema"] != self.default_schema_name
):
table_info["schema"] = reftable["schema"]
table_cache[reftable_id] = table_info
results = connection.execute(COLUMN_SQL, table_id=reftable_id)
reftable_columns = {}
for col in results:
reftable_columns[col["id"]] = col["name"]
column_cache[reftable_id] = reftable_columns
reftable = table_cache[reftable_id]
reftable_columns = column_cache[reftable_id]
constrained_columns = []
referred_columns = []
for i in range(1, r["count"] + 1):
constrained_columns.append(columns[r["fokey%i" % i]])
referred_columns.append(reftable_columns[r["refkey%i" % i]])
fk_info = {
"constrained_columns": constrained_columns,
"referred_schema": reftable["schema"],
"referred_table": reftable["name"],
"referred_columns": referred_columns,
"name": r["name"],
}
foreign_keys.append(fk_info)
return foreign_keys
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
INDEX_SQL = text(
"""
SELECT object_name(i.id) AS table_name,
i.keycnt AS 'count',
i.name AS name,
(i.status & 0x2) AS 'unique',
index_col(object_name(i.id), i.indid, 1) AS col_1,
index_col(object_name(i.id), i.indid, 2) AS col_2,
index_col(object_name(i.id), i.indid, 3) AS col_3,
index_col(object_name(i.id), i.indid, 4) AS col_4,
index_col(object_name(i.id), i.indid, 5) AS col_5,
index_col(object_name(i.id), i.indid, 6) AS col_6,
index_col(object_name(i.id), i.indid, 7) AS col_7,
index_col(object_name(i.id), i.indid, 8) AS col_8,
index_col(object_name(i.id), i.indid, 9) AS col_9,
index_col(object_name(i.id), i.indid, 10) AS col_10,
index_col(object_name(i.id), i.indid, 11) AS col_11,
index_col(object_name(i.id), i.indid, 12) AS col_12,
index_col(object_name(i.id), i.indid, 13) AS col_13,
index_col(object_name(i.id), i.indid, 14) AS col_14,
index_col(object_name(i.id), i.indid, 15) AS col_15,
index_col(object_name(i.id), i.indid, 16) AS col_16
FROM sysindexes i, sysobjects o
WHERE o.id = i.id
AND o.id = :table_id
AND (i.status & 2048) = 0
AND i.indid BETWEEN 1 AND 254
"""
)
results = connection.execute(INDEX_SQL, table_id=table_id)
indexes = []
for r in results:
column_names = []
for i in range(1, r["count"]):
column_names.append(r["col_%i" % (i,)])
index_info = {
"name": r["name"],
"unique": bool(r["unique"]),
"column_names": column_names,
}
indexes.append(index_info)
return indexes
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
PK_SQL = text(
"""
SELECT object_name(i.id) AS table_name,
i.keycnt AS 'count',
i.name AS name,
index_col(object_name(i.id), i.indid, 1) AS pk_1,
index_col(object_name(i.id), i.indid, 2) AS pk_2,
index_col(object_name(i.id), i.indid, 3) AS pk_3,
index_col(object_name(i.id), i.indid, 4) AS pk_4,
index_col(object_name(i.id), i.indid, 5) AS pk_5,
index_col(object_name(i.id), i.indid, 6) AS pk_6,
index_col(object_name(i.id), i.indid, 7) AS pk_7,
index_col(object_name(i.id), i.indid, 8) AS pk_8,
index_col(object_name(i.id), i.indid, 9) AS pk_9,
index_col(object_name(i.id), i.indid, 10) AS pk_10,
index_col(object_name(i.id), i.indid, 11) AS pk_11,
index_col(object_name(i.id), i.indid, 12) AS pk_12,
index_col(object_name(i.id), i.indid, 13) AS pk_13,
index_col(object_name(i.id), i.indid, 14) AS pk_14,
index_col(object_name(i.id), i.indid, 15) AS pk_15,
index_col(object_name(i.id), i.indid, 16) AS pk_16
FROM sysindexes i, sysobjects o
WHERE o.id = i.id
AND o.id = :table_id
AND (i.status & 2048) = 2048
AND i.indid BETWEEN 1 AND 254
"""
)
results = connection.execute(PK_SQL, table_id=table_id)
pks = results.fetchone()
results.close()
constrained_columns = []
if pks:
for i in range(1, pks["count"] + 1):
constrained_columns.append(pks["pk_%i" % (i,)])
return {
"constrained_columns": constrained_columns,
"name": pks["name"],
}
else:
return {"constrained_columns": [], "name": None}
@reflection.cache
def get_schema_names(self, connection, **kw):
SCHEMA_SQL = text("SELECT u.name AS name FROM sysusers u")
schemas = connection.execute(SCHEMA_SQL)
return [s["name"] for s in schemas]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
TABLE_SQL = text(
"""
SELECT o.name AS name
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE u.name = :schema_name
AND o.type = 'U'
"""
)
if util.py2k:
if isinstance(schema, unicode): # noqa
schema = schema.encode("ascii")
tables = connection.execute(TABLE_SQL, schema_name=schema)
return [t["name"] for t in tables]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
VIEW_DEF_SQL = text(
"""
SELECT c.text
FROM syscomments c JOIN sysobjects o ON c.id = o.id
WHERE o.name = :view_name
AND o.type = 'V'
"""
)
if util.py2k:
if isinstance(view_name, unicode): # noqa
view_name = view_name.encode("ascii")
view = connection.execute(VIEW_DEF_SQL, view_name=view_name)
return view.scalar()
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
VIEW_SQL = text(
"""
SELECT o.name AS name
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE u.name = :schema_name
AND o.type = 'V'
"""
)
if util.py2k:
if isinstance(schema, unicode): # noqa
schema = schema.encode("ascii")
views = connection.execute(VIEW_SQL, schema_name=schema)
return [v["name"] for v in views]
def has_table(self, connection, table_name, schema=None):
try:
self.get_table_id(connection, table_name, schema)
except exc.NoSuchTableError:
return False
else:
return True
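# A minimal reflection sketch (the connection URL below is hypothetical); the
# SQLAlchemy Inspector routes to the get_columns()/get_foreign_keys()/... 
# methods defined above:
#
#     from sqlalchemy import create_engine, inspect
#     engine = create_engine("sybase+pysybase://user:pass@dsn/mydb")
#     insp = inspect(engine)
#     print(insp.get_columns("my_table", schema="dbo"))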
|
wujuguang/sqlalchemy
|
lib/sqlalchemy/dialects/sybase/base.py
|
Python
|
mit
| 31,953
|
[
"ASE"
] |
3c19f96897b9a548d08eb11bbf2758bca1ea975f7418d8039992178ada3a942a
|
# -*- coding: utf-8 -*-
"""
Lower exception-related instructions.
"""
from pykit import types
from pykit.ir import visit, FunctionPass
class LowerExceptionChecksCostful(FunctionPass):
"""
Lower exception checks (check_error) using C-like checks:
if (result == bad)
goto error;
"""
def op_check_error(self, op):
result, badval = op.args
self.builder.position_after(op)
with self.builder.if_(self.builder.eq(types.Bool, [result, badval])):
self.builder.gen_error_propagation()
op.delete()
def lower_costful(func, env=None):
visit(LowerExceptionChecksCostful(func), func)
def run(func, env):
"""Generate runtime calls into thread library"""
lower_costful(func)
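# Conceptually (illustrative pseudo-IR, not exact pykit syntax) the pass turns
#
#     %r = call @f(...)
#     check_error %r, %bad
#
# into an explicit comparison and branch, mirroring the C idiom shown in the
# class docstring:
#
#     %r = call @f(...)
#     %c = eq %r, %bad
#     if %c: goto error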
|
ContinuumIO/pykit
|
pykit/lower/lower_errcheck.py
|
Python
|
bsd-3-clause
| 758
|
[
"VisIt"
] |
225407bf25893ac90903beae995d72da51da847efd29b619ca4d60e2950e977e
|
"""Support for package tracking sensors from 17track.net."""
from datetime import timedelta
import logging
from py17track import Client as SeventeenTrackClient
from py17track.errors import SeventeenTrackError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LOCATION,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_call_later
from homeassistant.util import Throttle, slugify
_LOGGER = logging.getLogger(__name__)
ATTR_DESTINATION_COUNTRY = "destination_country"
ATTR_FRIENDLY_NAME = "friendly_name"
ATTR_INFO_TEXT = "info_text"
ATTR_ORIGIN_COUNTRY = "origin_country"
ATTR_PACKAGES = "packages"
ATTR_PACKAGE_TYPE = "package_type"
ATTR_STATUS = "status"
ATTR_TRACKING_INFO_LANGUAGE = "tracking_info_language"
ATTR_TRACKING_NUMBER = "tracking_number"
CONF_SHOW_ARCHIVED = "show_archived"
CONF_SHOW_DELIVERED = "show_delivered"
DATA_PACKAGES = "package_data"
DATA_SUMMARY = "summary_data"
DEFAULT_ATTRIBUTION = "Data provided by 17track.net"
DEFAULT_SCAN_INTERVAL = timedelta(minutes=10)
UNIQUE_ID_TEMPLATE = "package_{0}_{1}"
ENTITY_ID_TEMPLATE = "sensor.seventeentrack_package_{0}"
NOTIFICATION_DELIVERED_ID = "package_delivered_{0}"
NOTIFICATION_DELIVERED_TITLE = "Package {0} delivered"
NOTIFICATION_DELIVERED_MESSAGE = (
"Package Delivered: {0}<br />Visit 17.track for more information: "
"https://t.17track.net/track#nums={1}"
)
VALUE_DELIVERED = "Delivered"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SHOW_ARCHIVED, default=False): cv.boolean,
vol.Optional(CONF_SHOW_DELIVERED, default=False): cv.boolean,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Configure the platform and add the sensors."""
websession = aiohttp_client.async_get_clientsession(hass)
client = SeventeenTrackClient(websession)
try:
login_result = await client.profile.login(
config[CONF_USERNAME], config[CONF_PASSWORD]
)
if not login_result:
_LOGGER.error("Invalid username and password provided")
return
except SeventeenTrackError as err:
_LOGGER.error("There was an error while logging in: %s", err)
return
scan_interval = config.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
data = SeventeenTrackData(
client,
async_add_entities,
scan_interval,
config[CONF_SHOW_ARCHIVED],
config[CONF_SHOW_DELIVERED],
)
await data.async_update()
class SeventeenTrackSummarySensor(Entity):
"""Define a summary sensor."""
def __init__(self, data, status, initial_state):
"""Initialize."""
self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
self._data = data
self._state = initial_state
self._status = status
@property
def available(self):
"""Return whether the entity is available."""
return self._state is not None
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return self._attrs
@property
def icon(self):
"""Return the icon."""
return "mdi:package"
@property
def name(self):
"""Return the name."""
return f"Seventeentrack Packages {self._status}"
@property
def state(self):
"""Return the state."""
return self._state
@property
def unique_id(self):
"""Return a unique, Home Assistant friendly identifier for this entity."""
return "summary_{}_{}".format(self._data.account_id, slugify(self._status))
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return "packages"
async def async_update(self):
"""Update the sensor."""
await self._data.async_update()
package_data = []
for package in self._data.packages.values():
if package.status != self._status:
continue
package_data.append(
{
ATTR_FRIENDLY_NAME: package.friendly_name,
ATTR_INFO_TEXT: package.info_text,
ATTR_STATUS: package.status,
ATTR_LOCATION: package.location,
ATTR_TRACKING_NUMBER: package.tracking_number,
}
)
if package_data:
self._attrs[ATTR_PACKAGES] = package_data
self._state = self._data.summary.get(self._status)
class SeventeenTrackPackageSensor(Entity):
"""Define an individual package sensor."""
def __init__(self, data, package):
"""Initialize."""
self._attrs = {
ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION,
ATTR_DESTINATION_COUNTRY: package.destination_country,
ATTR_INFO_TEXT: package.info_text,
ATTR_LOCATION: package.location,
ATTR_ORIGIN_COUNTRY: package.origin_country,
ATTR_PACKAGE_TYPE: package.package_type,
ATTR_TRACKING_INFO_LANGUAGE: package.tracking_info_language,
ATTR_TRACKING_NUMBER: package.tracking_number,
}
self._data = data
self._friendly_name = package.friendly_name
self._state = package.status
self._tracking_number = package.tracking_number
self.entity_id = ENTITY_ID_TEMPLATE.format(self._tracking_number)
@property
def available(self):
"""Return whether the entity is available."""
return self._data.packages.get(self._tracking_number) is not None
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return self._attrs
@property
def icon(self):
"""Return the icon."""
return "mdi:package"
@property
def name(self):
"""Return the name."""
name = self._friendly_name
if not name:
name = self._tracking_number
return f"Seventeentrack Package: {name}"
@property
def state(self):
"""Return the state."""
return self._state
@property
def unique_id(self):
"""Return a unique, Home Assistant friendly identifier for this entity."""
return UNIQUE_ID_TEMPLATE.format(self._data.account_id, self._tracking_number)
async def async_update(self):
"""Update the sensor."""
await self._data.async_update()
if not self.available:
# Entity cannot be removed while it's being added
async_call_later(self.hass, 1, self._remove)
return
package = self._data.packages.get(self._tracking_number, None)
# If the user has elected to not see delivered packages and one gets
# delivered, post a notification:
if package.status == VALUE_DELIVERED and not self._data.show_delivered:
self._notify_delivered()
# Entity cannot be removed while it's being added
async_call_later(self.hass, 1, self._remove)
return
self._attrs.update(
{ATTR_INFO_TEXT: package.info_text, ATTR_LOCATION: package.location}
)
self._state = package.status
self._friendly_name = package.friendly_name
async def _remove(self, *_):
"""Remove entity itself."""
await self.async_remove(force_remove=True)
reg = await self.hass.helpers.entity_registry.async_get_registry()
entity_id = reg.async_get_entity_id(
"sensor",
"seventeentrack",
UNIQUE_ID_TEMPLATE.format(self._data.account_id, self._tracking_number),
)
if entity_id:
reg.async_remove(entity_id)
def _notify_delivered(self):
"""Notify when package is delivered."""
_LOGGER.info("Package delivered: %s", self._tracking_number)
identification = (
self._friendly_name if self._friendly_name else self._tracking_number
)
message = NOTIFICATION_DELIVERED_MESSAGE.format(
identification, self._tracking_number
)
title = NOTIFICATION_DELIVERED_TITLE.format(identification)
notification_id = NOTIFICATION_DELIVERED_ID.format(self._tracking_number)
self.hass.components.persistent_notification.create(
message, title=title, notification_id=notification_id
)
class SeventeenTrackData:
"""Define a data handler for 17track.net."""
def __init__(
self, client, async_add_entities, scan_interval, show_archived, show_delivered
):
"""Initialize."""
self._async_add_entities = async_add_entities
self._client = client
self._scan_interval = scan_interval
self._show_archived = show_archived
self.account_id = client.profile.account_id
self.packages = {}
self.show_delivered = show_delivered
self.summary = {}
self.async_update = Throttle(self._scan_interval)(self._async_update)
self.first_update = True
async def _async_update(self):
"""Get updated data from 17track.net."""
try:
packages = await self._client.profile.packages(
show_archived=self._show_archived
)
_LOGGER.debug("New package data received: %s", packages)
new_packages = {p.tracking_number: p for p in packages}
to_add = set(new_packages) - set(self.packages)
_LOGGER.debug("Will add new tracking numbers: %s", to_add)
if to_add:
self._async_add_entities(
[
SeventeenTrackPackageSensor(self, new_packages[tracking_number])
for tracking_number in to_add
],
True,
)
self.packages = new_packages
except SeventeenTrackError as err:
_LOGGER.error("There was an error retrieving packages: %s", err)
try:
self.summary = await self._client.profile.summary(
show_archived=self._show_archived
)
_LOGGER.debug("New summary data received: %s", self.summary)
# creating summary sensors on first update
if self.first_update:
self.first_update = False
self._async_add_entities(
[
SeventeenTrackSummarySensor(self, status, quantity)
for status, quantity in self.summary.items()
],
True,
)
except SeventeenTrackError as err:
_LOGGER.error("There was an error retrieving the summary: %s", err)
self.summary = {}
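# A minimal configuration.yaml sketch for this platform (credentials are
# placeholders; show_archived and show_delivered default to false):
#
#     sensor:
#       - platform: seventeentrack
#         username: you@example.com
#         password: !secret seventeentrack_password
#         show_archived: false
#         show_delivered: false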
|
turbokongen/home-assistant
|
homeassistant/components/seventeentrack/sensor.py
|
Python
|
apache-2.0
| 11,106
|
[
"VisIt"
] |
0246c8541bb285badebf9ccc862e2965f4d279f7db19eebb38138374e2d8f858
|
"""
Contains the user interface in the Universe class
"""
from galaxy.web.base.controller import *
from galaxy.model.orm import *
from galaxy import util
import logging, os, string
from random import choice
log = logging.getLogger( __name__ )
require_login_template = """
<h1>Welcome to Galaxy</h1>
<p>
This installation of Galaxy has been configured such that only users who are logged in may use it.%s
</p>
<p/>
"""
require_login_nocreation_template = require_login_template % ""
require_login_creation_template = require_login_template % " If you don't already have an account, <a href='%s'>you may create one</a>."
class User( BaseController ):
@web.expose
def index( self, trans, **kwd ):
return trans.fill_template( '/user/index.mako', user=trans.get_user() )
@web.expose
def change_password(self, trans, old_pass='', new_pass='', conf_pass='', **kwd):
old_pass_err = new_pass_err = conf_pass_err = ''
user = trans.get_user()
if not user:
trans.response.send_redirect( web.url_for( action='login' ) )
if trans.request.method == 'POST':
if not user.check_password( old_pass ):
old_pass_err = "Invalid password"
elif len( new_pass ) < 6:
new_pass_err = "Please use a password of at least 6 characters"
elif new_pass != conf_pass:
conf_pass_err = "New passwords do not match."
else:
user.set_password_cleartext( new_pass )
user.flush()
trans.log_event( "User change password" )
return trans.show_ok_message( "Password has been changed for " + user.email)
# Generate input form
return trans.show_form(
web.FormBuilder( web.url_for() , "Change Password", submit_text="Submit" )
.add_password( "old_pass", "Old Password", value='', error=old_pass_err )
.add_password( "new_pass", "New Password", value='', error=new_pass_err )
.add_password( "conf_pass", "Confirm Password", value='', error=conf_pass_err ) )
@web.expose
def change_email(self, trans, email='', conf_email='', password='', **kwd):
email_err = conf_email_err = pass_err = ''
user = trans.get_user()
if not user:
trans.response.send_redirect( web.url_for( action='login' ) )
if trans.request.method == "POST":
if not user.check_password( password ):
pass_err = "Invalid password"
elif len( email ) == 0 or "@" not in email or "." not in email:
email_err = "Please enter a real email address"
elif len( email) > 255:
email_err = "Email address exceeds maximum allowable length"
elif trans.app.model.User.filter_by( email=email ).first():
email_err = "User with that email already exists"
elif email != conf_email:
conf_email_err = "Email addresses do not match."
else:
user.email = email
user.flush()
trans.log_event( "User change email" )
return trans.show_ok_message( "Email has been changed to: " + user.email, refresh_frames=['masthead', 'history'] )
return trans.show_form(
web.FormBuilder( web.url_for(), "Change Email", submit_text="Submit" )
.add_text( "email", "Email", value=email, error=email_err )
.add_text( "conf_email", "Confirm Email", value='', error=conf_email_err )
.add_password( "password", "Password", value='', error=pass_err ) )
@web.expose
def login( self, trans, email='', password='' ):
email_error = password_error = None
# Attempt login
if trans.app.config.require_login:
refresh_frames = [ 'masthead', 'history', 'tools' ]
else:
refresh_frames = [ 'masthead', 'history' ]
if email or password:
user = trans.app.model.User.filter( trans.app.model.User.table.c.email==email ).first()
if not user:
email_error = "No such user"
elif user.deleted:
email_error = "This account has been marked deleted, contact your Galaxy administrator to restore the account."
elif user.external:
email_error = "This account was created for use with an external authentication method, contact your local Galaxy administrator to activate it."
elif not user.check_password( password ):
password_error = "Invalid password"
else:
trans.handle_user_login( user )
trans.log_event( "User logged in" )
msg = "Now logged in as " + user.email + "."
if trans.app.config.require_login:
msg += ' <a href="%s">Click here</a> to continue to the front page.' % web.url_for( '/static/welcome.html' )
return trans.show_ok_message( msg, refresh_frames=refresh_frames )
form = web.FormBuilder( web.url_for(), "Login", submit_text="Login" ) \
.add_text( "email", "Email address", value=email, error=email_error ) \
.add_password( "password", "Password", value='', error=password_error,
help="<a href='%s'>Forgot password? Reset here</a>" % web.url_for( action='reset_password' ) )
if trans.app.config.require_login:
if trans.app.config.allow_user_creation:
return trans.show_form( form, header = require_login_creation_template % web.url_for( action = 'create' ) )
else:
return trans.show_form( form, header = require_login_nocreation_template )
else:
return trans.show_form( form )
@web.expose
def logout( self, trans ):
if trans.app.config.require_login:
refresh_frames = [ 'masthead', 'history', 'tools' ]
else:
refresh_frames = [ 'masthead', 'history' ]
# Since logging an event requires a session, we'll log prior to ending the session
trans.log_event( "User logged out" )
trans.handle_user_logout()
msg = "You are no longer logged in."
if trans.app.config.require_login:
msg += ' <a href="%s">Click here</a> to return to the login page.' % web.url_for( controller='user', action='login' )
return trans.show_ok_message( msg, refresh_frames=refresh_frames )
@web.expose
def create( self, trans, email='', password='', confirm='', subscribe=False ):
if trans.app.config.require_login:
refresh_frames = [ 'masthead', 'history', 'tools' ]
else:
refresh_frames = [ 'masthead', 'history' ]
if not trans.app.config.allow_user_creation and not trans.user_is_admin():
return trans.show_error_message( 'User registration is disabled. Please contact your Galaxy administrator for an account.' )
email_error = password_error = confirm_error = None
if email:
if len( email ) == 0 or "@" not in email or "." not in email:
email_error = "Please enter a real email address"
elif len( email ) > 255:
email_error = "Email address exceeds maximum allowable length"
elif trans.app.model.User.filter( and_( trans.app.model.User.table.c.email==email,
trans.app.model.User.table.c.deleted==False ) ).first():
email_error = "User with that email already exists"
elif len( password ) < 6:
password_error = "Please use a password of at least 6 characters"
elif password != confirm:
confirm_error = "Passwords do not match"
else:
user = trans.app.model.User( email=email )
user.set_password_cleartext( password )
user.flush()
trans.app.security_agent.create_private_user_role( user )
# We set default user permissions before we log in and set the default history permissions
trans.app.security_agent.user_set_default_permissions( user )
# The handle_user_login() method has a call to the history_set_default_permissions() method
# (needed when logging in with a history), so the user needs to have default permissions set before logging in
trans.handle_user_login( user )
trans.log_event( "User created a new account" )
trans.log_event( "User logged in" )
#subscribe user to email list
if subscribe:
mail = os.popen("%s -t" % trans.app.config.sendmail_path, 'w')
mail.write("To: %s\nFrom: %s\nSubject: Join Mailing List\n\nJoin Mailing list." % (trans.app.config.mailing_join_addr,email) )
if mail.close():
return trans.show_warn_message( "Now logged in as " + user.email+". However, subscribing to the mailing list has failed.", refresh_frames=['masthead', 'history'] )
return trans.show_ok_message( "Now logged in as " + user.email, refresh_frames=['masthead', 'history'] )
return trans.show_form(
web.FormBuilder( web.url_for(), "Create account", submit_text="Create" )
.add_text( "email", "Email address", value=email, error=email_error )
.add_password( "password", "Password", value='', error=password_error )
.add_password( "confirm", "Confirm password", value='', error=confirm_error )
.add_input( "checkbox","Subscribe To Mailing List","subscribe", value='subscribe' ) )
@web.expose
def reset_password( self, trans, email=None, **kwd ):
error = ''
reset_user = trans.app.model.User.filter( trans.app.model.User.table.c.email==email ).first()
user = trans.get_user()
if reset_user:
if user and user.id != reset_user.id:
error = "You may only reset your own password"
else:
chars = string.letters + string.digits
new_pass = ""
for i in range(15):
new_pass = new_pass + choice(chars)
mail = os.popen("%s -t" % trans.app.config.sendmail_path, 'w')
mail.write("To: %s\nFrom: no-reply@%s\nSubject: Galaxy Password Reset\n\nYour password has been reset to \"%s\" (no quotes)." % (email, trans.request.remote_addr, new_pass) )
if mail.close():
return trans.show_error_message( 'Failed to reset password. If this problem persists, please submit a bug report.' )
reset_user.set_password_cleartext( new_pass )
reset_user.flush()
trans.log_event( "User reset password: %s" % email )
return trans.show_ok_message( "Password has been reset and emailed to: %s. <a href='%s'>Click here</a> to return to the login form." % ( email, web.url_for( action='login' ) ) )
elif email != None:
error = "The specified user does not exist"
return trans.show_form(
web.FormBuilder( web.url_for(), "Reset Password", submit_text="Submit" )
.add_text( "email", "Email", value=email, error=error ) )
@web.expose
def set_default_permissions( self, trans, **kwd ):
"""Sets the user's default permissions for the new histories"""
if trans.user:
if 'update_roles_button' in kwd:
p = util.Params( kwd )
permissions = {}
for k, v in trans.app.model.Dataset.permitted_actions.items():
in_roles = p.get( k + '_in', [] )
if not isinstance( in_roles, list ):
in_roles = [ in_roles ]
in_roles = [ trans.app.model.Role.get( x ) for x in in_roles ]
action = trans.app.security_agent.get_action( v.action ).action
permissions[ action ] = in_roles
trans.app.security_agent.user_set_default_permissions( trans.user, permissions )
return trans.show_ok_message( 'Default new history permissions have been changed.' )
return trans.fill_template( 'user/permissions.mako' )
else:
# User not logged in, history group must be only public
return trans.show_error_message( "You must be logged in to change your default permitted actions." )
|
dbcls/dbcls-galaxy
|
lib/galaxy/web/controllers/user.py
|
Python
|
mit
| 12,689
|
[
"Galaxy"
] |
fba44d875f365511507cfbcdd004a22c2bca4e16af8a80b6b663b773a12a4a35
|
import matplotlib.pyplot as plt
import os
from astropy.table import Table
import numpy as np
# setup information sources
degas = Table.read(os.path.join(os.environ['SCRIPTDIR'],'degas_base.fits'))
stack = Table.read('/lustre/cv/users/akepley/degas/stack_test/stack_IR6p0_mom1.fits')
plotDir = os.path.join(os.environ['ANALYSISDIR'],'plots','fdense_plots')
if not os.path.exists(plotDir):
os.mkdir(plotDir)
# only look at dr1 galaxies
dr1 = degas['DR1'] == 1
ndr1 = np.sum(dr1)
# setup plot style
markers = ['o','v','^','s','*','D'] # 6 items
colors = ['royalblue','forestgreen','darkorange','royalblue','crimson','rebeccapurple','darkcyan','darkmagenta']
markerlist = np.tile(markers,int(np.ceil(ndr1/len(markers))))
markerlist = markerlist[0:ndr1]
colorlist = np.tile(colors,int(np.ceil(ndr1/len(colors))))
colorlist = colorlist[0:ndr1]
# set up plot
fig = plt.figure(figsize=(8,6),facecolor='white',edgecolor='white')
fig.subplots_adjust(left=0.1,right=0.8,bottom=0.1, top=0.9)
ax = fig.add_subplot(1,1,1)
# for each dr1 galaxy, show radial trends for each line.
for (galaxy,color,marker) in zip(degas[dr1],colorlist,markerlist):
idx = ( (stack['galaxy'] == galaxy['NAME']) \
& (stack['bin_type'] == 'stellarmass'))
mstar = stack[idx]['bin_mean']
lolims = stack[idx]['ratio_ltir_mean_HCN_lolim']
sfe_dense = stack[idx]['ratio_ltir_mean_HCN']
sfe_dense_err = stack[idx]['ratio_ltir_mean_HCN_err']
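# give censored points (lower limits) a nominal 30% error bar so the
# downward-pointing limit arrows render at a visible size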
sfe_dense_err[lolims] = sfe_dense[lolims] * 0.3
ax.errorbar(mstar, sfe_dense,
yerr = sfe_dense_err,
uplims = lolims,
marker = marker,
markerfacecolor='none',
markeredgecolor=color,
linestyle= '--',
color=color)
ax.scatter(mstar[~lolims], sfe_dense[~lolims],
marker=marker,
color=color,
label=galaxy['NAME'])
ax.set_yscale('log')
ax.set_xscale('log')
ax.legend(loc='upper left',bbox_to_anchor=(1.0,1.0))
ax.set_xlabel(r'$\Sigma_{*}$ (M$_\odot$ pc$^{-2}$)')
ax.set_ylabel(r'log$_{10}$ (IR-to-HCN)')
fig.show()
fig.savefig(os.path.join(plotDir,'sfe_dense_vs_mstar_combined.pdf'))
fig.savefig(os.path.join(plotDir,'sfe_dense_vs_mstar_combined.png'))
plt.close()
|
low-sky/degas
|
scripts/plot_SFEdense_vs_mstar_combined.py
|
Python
|
gpl-3.0
| 2,351
|
[
"Galaxy"
] |
091d46d64788bcc059eda2c29cfcb684dccf9c3b22c76a002e901e270db5b485
|
"""
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = sc.std_ * ledoit_wolf(X)[0] * sc.std_ # scale back
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or int type')
return s
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_features,)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
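# Illustrative check (not part of scikit-learn): two classes in one dimension.
# >>> _class_means(np.array([[0.], [2.], [10.]]), np.array([0, 0, 1]))
# array([[ 1.],
#        [10.]])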
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_features,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within (univariate) scaling by with classes std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=None, tol=None):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("The parameter 'store_covariance' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariance = store_covariance
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y, ensure_min_samples=2)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if self.priors_.sum() != 1:
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
store_covariances=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
def __init__(self, priors=None, reg_param=0., store_covariances=False,
tol=1.0e-4):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariances = store_covariances
self.tol = tol
def fit(self, X, y, store_covariances=None, tol=None):
"""Fit the model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
"""
if store_covariances:
warnings.warn("The parameter 'store_covariances' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariances = store_covariances
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if self.store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if self.store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
|
beepee14/scikit-learn
|
sklearn/discriminant_analysis.py
|
Python
|
bsd-3-clause
| 27,308
|
[
"Gaussian"
] |
bf199c3a631741a07394cfe909160e81badad2ebd7bfee5ff15f23c95f3b3b03
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyscf.pbc.cc import ccsd
from pyscf.pbc import scf
def KRCCSD(mf, frozen=None, mo_coeff=None, mo_occ=None):
from pyscf.pbc.mpicc import kccsd_rhf
mf = scf.addons.convert_to_rhf(mf)
return kccsd_rhf.RCCSD(mf, frozen)
KCCSD = KRCCSD
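# A minimal usage sketch; `kmf` is assumed to be a converged periodic
# restricted SCF object, and the usual PySCF CC kernel() entry point is
# assumed:
#
#     from pyscf.pbc.mpicc import KRCCSD
#     mycc = KRCCSD(kmf)
#     mycc.kernel()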
|
gkc1000/pyscf
|
pyscf/pbc/mpicc/__init__.py
|
Python
|
apache-2.0
| 862
|
[
"PySCF"
] |
ef18ca9bbf3f240124b9dc3e451a81e52d241279d40851647c08ad0707fe0688
|
import re
import discord
import Data
import deco
import utilities
import misc
import undecorated
import MemeMachine
import owner
client = discord.Client()
@client.event
async def on_ready():
print('Logged in !')
print('------------')
@client.event
async def on_message(message):
if message.author == client.user:
return
if message.content.startswith(Data.PREFIX):
if message.content.startswith(Data.MEME_REDIRECT):
await MemeMachine.meme_machine(client, message)
return
foo_name = message.content.split()[0][len(Data.PREFIX):]
try:
await deco.foo_dict[foo_name](client, message)
except KeyError:
print("Command function not found, must be a type request !")
pass
if re.fullmatch(Data.MBTI, message.content[len(Data.PREFIX):].lower()):
await undecorated.typeroles(client, message)
@client.event
async def on_member_join(member):
bot_message = client.get_channel('139718812705619968')
join = 'Welcome to this server, {}.\n'.format(member.mention)
join += 'If you know your type, you can set it using /your-type, e.g. /ESFP\n'
join += 'For a general guideline, please visit #code-of-conduct.\n'
join += 'Otherwise, have fun, and don\'t be shy to say hello, we are a friendly bunch of people :)'
await client.send_message(member.server, join)
await client.send_message(bot_message, '\'{}\' (UID: {}) has joined the server'.format(member.name, member.id))
print('\'{}\' (UID: {}) has joined the server'.format(member.name, member.id))
@client.event
async def on_member_remove(member):
bot_message = client.get_channel('139718812705619968')
await client.send_message(bot_message, '\'{}\' (UID: {}) has left the server'.format(member.name, member.id))
client.run(Data.TOKEN)
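# A minimal sketch of the Data module this bot expects (all values are
# illustrative; the MBTI pattern in particular is a guess at its shape):
#
#     PREFIX = "/"
#     MEME_REDIRECT = "/meme"
#     MBTI = "[ie][ns][tf][jp]"
#     TOKEN = "your-bot-token-here"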
|
zenAndroid/zenBot
|
zenBot.py
|
Python
|
mit
| 1,864
|
[
"VisIt"
] |
db4d37cc31c3b65fc7d0d264cbdaff1f4559dfcc5e029aa6646e72353a0f7977
|
#!/usr/bin/env python
#
# Data manager for reference data for the 'humann2' Galaxy tools
import datetime
import json
import optparse
import os
import shutil
import subprocess
import sys
HUMANN2_REFERENCE_DATA = {
"full": "Full",
"DEMO": "Demo",
"uniref50_diamond": "Full UniRef50",
"uniref50_ec_filtered_diamond": "EC-filtered UniRef50",
"uniref50_GO_filtered_rapsearch2": "GO filtered UniRef50 for rapsearch2",
"uniref90_diamond": "Full UniRef90",
"uniref90_ec_filtered_diamond": "EC-filtered UniRef90",
"DEMO_diamond": "Demo"
}
# Utility functions for interacting with Galaxy JSON
def read_input_json(jsonfile):
"""Read the JSON supplied from the data manager tool
Returns a tuple (param_dict, extra_files_path)
'param_dict' is an arbitrary dictionary of parameters
input into the tool; 'extra_files_path' is the path
to a directory where output files must be put for the
receiving data manager to pick them up.
NB the directory pointed to by 'extra_files_path'
doesn't exist initially; it is the job of the script
to create it if necessary.
"""
params = json.loads(open(jsonfile).read())
return (params['param_dict'],
params['output_data'][0]['extra_files_path'])
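# Illustrative shape of the JSON Galaxy supplies (values are placeholders):
#
#     {"param_dict": {"database": "chocophlan", "build": "full"},
#      "output_data": [{"extra_files_path": "/path/to/extra_files"}]}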
# Utility functions for creating data table dictionaries
#
# Example usage:
# >>> d = create_data_tables_dict()
# >>> add_data_table(d,'my_data')
# >>> add_data_table_entry(dict(dbkey='hg19',value='human'))
# >>> add_data_table_entry(dict(dbkey='mm9',value='mouse'))
# >>> print str(json.dumps(d))
def create_data_tables_dict():
"""Return a dictionary for storing data table information
Returns a dictionary that can be used with 'add_data_table'
and 'add_data_table_entry' to store information about a
data table. It can be converted to JSON to be sent back to
the data manager.
"""
d = {}
d['data_tables'] = {}
return d
def add_data_table(d, table):
"""Add a data table to the data tables dictionary
Creates a placeholder for a data table called 'table'.
"""
d['data_tables'][table] = []
def add_data_table_entry(d, table, entry):
"""Add an entry to a data table
Appends an entry to the data table 'table'. 'entry'
should be a dictionary where the keys are the names of
columns in the data table.
Raises an exception if the named data table doesn't
exist.
"""
try:
d['data_tables'][table].append(entry)
except KeyError:
raise Exception("add_data_table_entry: no table '%s'" % table)
def download_humann2_db(data_tables, table_name, database, build, target_dir):
"""Download HUMAnN2 database
Creates references to the specified file(s) on the Galaxy
server in the appropriate data table (determined from the
file extension).
The 'data_tables' dictionary should have been created using
the 'create_data_tables_dict' and 'add_data_table' functions.
Arguments:
data_tables: a dictionary containing the data table info
table_name: name of the table
database: database to download (chocophlan or uniref)
build: build of the database to download
target_dir: directory to put copy or link to the data file
"""
value = "%s-%s-%s" % (database, build, datetime.date.today().isoformat())
db_target_dir = os.path.join(target_dir, database)
build_target_dir = os.path.join(db_target_dir, build)
os.makedirs(build_target_dir)
cmd = "humann2_databases --download %s %s %s --update-config no" % (
database,
build,
db_target_dir)
subprocess.check_call(cmd, shell=True)
shutil.move(os.path.join(db_target_dir, database), build_target_dir)
add_data_table_entry(
data_tables,
table_name,
dict(
dbkey=build,
value=value,
name=HUMANN2_REFERENCE_DATA[build],
path=build_target_dir))
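# Illustrative sketch (assumed values, not produced by the original code):
# after download_humann2_db(data_tables, 'humann2_nucleotide_database',
# 'chocophlan', 'full', '/data'), the data tables dictionary would hold an
# entry roughly like
#
#   {"dbkey": "full", "value": "chocophlan-full-<today's date>",
#    "name": "Full", "path": "/data/chocophlan/full"}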
if __name__ == "__main__":
print("Starting...")
# Read command line
parser = optparse.OptionParser(description='Download HUMAnN2 database')
parser.add_option('--database', help="Database name")
parser.add_option('--build', help="Build of the database")
options, args = parser.parse_args()
print("args : %s" % args)
# Check for JSON file
if len(args) != 1:
sys.stderr.write("Need to supply JSON file name")
sys.exit(1)
jsonfile = args[0]
# Read the input JSON
params, target_dir = read_input_json(jsonfile)
# Make the target directory
print("Making %s" % target_dir)
os.mkdir(target_dir)
# Set up data tables dictionary
data_tables = create_data_tables_dict()
if options.database == "chocophlan":
table_name = 'humann2_nucleotide_database'
else:
table_name = 'humann2_protein_database'
add_data_table(data_tables, table_name)
# Fetch data from specified data sources
download_humann2_db(
data_tables,
table_name,
options.database,
options.build,
target_dir)
# Write output JSON
print("Outputting JSON")
print(str(json.dumps(data_tables)))
    open(jsonfile, 'w').write(json.dumps(data_tables))
print("Done.")
|
blankclemens/tools-iuc
|
data_managers/data_manager_humann2_database_downloader/data_manager/data_manager_humann2_download.py
|
Python
|
mit
| 5,238
|
[
"Galaxy"
] |
5d09d487e50fd27a6e03f2eeb0aaa1d3b5b0de740a5acb7f9b1e7cde59ca03aa
|
# -*- python -*-
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2001-2007 Donald N. Allingham, Martin Hawlisch
# Copyright (C) 2009 Yevgeny Zegzda <ezegjda@ya.ru>
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2013 Pat Lefebre
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# H-tree Pedigree View by Pat Lefebre (Based on Pedigree view)
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
from html import escape
import math
import os
import pickle
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GdkPixbuf
from gi.repository import PangoCairo
import cairo
#-------------------------------------------------------------------------
#
# Gramps Modules
#
#-------------------------------------------------------------------------
from gramps.gen.lib import ChildRef, ChildRefType, Family
from gramps.gui.views.navigationview import NavigationView
from gramps.gui.editors import FilterEditor
from gramps.gui.display import display_url
from gramps.gen.display.name import displayer as name_displayer
from gramps.gen.utils.alive import probably_alive
from gramps.gen.utils.file import media_path_full
from gramps.gen.utils.db import find_children, find_parents, find_witnessed_people
from gramps.gen.utils.libformatting import FormattingHelper
from gramps.gen.utils.thumbnails import get_thumbnail_path
from gramps.gen.errors import WindowActiveError
from gramps.gui.editors import EditPerson, EditFamily
from gramps.gui.ddtargets import DdTargets
from gramps.gen.config import config
from gramps.gui.views.bookmarks import PersonBookmarks
from gramps.gen.const import CUSTOM_FILTERS
from gramps.gen.constfunc import is_quartz, win
from gramps.gui.dialog import RunDatabaseRepair, ErrorDialog
from gramps.gui.utils import color_graph_box, hex_to_rgb_float, is_right_click
from gramps.gen.constfunc import lin
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_PAGE = 'https://gramps-project.org/wiki/index.php?title=Addon:HtreePedigreeView'
_PERSON = "p"
_BORN = _('short for born|b.')
_DIED = _('short for died|d.')
_BAPT = _('short for baptized|bap.')
_CHRI = _('short for christened|chr.')
_BURI = _('short for buried|bur.')
_CREM = _('short for cremated|crem.')
class _PersonWidgetBase(Gtk.DrawingArea):
"""
Default set up for person widgets.
Set up drag options and button release events.
"""
def __init__(self, view, format_helper, person):
Gtk.DrawingArea.__init__(self)
self.view = view
self.format_helper = format_helper
self.person = person
self.force_mouse_over = False
self.in_drag = False
self.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)
self.add_events(Gdk.EventMask.BUTTON_RELEASE_MASK)
if self.person:
self.connect("button-release-event", self.cb_on_button_release)
self.connect("drag_data_get", self.cb_drag_data_get)
self.connect("drag_begin", self.cb_drag_begin)
self.connect("drag_end", self.cb_drag_end)
# Enable drag
self.drag_source_set(Gdk.ModifierType.BUTTON1_MASK,
[],
Gdk.DragAction.COPY)
tglist = Gtk.TargetList.new([])
tglist.add(DdTargets.PERSON_LINK.atom_drag_type,
DdTargets.PERSON_LINK.target_flags,
DdTargets.PERSON_LINK.app_id)
            # allow drag to a text document; info on drag_data_get will be 0
tglist.add_text_targets(0)
self.drag_source_set_target_list(tglist)
def cb_drag_begin(self, widget, data):
"""Set up some inital conditions for drag. Set up icon."""
self.in_drag = True
self.drag_source_set_icon_name('gramps-person')
def cb_drag_end(self, widget, data):
"""Set up some inital conditions for drag. Set up icon."""
self.in_drag = False
def cb_drag_data_get(self, widget, context, sel_data, info, time):
"""
Returned parameters after drag.
Specified for 'person-link', for others return text info about person.
"""
tgs = [x.name() for x in context.list_targets()]
if info == DdTargets.PERSON_LINK.app_id:
data = (DdTargets.PERSON_LINK.drag_type,
id(self), self.person.get_handle(), 0)
sel_data.set(sel_data.get_target(), 8, pickle.dumps(data))
elif ('TEXT' in tgs or 'text/plain' in tgs) and info == 0:
sel_data.set_text(self.format_helper.format_person(self.person, 11), -1)
def cb_on_button_release(self, widget, event):
"""
Default action for release event from mouse.
Change active person to current.
"""
if self.in_drag:
return False
if event.button == 1 and event.type == Gdk.EventType.BUTTON_RELEASE:
self.view.cb_childmenu_changed(None, self.person.get_handle())
return True
return False
def get_image(self, dbstate, person):
"""
Return a thumbnail image for the given person.
"""
image_path = None
media_list = person.get_media_list()
if media_list:
photo = media_list[0]
object_handle = photo.get_reference_handle()
obj = dbstate.db.get_media_from_handle(
object_handle)
if obj:
mtype = obj.get_mime_type()
if mtype and mtype[0:5] == "image":
image_path = get_thumbnail_path(
media_path_full(
dbstate.db,
obj.get_path()),
rectangle=photo.get_rectangle())
return image_path
class PersonBoxWidgetCairo(_PersonWidgetBase):
"""Draw person box using cairo library"""
def __init__(self, view, format_helper, dbstate, person, alive, maxlines,
image=None, tags=False):
_PersonWidgetBase.__init__(self, view, format_helper, person)
self.set_size_request(120, 25)
# Required for tooltip and mouse-over
self.add_events(Gdk.EventMask.ENTER_NOTIFY_MASK)
# Required for tooltip and mouse-over
self.add_events(Gdk.EventMask.LEAVE_NOTIFY_MASK)
self.alive = alive
self.maxlines = maxlines
self.hightlight = False
self.connect("draw", self.draw)
self.text = ""
if self.person:
self.text = self.format_helper.format_person(self.person,
self.maxlines, True)
gender = self.person.get_gender()
else:
gender = None
self.bgcolor, self.bordercolor = color_graph_box(alive, gender)
if tags and person:
for tag_handle in person.get_tag_list():
# For the complete tag, don't modify the default color
# which is black (#000000000000)
tag = dbstate.db.get_tag_from_handle(tag_handle)
if tag.get_color() != "#000000000000": # only if the color
self.bgcolor = tag.get_color() # is not black
self.bgcolor = hex_to_rgb_float(self.bgcolor)
self.bordercolor = hex_to_rgb_float(self.bordercolor)
self.img_surf = None
if image:
image_path = self.get_image(dbstate, person)
if image_path and os.path.exists(image_path):
with open(image_path, 'rb') as image:
self.img_surf = cairo.ImageSurface.create_from_png(image)
# enable mouse-over
self.connect("enter-notify-event", self.cb_on_enter)
# enable mouse-out
self.connect("leave-notify-event", self.cb_on_leave)
self.context = None
self.textlayout = None
def cb_on_enter(self, widget, event):
"""On mouse-over highlight border"""
if self.person or self.force_mouse_over:
self.hightlight = True
self.queue_draw()
def cb_on_leave(self, widget, event):
"""On mouse-out normal border"""
self.hightlight = False
self.queue_draw()
def draw(self, widget, context):
"""
        Redraw the contents of the widget.
        Creates a new cairo drawing (shadow, background, image and border)
        and renders the text layout into it.
"""
def _boxpath(context, alloc):
# Create box shape and store path
#context.new_path()
context.move_to(0, 5)
context.curve_to(0, 2, 2, 0, 5, 0)
context.line_to(alloc.width-8, 0)
context.curve_to(alloc.width-5, 0,
alloc.width-3, 2,
alloc.width-3, 5)
context.line_to(alloc.width-3, alloc.height-8)
context.curve_to(alloc.width-3, alloc.height-5,
alloc.width-5, alloc.height-3,
alloc.width-8, alloc.height-3)
context.line_to(5, alloc.height-3)
context.curve_to(2, alloc.height-3,
0, alloc.height-5,
0, alloc.height-8)
context.close_path()
# pylint: disable-msg=E1101
minw = 120
minh = 25
alw = self.get_allocated_width()
alh = self.get_allocated_height()
if not self.textlayout:
self.textlayout = PangoCairo.create_layout(context)
if is_quartz():
PangoCairo.context_set_resolution(self.textlayout.get_context(), 72)
            # The following seems like it should work, but it doesn't
# font_desc = self.get_style_context().get_property(
# "font", Gtk.StateFlags.NORMAL)
font_desc = self.get_style_context().get_font(Gtk.StateFlags.NORMAL)
self.textlayout.set_font_description(font_desc)
self.textlayout.set_markup(self.text, -1)
size = self.textlayout.get_pixel_size()
xmin = size[0] + 12
ymin = size[1] + 11
if self.img_surf:
xmin += self.img_surf.get_width()
ymin = max(ymin, self.img_surf.get_height()+4)
self.set_size_request(max(xmin, minw), max(ymin, minh))
alloc = self.get_allocation()
alw = self.get_allocated_width()
alh = self.get_allocated_height()
# widget area for debugging
##context.rectangle(0, 0, alloc.width, alloc.height)
##context.set_source_rgb(1, 0, 1)
##context.fill_preserve()
##context.stroke()
# Create box shape and store path
context.save()
# shadow
context.translate(3, 3)
_boxpath(context, alloc)
context.set_source_rgba(*(self.bordercolor[:3] + (0.4,)))
context.fill_preserve()
context.set_line_width(0)
context.stroke()
context.restore()
context.save()
# box shape used for clipping
_boxpath(context, alloc)
context.clip()
# background (while clipped)
_boxpath(context, alloc)
context.set_source_rgb(*self.bgcolor[:3])
context.fill_preserve()
context.stroke()
# image
if self.img_surf:
context.set_source_surface(self.img_surf,
alloc.width-4-self.img_surf.get_width(), 1)
context.paint()
# Mark deceased
context.new_path()
if self.person and not self.alive:
context.set_source_rgb(0, 0, 0)
context.set_line_width(2)
context.move_to(0, 10)
context.line_to(10, 0)
context.stroke()
#border
_boxpath(context, alloc)
if self.hightlight:
context.set_line_width(5)
else:
context.set_line_width(2)
context.set_source_rgb(*self.bordercolor[:3])
context.stroke()
context.restore()
context.save()
# text
context.move_to(5, 4)
context.set_source_rgb(0, 0, 0)
PangoCairo.show_layout(context, self.textlayout)
context.restore()
context.get_target().flush()
class LineWidget(Gtk.DrawingArea):
"""
Draw lines linking Person boxes - Types A and C.
"""
def __init__(self, child, father, frel, mother, mrel, direction):
Gtk.DrawingArea.__init__(self)
self.child_box = child
self.father_box = father
self.mother_box = mother
self.frel = frel
self.mrel = mrel
self.direction = direction
self.connect("draw", self.expose)
def expose(self, widget, context):
"""
Redraw the contents of the widget.
"""
self.set_size_request(20, 20)
context.set_source_rgb(0.,0.,0.)
# pylint: disable-msg=E1101
alloc = self.get_allocation()
child = self.child_box.get_allocation()
if self.father_box:
father = self.father_box.get_allocation()
if self.mother_box:
mother = self.mother_box.get_allocation()
if self.direction in [2, 3]: # horizontal
child_side = 0
centre = alloc.width / 2
parent_side = alloc.width
middle = child.y - alloc.y + child.height / 2
if self.father_box:
father_side = father.height / 2
if self.mother_box:
mother_side = alloc.height - mother.height / 2
else:
child_side = 0
centre = alloc.height / 2
parent_side = alloc.height
middle = child.x - alloc.x + child.width / 2
if self.father_box:
father_side = father.width / 2
if self.mother_box:
mother_side = alloc.width - mother.width / 2
if self.direction in [1, 3]: # bottom to top or right to left
child_side = parent_side
parent_side = 0
if self.father_box:
self.draw_link(context, parent_side, middle, child_side, centre,
father_side, self.mrel)
if self.mother_box:
self.draw_link(context, parent_side, middle, child_side, centre,
mother_side, self.frel)
def draw_link(self, cr, parent_side, middle, child_side, centre, side, rela):
"""
Draw a link between parent and child.
"""
cr.set_line_width(3)
if rela:
cr.set_dash([], 0) #SOLID
else:
cr.set_dash([9.], 1) #DASH
self.draw_line(cr, parent_side, side, centre, side)
self.draw_line(cr, centre, side, centre, middle, True)
self.draw_line(cr, centre, middle, child_side, middle, True)
cr.stroke()
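        # Illustrative note (not in the original source): the link is drawn
        # as three joined segments - from the parent edge to the centre
        # line, along the centre line, and from the centre line to the
        # child - solid when the relation flag is set, dashed otherwise.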
def draw_line(self, cr, x_from, y_from, x_to, y_to, join=False):
"""
Draw a single line in a link.
"""
# pylint: disable-msg=E1101
if self.direction in [2, 3]: # horizontal
if not join:
cr.move_to(x_from, y_from)
cr.line_to(x_to, y_to)
else:
if not join:
cr.move_to(y_from, x_from)
cr.line_to(y_to, x_to)
class LineWidget2(Gtk.DrawingArea):
"""
Draw lines linking Person boxes - Type B.
"""
def __init__(self, male, rela, direction):
Gtk.DrawingArea.__init__(self)
self.male = male
self.rela = rela
self.direction = direction
self.connect("draw", self.expose)
def expose(self, widget, context):
"""
Redraw the contents of the widget.
"""
self.set_size_request(20, -1)
context.set_source_rgb(0.,0.,0.)
# pylint: disable-msg=E1101
alloc = self.get_allocation()
if self.direction in [2, 3]: # horizontal
child_x = alloc.width / 2
child_y = alloc.height
parent_x = alloc.width
parent_y = alloc.height / 2
mid_x = alloc.width / 2
mid_y = alloc.height / 2
else:
child_y = alloc.width
child_x = alloc.height / 2
parent_y = alloc.width / 2
parent_x = alloc.height
mid_y = alloc.width / 2
mid_x = alloc.height / 2
context.set_line_width(3)
if self.rela:
context.set_dash([], 0) #SOLID
else:
context.set_dash([9.], 1) #DASH
if self.direction in [1, 3]:
parent_x = 0
if not self.male:
child_y = 0
self.draw_line(context, child_x, child_y, mid_x, mid_y)
self.draw_line(context, mid_x, mid_y, parent_x, parent_y, True)
def draw_line(self, cr, x_from, y_from, x_to, y_to, join=False):
"""
Draw a single line in a link.
"""
# pylint: disable-msg=E1101
if self.direction in [2, 3]: # horizontal
if not join:
cr.move_to(x_from, y_from)
cr.line_to(x_to, y_to)
else:
if not join:
cr.move_to(y_from, x_from)
cr.line_to(y_to, x_to)
#-------------------------------------------------------------------------
#
# PedigreeView (Renamed to HtreePedigreeView by Pat Lefebre)
#
#-------------------------------------------------------------------------
class HtreePedigreeView(NavigationView):
"""
View for H-Tree pedigree tree.
Displays the ancestors of a selected individual.
"""
    # Settings in the config file. 'interface.pedview-layout' changed from 0 to 1 (compact view)
CONFIGSETTINGS = (
('interface.pedview-tree-size', 5),
('interface.pedview-layout', 1),
('interface.pedview-show-images', True),
('interface.pedview-show-marriage', True),
('interface.pedview-show-tags', False),
('interface.pedview-tree-direction', 2),
('interface.pedview-show-unknown-people', True),
)
FLEUR_CURSOR = Gdk.Cursor.new_for_display(Gdk.Display.get_default(),
Gdk.CursorType.FLEUR)
def __init__(self, pdata, dbstate, uistate, nav_group=0):
NavigationView.__init__(self, _('H-tree Pedigree View'), pdata, dbstate, uistate,
PersonBookmarks, nav_group)
self.dbstate = dbstate
self.dbstate.connect('database-changed', self.change_db)
uistate.connect('nameformat-changed', self.person_rebuild)
self.format_helper = FormattingHelper(self.dbstate)
# Depth of tree.
self._depth = 1
# Variables for drag and scroll
self._last_x = 0
self._last_y = 0
self._in_move = False
self.key_active_changed = None
# GTK objects
self.scrolledwindow = None
self.table = None
self.additional_uis.append(self.additional_ui)
# Automatic resize
self.force_size = self._config.get('interface.pedview-tree-size')
# Nice tree
self.tree_style = self._config.get('interface.pedview-layout')
# Show photos of persons
self.show_images = self._config.get('interface.pedview-show-images')
# Hide marriage data by default
self.show_marriage_data = self._config.get(
'interface.pedview-show-marriage')
# Show person with tag color
self.show_tag_color = self._config.get('interface.pedview-show-tags')
# Tree draw direction
self.tree_direction = self._config.get('interface.pedview-tree-direction')
self.cb_change_scroll_direction(None, self.tree_direction < 2)
        # Show or hide unknown people.
        # Hiding them speeds up the display of large trees.
self.show_unknown_people = self._config.get(
'interface.pedview-show-unknown-people')
def get_handle_from_gramps_id(self, gid):
"""
returns the handle of the specified object
"""
obj = self.dbstate.db.get_person_from_gramps_id(gid)
if obj:
return obj.get_handle()
else:
return None
def change_page(self):
"""Called when the page changes."""
NavigationView.change_page(self)
self.uistate.clear_filter_results()
if self.dirty:
self.rebuild_trees(self.get_active())
def get_stock(self):
"""
The category stock icon
"""
return 'gramps-pedigree'
def get_viewtype_stock(self):
"""Type of view in category
"""
return 'gramps-pedigree'
def build_widget(self):
"""
Builds the interface and returns a Gtk.Container type that
        contains the interface. This container will be inserted into
a Gtk.ScrolledWindow page.
"""
self.scrolledwindow = Gtk.ScrolledWindow(hadjustment=None,
vadjustment=None)
self.scrolledwindow.set_policy(Gtk.PolicyType.AUTOMATIC,
Gtk.PolicyType.AUTOMATIC)
self.scrolledwindow.add_events(Gdk.EventMask.SCROLL_MASK)
self.scrolledwindow.connect("scroll-event", self.cb_bg_scroll_event)
event_box = Gtk.EventBox()
# Required for drag-scroll events and popup menu
event_box.add_events(Gdk.EventMask.BUTTON_PRESS_MASK
| Gdk.EventMask.BUTTON_RELEASE_MASK
| Gdk.EventMask.BUTTON1_MOTION_MASK)
# Signal begin drag-scroll
event_box.connect("button-press-event", self.cb_bg_button_press)
# Signal end drag-scroll and popup menu
event_box.connect("button-release-event", self.cb_bg_button_release)
        # Signal to control motion-notify while the left mouse button is pressed
event_box.connect("motion-notify-event", self.cb_bg_motion_notify_event)
self.scrolledwindow.add(event_box)
self.table = Gtk.Grid()
# force LTR layout of the tree, even though the text might be RTL!
# this way the horizontal scroll preferences will be correct always
if self.table.get_direction() == Gtk.TextDirection.RTL:
self.table.set_direction(Gtk.TextDirection.LTR)
self.table.set_halign(Gtk.Align.END)
event_box.add(self.table)
event_box.get_parent().set_shadow_type(Gtk.ShadowType.NONE)
self.table.set_row_spacing(1)
self.table.set_column_spacing(0)
return self.scrolledwindow
additional_ui = [ # Defines the UI string for UIManager
'''
<placeholder id="CommonGo">
<section>
<item>
<attribute name="action">win.Back</attribute>
<attribute name="label" translatable="yes">_Back</attribute>
</item>
<item>
<attribute name="action">win.Forward</attribute>
<attribute name="label" translatable="yes">_Forward</attribute>
</item>
</section>
<section>
<item>
<attribute name="action">win.HomePerson</attribute>
<attribute name="label" translatable="yes">_Home</attribute>
</item>
</section>
</placeholder>
''',
'''
<placeholder id='otheredit'>
<item>
<attribute name="action">win.FilterEdit</attribute>
<attribute name="label" translatable="yes">'''
'''Person Filter Editor</attribute>
</item>
</placeholder>
''',
'''
<section id="AddEditBook">
<item>
<attribute name="action">win.AddBook</attribute>
<attribute name="label" translatable="yes">_Add Bookmark</attribute>
</item>
<item>
<attribute name="action">win.EditBook</attribute>
<attribute name="label" translatable="no">%s...</attribute>
</item>
</section>
''' % _('Organize Bookmarks'), # Following are the Toolbar items
'''
<placeholder id='CommonNavigation'>
<child groups='RO'>
<object class="GtkToolButton">
<property name="icon-name">go-previous</property>
<property name="action-name">win.Back</property>
<property name="tooltip_text" translatable="yes">'''
'''Go to the previous object in the history</property>
<property name="label" translatable="yes">_Back</property>
<property name="use-underline">True</property>
</object>
<packing>
<property name="homogeneous">False</property>
</packing>
</child>
<child groups='RO'>
<object class="GtkToolButton">
<property name="icon-name">go-next</property>
<property name="action-name">win.Forward</property>
<property name="tooltip_text" translatable="yes">'''
'''Go to the next object in the history</property>
<property name="label" translatable="yes">_Forward</property>
<property name="use-underline">True</property>
</object>
<packing>
<property name="homogeneous">False</property>
</packing>
</child>
<child groups='RO'>
<object class="GtkToolButton">
<property name="icon-name">go-home</property>
<property name="action-name">win.HomePerson</property>
<property name="tooltip_text" translatable="yes">'''
'''Go to the default person</property>
<property name="label" translatable="yes">_Home</property>
<property name="use-underline">True</property>
</object>
<packing>
<property name="homogeneous">False</property>
</packing>
</child>
</placeholder>
''']
def define_actions(self):
"""
Required define_actions function for PageView. Builds the action
group information required. We extend beyond the normal here,
since we want to have more than one action group for the PersonView.
Most PageViews really won't care about this.
Special action groups for Forward and Back are created to allow the
handling of navigation buttons. Forward and Back allow the user to
        advance or retreat through the history, and we want to be able to
        toggle these when you are at the end or the beginning of the history.
"""
NavigationView.define_actions(self)
self._add_action('FilterEdit', self.cb_filter_editor)
self._add_action('PRIMARY-J', self.jump, '<PRIMARY>J')
self._add_action('F2', self.kb_goto_home, 'F2')
def cb_filter_editor(self, *obj):
"""
Display the person filter editor.
"""
try:
FilterEditor('Person', CUSTOM_FILTERS,
self.dbstate, self.uistate)
except WindowActiveError:
return
def build_tree(self):
"""
This is called by the parent class when the view becomes visible. Since
all handling of visibility is now in rebuild_trees, see that for more
information.
"""
try:
active = self.get_active()
if active:
self.rebuild_trees(active)
else:
self.rebuild_trees(None)
except AttributeError as msg:
RunDatabaseRepair(str(msg),
parent=self.uistate.window)
def _connect_db_signals(self):
"""
Connect database signals.
"""
self._add_db_signal('person-add', self.person_rebuild)
self._add_db_signal('person-update', self.person_rebuild)
self._add_db_signal('person-delete', self.person_rebuild)
self._add_db_signal('person-rebuild', self.person_rebuild_bm)
self._add_db_signal('family-update', self.person_rebuild)
self._add_db_signal('family-add', self.person_rebuild)
self._add_db_signal('family-delete', self.person_rebuild)
self._add_db_signal('family-rebuild', self.person_rebuild)
def change_db(self, db):
"""
Callback associated with DbState. Whenever the database
changes, this task is called. In this case, we rebuild the
columns, and connect signals to the connected database. Tree
is no need to store the database, since we will get the value
from self.state.db
"""
self._change_db(db)
if self.active:
self.bookmarks.redraw()
self.build_tree()
def navigation_type(self):
"""
Indicates the navigation type. Navigation type can be the string
name of any of the primary objects.
"""
return 'Person'
def can_configure(self):
"""
See :class:`~gui.views.pageview.PageView
:return: bool
"""
return True
def on_delete(self):
self._config.save()
NavigationView.on_delete(self)
def on_help_clicked(self, dummy):
""" Button: Display the relevant portion of Gramps manual"""
display_url(WIKI_PAGE)
def goto_handle(self, handle=None):
"""
Rebuild the tree with the given person handle as the root.
"""
self.dirty = True
if handle:
person = self.dbstate.db.get_person_from_handle(handle)
if person:
self.rebuild_trees(handle)
else:
self.rebuild_trees(None)
else:
self.rebuild_trees(None)
self.uistate.modify_statusbar(self.dbstate)
def person_rebuild_bm(self, dummy=None):
"""Large change to person database"""
self.person_rebuild(dummy)
if self.active:
self.bookmarks.redraw()
def person_rebuild(self, dummy=None):
"""Callback function for signals of change database."""
self.format_helper.clear_cache()
self.dirty = True
if self.active:
self.rebuild_trees(self.get_active())
def rebuild_trees(self, person_handle):
"""
        Rebuild the tree with person_handle as the root.
        Called from many functions when a full tree redraw is needed.
"""
person = None
if person_handle:
person = self.dbstate.db.get_person_from_handle(person_handle)
self.dirty = False
# Increase self.force_size to 6 to allow 6 gen h-tree (by PL)
if self.tree_style == 1 and (
self.force_size > 6 or self.force_size == 0):
self.force_size = 6
############ Replaced all tuples with H-tree tuples and added 6 gen level (by Patsyblefebre) ########
# A position definition is a tuple of nodes.
# Each node consists of a tuple of:
# (person box rectangle, connection, marriage box rectangle)
# A rectangle is a tuple of the format (x, y, width, height)
        # A connection is either a line or a tuple of two lines.
# A line is of the format (x, y, height). Lines have a width of 1.
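        # For example, the first node below, ((13,14,4,4), None, None),
        # places the root person's box at grid position (13, 14) with a
        # width and height of 4 cells, and no connection or marriage box.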
if self.tree_style == 1:
if self.force_size == 2:
pos = (((13,14,4,4), None, None),
((13,6,4,4), None, None),
((13,22,4,4), None, None))
elif self.force_size == 3:
pos = (((13,14,4,4), None, None),
((13,6,4,4), None, None),
((13,22,4,4), None, None),
((5,6,4,4), None, None),
((21,6,4,4), None, None),
((5,22,4,4), None, None),
((21,22,4,4), None, None))
elif self.force_size == 4:
pos = (((13,14,4,4), None, None),
((13,6,4,4), None, None),
((13,22,4,4), None, None),
((5,6,4,4), None, None),
((21,6,4,4), None, None),
((5,22,4,4), None, None),
((21,22,4,4), None, None),
((5,2,4,4), None, None),
((5,10,4,4), None, None),
((21,2,4,4), None, None),
((21,10,4,4), None, None),
((5,18,4,4), None, None),
((5,26,4,4), None, None),
((21,18,4,4), None, None),
((21,26,4,4), None, None))
elif self.force_size == 5:
pos = (((13,14,4,4), None, None),
((13,6,4,4), None, None),
((13,22,4,4), None, None),
((5,6,4,4), None, None),
((21,6,4,4), None, None),
((5,22,4,4), None, None),
((21,22,4,4), None, None),
((5,2,4,4), None, None),
((5,10,4,4), None, None),
((21,2,4,4), None, None),
((21,10,4,4), None, None),
((5,18,4,4), None, None),
((5,26,4,4), None, None),
((21,18,4,4), None, None),
((21,26,4,4), None, None),
((0,2,4,4), None, None),
((10,2,4,4), None, None),
((0,10,4,4), None, None),
((10,10,4,4), None, None),
((16,2,4,4), None, None),
((26,2,4,4), None, None),
((16,10,4,4), None, None),
((26,10,4,4), None, None),
((0,18,4,4), None, None),
((10,18,4,4), None, None),
((0,26,4,4), None, None),
((10,26,4,4), None, None),
((16,18,4,4), None, None),
((26,18,4,4), None, None),
((16,26,4,4), None, None),
((26,26,4,4), None, None))
elif self.force_size == 6:
pos = (((13,14,4,4), None, None),
((13,6,4,4), None, None),
((13,22,4,4), None, None),
((5,6,4,4), None, None),
((21,6,4,4), None, None),
((5,22,4,4), None, None),
((21,22,4,4), None, None),
((5,2,4,4), None, None),
((5,10,4,4), None, None),
((21,2,4,4), None, None),
((21,10,4,4), None, None),
((5,18,4,4), None, None),
((5,26,4,4), None, None),
((21,18,4,4), None, None),
((21,26,4,4), None, None),
((0,2,4,4), None, None),
((10,2,4,4), None, None),
((0,10,4,4), None, None),
((10,10,4,4), None, None),
((16,2,4,4), None, None),
((26,2,4,4), None, None),
((16,10,4,4), None, None),
((26,10,4,4), None, None),
((0,18,4,4), None, None),
((10,18,4,4), None, None),
((0,26,4,4), None, None),
((10,26,4,4), None, None),
((16,18,4,4), None, None),
((26,18,4,4), None, None),
((16,26,4,4), None, None),
((26,26,4,4), None, None),
((0,0,4,1), None, None),
((0,4,4,1), None, None),
((10,0,4,1), None, None),
((10,4,4,1), None, None),
((0,8,4,1), None, None),
((0,12,4,1), None, None),
((10,8,4,1), None, None),
((10,12,4,1), None, None),
((16,0,4,1), None, None),
((16,4,4,1), None, None),
((26,0,4,1), None, None),
((26,4,4,1), None, None),
((16,8,4,1), None, None),
((16,12,4,1), None, None),
((26,8,4,1), None, None),
((26,12,4,1), None, None),
((0,16,4,1), None, None),
((0,20,4,1), None, None),
((10,16,4,1), None, None),
((10,20,4,1), None, None),
((0,24,4,1), None, None),
((0,28,4,1), None, None),
((10,24,4,1), None, None),
((10,28,4,1), None, None),
((16,16,4,1), None, None),
((16,20,4,1), None, None),
((26,16,4,1), None, None),
((26,20,4,1), None, None),
((16,24,4,1), None, None),
((16,28,4,1), None, None),
((26,24,4,1), None, None),
((26,28,4,1), None, None))
else:
pos = None
# Build ancestor tree only one for all different sizes
self._depth = 1
lst = [None] * (2**self.force_size)
self.find_tree(person, 0, 1, lst)
# Purge current table content
for child in self.table.get_children():
child.destroy()
##self.table = Gtk.Grid()
if person:
self.rebuild(self.table, pos, lst, self.force_size)
def rebuild(self, table_widget, positions, lst, size):
"""
Function called from rebuild_trees.
        Places the list of persons into table_widget (a Gtk.Grid), using the
        positions array. For style C the positions are calculated; the other
        styles use static positions.
        All display options are processed in this function.
"""
# Calculate maximum table size
xmax = 0
ymax = 0
if self.tree_style == 0:
xmax = 2 * size
ymax = 2 ** size
elif self.tree_style == 1:
xmax = 2 * size + 2
# Add 71 to ymax list of numbers to allow 6 gen h-tree (by PL)
ymax = [0, 10, 14, 16, 32, 71][size - 1]
elif self.tree_style == 2:
            # For style C, reduce the tree depth if the real depth is less than the maximum.
if self.show_unknown_people:
self._depth += 1
if size > self._depth:
size = self._depth
xmax = 2 * size
ymax = 2 ** size * 2
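        # For example (illustrative): with style A (tree_style == 0) and
        # size 4 the grid is 2*4 = 8 columns by 2**4 = 16 rows, before the
        # tree direction is applied.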
pbw = None
for i in range(0, 2 ** size - 1):
####################################################################
# Table placement for person data
####################################################################
if self.tree_style in [0, 2]:
# Dynamic position person in tree
width = _width = 1
height = _height = 3
level = int(math.log(i+1, 2))
offset = i + 1 - (2**level)
if self.tree_style == 0:
_delta = (2**size) // (2**level)
else:
_delta = (2**size) // (2**level) * 2
x_pos = (1 + _width) * level + 1
y_pos = _delta // 2 + offset * _delta - 1
if self.tree_style == 0 and level == size - 1:
y_pos = _delta // 2 + offset * _delta
height = _height = 1
else:
try:
x_pos = positions[i][0][0]+1
y_pos = positions[i][0][1]+1
width = positions[i][0][2]
height = positions[i][0][3]
except IndexError: # no position for this person defined
continue
last_pbw = pbw
pbw = None
if not lst[i] and (
(self.tree_style in [0, 2] and self.show_unknown_people and
lst[((i+1) // 2) - 1]) or self.tree_style == 1):
#
# No person -> show empty box
#
pbw = PersonBoxWidgetCairo(self, self.format_helper,
self.dbstate, None, False, 0, None,
tags=self.show_tag_color)
if i > 0 and lst[((i+1) // 2) - 1]:
fam_h = None
fam = lst[((i+1) // 2) - 1][2]
if fam:
fam_h = fam.get_handle()
if not self.dbstate.db.readonly:
pbw.connect("button-press-event",
self.cb_missing_parent_button_press,
lst[((i+1) // 2) - 1][0].get_handle(), fam_h)
pbw.force_mouse_over = True
elif lst[i]:
#
# Person exists -> populate box
#
image = False
if self.show_images and height > 1 and (
i < ((2**size-1) // 2) or self.tree_style == 2):
image = True
pbw = PersonBoxWidgetCairo(self, self.format_helper,
self.dbstate, lst[i][0], lst[i][3], height, image,
tags=self.show_tag_color)
lst[i][4] = pbw
if height < 7:
pbw.set_tooltip_text(self.format_helper.format_person(
lst[i][0], 11))
fam_h = None
if lst[i][2]:
fam_h = lst[i][2].get_handle()
pbw.connect("button-press-event",
self.cb_person_button_press,
lst[i][0].get_handle(), fam_h)
if pbw:
self.attach_widget(table_widget, pbw, xmax,
x_pos, x_pos+width, y_pos, y_pos+height)
####################################################################
# Connection lines
####################################################################
if self.tree_style == 1 and (
positions[i][1] and len(positions[i][1]) == 2):
# separate boxes for father and mother
x_pos = positions[i][1][0][0]+1
y_pos = positions[i][1][0][1]+1
width = 1
height = positions[i][1][0][2]
rela = False
if lst[2*i+1]: # Father
rela = lst[2*i+1][1]
line = LineWidget2(1, rela, self.tree_direction)
if lst[i] and lst[i][2]:
# Required for popup menu
line.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)
line.connect("button-press-event",
self.cb_relation_button_press,
lst[i][2].get_handle())
self.attach_widget(table_widget, line, xmax,
x_pos, x_pos+width, y_pos, y_pos+height)
x_pos = positions[i][1][1][0]+1
y_pos = positions[i][1][1][1]+1
rela = False
if lst[2*i+2]: # Mother
rela = lst[2*i+2][1]
line = LineWidget2(0, rela, self.tree_direction)
if lst[i] and lst[i][2]:
# Required for popup menu
line.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)
line.connect("button-press-event",
self.cb_relation_button_press,
lst[i][2].get_handle())
self.attach_widget(table_widget, line, xmax,
x_pos, x_pos+width, y_pos, y_pos+height)
elif self.tree_style in [0, 2] and lst[((i+1) // 2) - 1]:
# combined for father and mother
x_pos = (1 + _width) * level
y_pos = offset * _delta - (_delta // 2) - 1
width = 1
height = _delta + 3
if self.tree_style == 0 and level == size - 1:
height -= 2
y_pos += 1
if i > 0 and i % 2 == 0 and (pbw or last_pbw):
frela = mrela = None
if lst[i]:
frela = lst[i][1]
if lst[i - 1]:
mrela = lst[i-1][1]
line = LineWidget(lst[((i+1) // 2) - 1][4],
last_pbw, frela,
pbw, mrela,
self.tree_direction)
if lst[((i+1) // 2) - 1] and lst[((i+1) // 2) - 1][2]:
# Required for popup menu
line.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)
line.connect("button-press-event",
self.cb_relation_button_press,
lst[((i+1) // 2) - 1][2].get_handle())
# Required for tooltip and mouse-over
line.add_events(Gdk.EventMask.ENTER_NOTIFY_MASK)
# Required for tooltip and mouse-over
line.add_events(Gdk.EventMask.LEAVE_NOTIFY_MASK)
line.set_tooltip_text(
self.format_helper.format_relation(
lst[((i+1) // 2) - 1][2], 11))
self.attach_widget(table_widget, line, xmax,
x_pos, x_pos+width, y_pos, y_pos+height)
####################################################################
# Show marriage data
####################################################################
if self.show_marriage_data and (
self.tree_style == 1 and positions[i][2] or
(self.tree_style in [0, 2] and level+1 < size)):
if lst[i] and lst[i][2]:
text = self.format_helper.format_relation(lst[i][2], 1, True)
else:
text = " "
label = Gtk.Label(label=text)
label.set_justify(Gtk.Justification.LEFT)
label.set_use_markup(True)
label.set_line_wrap(True)
label.set_halign(Gtk.Align.START)
if self.tree_style in [0, 2]:
x_pos = (1 + _width) * (level + 1) + 1
y_pos = _delta // 2 + offset * _delta -1 + _height // 2
width = 1
height = 1
if self.tree_style == 0 and level < 2 and size > 4:
# Boxes can be bigger for lowest levels on larger trees.
y_pos -= 2
height += 4
else:
x_pos = positions[i][2][0]+1
y_pos = positions[i][2][1]+1
width = positions[i][2][2]
height = positions[i][2][3]
self.attach_widget(table_widget, label, xmax,
x_pos, x_pos+width, y_pos, y_pos+height)
# add dummy widgets into the corners of the table
# to allow the pedigree to be centered
## label = Gtk.Label(label="")
## table_widget.attach(label, 0, 1, 0, 1,
## Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL,
## Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL, 0, 0)
## label = Gtk.Label(label="")
## if self.tree_direction in [2, 3]:
## table_widget.attach(label, xmax, xmax+1, ymax, ymax+1,
## Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL,
## Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL, 0, 0)
## else:
## table_widget.attach(label, ymax, ymax+1, xmax, xmax+1,
## Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL,
## Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL, 0, 0)
debug = False
if debug:
used_cells = {}
xmax = 0
ymax = 0
# iterate table to see which cells are used.
for child in table_widget.get_children():
left = table_widget.child_get_property(child, "left-attach")
right = table_widget.child_get_property(child, "right-attach")
top = table_widget.child_get_property(child, "top-attach")
bottom = table_widget.child_get_property(child, "bottom-attach")
for x_pos in range(left, right):
for y_pos in range(top, bottom):
try:
used_cells[x_pos][y_pos] = True
except KeyError:
used_cells[x_pos] = {}
used_cells[x_pos][y_pos] = True
if y_pos > ymax:
ymax = y_pos
if x_pos > xmax:
xmax = x_pos
for x_pos in range(0, xmax+1):
for y_pos in range(0, ymax+1):
try:
tmp = used_cells[x_pos][y_pos]
except KeyError:
# fill unused cells
label = Gtk.Label(label="%d,%d"%(x_pos, y_pos))
frame = Gtk.ScrolledWindow(hadjustment=None,
vadjustment=None)
frame.set_shadow_type(Gtk.ShadowType.NONE)
frame.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.NEVER)
frame.add(label)
table_widget.attach(frame, x_pos, y_pos, 1, 1)
table_widget.show_all()
# Setup scrollbars for view root person
window = table_widget.get_parent().get_parent().get_parent()
hadjustment = window.get_hadjustment()
vadjustment = window.get_vadjustment()
if self.tree_direction == 2:
self.update_scrollbar_positions(hadjustment, hadjustment.get_lower())
self.update_scrollbar_positions(vadjustment,
(vadjustment.get_upper() - vadjustment.get_page_size()) / 2)
elif self.tree_direction == 0:
self.update_scrollbar_positions(hadjustment,
(hadjustment.get_upper() - hadjustment.get_page_size()) / 2)
self.update_scrollbar_positions(vadjustment,
vadjustment.get_upper() - vadjustment.get_page_size())
elif self.tree_direction == 1:
self.update_scrollbar_positions(hadjustment,
(hadjustment.get_upper() - hadjustment.get_page_size()) / 2)
self.update_scrollbar_positions(vadjustment, vadjustment.get_lower())
elif self.tree_direction == 3:
self.update_scrollbar_positions(hadjustment,
hadjustment.get_upper() - hadjustment.get_page_size())
self.update_scrollbar_positions(vadjustment,
(vadjustment.get_upper() - vadjustment.get_page_size()) / 2)
# Setup mouse wheel scroll direction for style C,
# depending of tree direction
if self.tree_direction in [0, 1]:
self.cb_change_scroll_direction(None, True)
elif self.tree_direction in [2, 3]:
self.cb_change_scroll_direction(None, False)
def attach_widget(self, table, widget, xmax, right, left, top, bottom):
"""
Attach a widget to the table.
"""
if self.tree_direction == 0: # Vertical (top to bottom)
table.attach(widget, top, right, bottom-top, left-right)
elif self.tree_direction == 1: # Vertical (bottom to top)
table.attach(widget, top, xmax - left + 1, bottom-top, left - right)
elif self.tree_direction == 2: # Horizontal (left to right)
table.attach(widget, right, top, left-right, bottom-top)
elif self.tree_direction == 3: # Horizontal (right to left)
table.attach(widget, xmax - left + 1, top, left - right, bottom-top)
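    # Illustrative note (not in the original source): for the vertical
    # directions (0 and 1) the grid coordinates are transposed, and for the
    # reversed directions (1 and 3) positions are mirrored against xmax, so
    # the same position tuples serve all four layout directions.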
def cb_home(self, menuitem):
"""Change root person to default person for database."""
defperson = self.dbstate.db.get_default_person()
if defperson:
self.change_active(defperson.get_handle())
def cb_edit_person(self, obj, person_handle):
"""
Open edit person window for person_handle.
Called after double click or from submenu.
"""
person = self.dbstate.db.get_person_from_handle(person_handle)
if person:
try:
EditPerson(self.dbstate, self.uistate, [], person)
except WindowActiveError:
return True
return True
return False
def cb_edit_family(self, obj, family_handle):
"""
        Open the edit family window for family_handle.
Called after double click or from submenu.
"""
family = self.dbstate.db.get_family_from_handle(family_handle)
if family:
try:
EditFamily(self.dbstate, self.uistate, [], family)
except WindowActiveError:
return True
return True
return False
def cb_add_parents(self, obj, person_handle, family_handle):
"""Edit not full family."""
if family_handle: # one parent already exists -> Edit current family
family = self.dbstate.db.get_family_from_handle(family_handle)
else: # no parents -> create new family
family = Family()
childref = ChildRef()
childref.set_reference_handle(person_handle)
family.add_child_ref(childref)
try:
EditFamily(self.dbstate, self.uistate, [], family)
except WindowActiveError:
return
def cb_copy_person_to_clipboard(self, obj, person_handle):
"""
Renders the person data into some lines of text and
puts that into the clipboard
"""
person = self.dbstate.db.get_person_from_handle(person_handle)
if person:
clipboard = Gtk.Clipboard.get_for_display(Gdk.Display.get_default(),
Gdk.SELECTION_CLIPBOARD)
clipboard.set_text(self.format_helper.format_person(person, 11), -1)
return True
return False
def cb_copy_family_to_clipboard(self, obj, family_handle):
"""
Renders the family data into some lines of text and
puts that into the clipboard
"""
family = self.dbstate.db.get_family_from_handle(family_handle)
if family:
clipboard = Gtk.Clipboard.get_for_display(Gdk.Display.get_default(),
Gdk.SELECTION_CLIPBOARD)
clipboard.set_text(self.format_helper.format_relation(family, 11), -1)
return True
return False
def cb_on_show_option_menu(self, obj, event, data=None):
"""Right click option menu."""
self.menu = Gtk.Menu()
self.menu.set_reserve_toggle_size(False)
self.add_nav_portion_to_menu(self.menu)
self.add_settings_to_menu(self.menu)
self.menu.popup(None, None, None, None, 0, event.time)
return True
def cb_bg_button_press(self, widget, event):
"""
        Enter scroll mode when a mouse button is pressed on the background,
        or open the option menu.
"""
if event.button == 1 and event.type == Gdk.EventType.BUTTON_PRESS:
widget.get_window().set_cursor(self.FLEUR_CURSOR)
self._last_x = event.x
self._last_y = event.y
self._in_move = True
return True
elif is_right_click(event):
self.cb_on_show_option_menu(widget, event)
return True
return False
def cb_bg_button_release(self, widget, event):
"""Exit from scroll mode when button release."""
if event.button == 1 and event.type == Gdk.EventType.BUTTON_RELEASE:
self.cb_bg_motion_notify_event(widget, event)
widget.get_window().set_cursor(None)
self._in_move = False
return True
return False
def cb_bg_motion_notify_event(self, widget, event):
"""Function for motion notify events for drag and scroll mode."""
if self._in_move and (event.type == Gdk.EventType.MOTION_NOTIFY or
event.type == Gdk.EventType.BUTTON_RELEASE):
window = widget.get_parent()
hadjustment = window.get_hadjustment()
vadjustment = window.get_vadjustment()
self.update_scrollbar_positions(vadjustment,
vadjustment.get_value() - (event.y - self._last_y))
self.update_scrollbar_positions(hadjustment,
hadjustment.get_value() - (event.x - self._last_x))
return True
return False
def update_scrollbar_positions(self, adjustment, value):
"""Controle value then try setup in scrollbar."""
if value > (adjustment.get_upper() - adjustment.get_page_size()):
adjustment.set_value(adjustment.get_upper() - adjustment.get_page_size())
else:
adjustment.set_value(value)
return True
def cb_bg_scroll_event(self, widget, event):
"""
        Change the scroll direction to horizontal
        if self.scroll_direction is set.
"""
if self.scroll_direction and event.type == Gdk.EventType.SCROLL:
if event.direction == Gdk.ScrollDirection.UP:
event.direction = Gdk.ScrollDirection.LEFT
elif event.direction == Gdk.ScrollDirection.DOWN:
event.direction = Gdk.ScrollDirection.RIGHT
return False
def cb_person_button_press(self, obj, event, person_handle, family_handle):
"""
        Open the person editor on a left double click on a person,
        or the person submenu on a right click.
        Also absorbs other button presses on the person widget.
"""
if is_right_click(event):
self.cb_build_full_nav_menu(obj, event,
person_handle, family_handle)
return True
elif event.button == 1 and event.type == Gdk.EventType._2BUTTON_PRESS:
self.cb_edit_person(obj, person_handle)
return True
return True
def cb_relation_button_press(self, obj, event, family_handle):
"""
        Open the family editor on a left double click on a family line,
        or the full submenu on a right click.
        Also absorbs other button presses on the family line.
"""
if is_right_click(event):
self.cb_build_relation_nav_menu(obj, event, family_handle)
return True
elif event.button == 1 and event.type == Gdk.EventType._2BUTTON_PRESS:
self.cb_edit_family(obj, family_handle)
return True
return True
def cb_missing_parent_button_press(self, obj, event,
person_handle, family_handle):
"""
        Open the editor for the incomplete family on a left double click on
        a missing person, or the submenu on a right click.
"""
if event.button == 1 and event.type == Gdk.EventType._2BUTTON_PRESS:
self.cb_add_parents(obj, person_handle, family_handle)
return True
elif is_right_click(event):
self.cb_build_missing_parent_nav_menu(obj, event, person_handle,
family_handle)
return True
return False
def cb_on_show_child_menu(self, obj):
"""User clicked button to move to child of active person"""
person = self.dbstate.db.get_person_from_handle(self.get_active())
if person:
# Build and display the menu attached to the left pointing arrow
# button. The menu consists of the children of the current root
# person of the tree. Attach a child to each menu item.
childlist = find_children(self.dbstate.db, person)
if len(childlist) == 1:
child = self.dbstate.db.get_person_from_handle(childlist[0])
if child:
self.change_active(childlist[0])
elif len(childlist) > 1:
self.my_menu = Gtk.Menu()
self.my_menu.set_reserve_toggle_size(False)
for child_handle in childlist:
child = self.dbstate.db.get_person_from_handle(child_handle)
cname = escape(name_displayer.display(child))
if find_children(self.dbstate.db, child):
label = Gtk.Label(label='<b><i>%s</i></b>' % cname)
else:
label = Gtk.Label(label=cname)
label.set_use_markup(True)
label.show()
label.set_halign(Gtk.Align.START)
menuitem = Gtk.MenuItem()
menuitem.add(label)
self.my_menu.append(menuitem)
menuitem.connect("activate", self.cb_childmenu_changed,
child_handle)
menuitem.show()
self.my_menu.popup(None, None, None, None, 0, 0)
return 1
return 0
def cb_childmenu_changed(self, obj, person_handle):
"""
Callback for the pulldown menu selection, changing to the person
attached with menu item.
"""
self.change_active(person_handle)
return True
def cb_change_scroll_direction(self, menuitem, data):
"""Change scroll_direction option."""
        self.scroll_direction = bool(data)
def kb_goto_home(self, *obj):
"""Goto home person from keyboard."""
self.cb_home(None)
def find_tree(self, person, index, depth, lst, val=0):
"""Recursively build a list of ancestors"""
if depth > self.force_size or not person:
return
if self._depth < depth:
self._depth = depth
try:
alive = probably_alive(person, self.dbstate.db)
except RuntimeError:
ErrorDialog(_('Relationship loop detected'),
_('A person was found to be his/her own ancestor.'),
parent=self.uistate.window)
alive = False
lst[index] = [person, val, None, alive, None]
parent_families = person.get_parent_family_handle_list()
if parent_families:
family_handle = parent_families[0]
else:
return
mrel = True
frel = True
family = self.dbstate.db.get_family_from_handle(family_handle)
if family:
for child_ref in family.get_child_ref_list():
if child_ref.ref == person.handle:
mrel = child_ref.mrel == ChildRefType.BIRTH
frel = child_ref.frel == ChildRefType.BIRTH
lst[index] = [person, val, family, alive, None]
father_handle = family.get_father_handle()
if father_handle:
father = self.dbstate.db.get_person_from_handle(
father_handle)
self.find_tree(father, (2*index)+1, depth+1, lst, frel)
mother_handle = family.get_mother_handle()
if mother_handle:
mother = self.dbstate.db.get_person_from_handle(
mother_handle)
self.find_tree(mother, (2*index)+2, depth+1, lst, mrel)
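    # Illustrative note (not in the original source): find_tree fills lst
    # in a binary-heap layout, so the person at index i has their father at
    # index 2*i + 1 and their mother at index 2*i + 2; lst[0] is the root,
    # lst[1] the father, lst[2] the mother, and so on.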
def add_nav_portion_to_menu(self, menu):
"""
This function adds a common history-navigation portion
to the context menu. Used by both build_nav_menu() and
build_full_nav_menu() methods.
"""
hobj = self.uistate.get_history(self.navigation_type(),
self.navigation_group())
home_sensitivity = True
if not self.dbstate.db.get_default_person():
home_sensitivity = False
# bug 4884: need to translate the home label
entries = [
(_("Pre_vious"), self.back_clicked, not hobj.at_front()),
(_("_Next"), self.fwd_clicked, not hobj.at_end()),
(_("_Home"), self.cb_home, home_sensitivity),
]
for label, callback, sensitivity in entries:
item = Gtk.MenuItem.new_with_mnemonic(label)
item.set_sensitive(sensitivity)
if callback:
item.connect("activate", callback)
item.show()
menu.append(item)
def add_settings_to_menu(self, menu):
"""
Add frequently used settings to the menu. Most settings will be set
from the configuration dialog.
"""
# Separator.
item = Gtk.SeparatorMenuItem()
item.show()
menu.append(item)
# Mouse scroll direction setting.
item = Gtk.MenuItem(label=_("Mouse scroll direction"))
item.set_submenu(Gtk.Menu())
scroll_direction_menu = item.get_submenu()
entry = Gtk.RadioMenuItem(label=_("Top <-> Bottom"))
entry.connect("activate", self.cb_change_scroll_direction, False)
        if not self.scroll_direction:
entry.set_active(True)
entry.show()
scroll_direction_menu.append(entry)
entry = Gtk.RadioMenuItem(label=_("Left <-> Right"))
entry.connect("activate", self.cb_change_scroll_direction, True)
        if self.scroll_direction:
entry.set_active(True)
entry.show()
scroll_direction_menu.append(entry)
scroll_direction_menu.show()
item.show()
menu.append(item)
# Separator.
item = Gtk.SeparatorMenuItem()
item.show()
menu.append(item)
        # Help menu entry
item = Gtk.MenuItem(label=_("About H-Tree"))
item.connect("activate", self.on_help_clicked)
item.show()
menu.append(item)
def cb_build_missing_parent_nav_menu(self, obj, event,
person_handle, family_handle):
"""Builds the menu for a missing parent."""
self.menu = Gtk.Menu()
self.menu.set_reserve_toggle_size(False)
add_item = Gtk.MenuItem.new_with_mnemonic(_('_Add'))
add_item.connect("activate", self.cb_add_parents, person_handle,
family_handle)
add_item.show()
self.menu.append(add_item)
# Add a separator line
add_item = Gtk.SeparatorMenuItem()
add_item.show()
self.menu.append(add_item)
# Add history-based navigation
self.add_nav_portion_to_menu(self.menu)
self.add_settings_to_menu(self.menu)
self.menu.popup(None, None, None, None, 0, event.time)
return 1
def cb_build_full_nav_menu(self, obj, event, person_handle, family_handle):
"""
Builds the full menu (including Siblings, Spouses, Children,
and Parents) with navigation.
"""
self.menu = Gtk.Menu()
self.menu.set_reserve_toggle_size(False)
person = self.dbstate.db.get_person_from_handle(person_handle)
if not person:
return 0
go_item = Gtk.MenuItem(label=name_displayer.display(person))
go_item.connect("activate", self.cb_childmenu_changed, person_handle)
go_item.show()
self.menu.append(go_item)
edit_item = Gtk.MenuItem.new_with_mnemonic(_('_Edit'))
edit_item.connect("activate", self.cb_edit_person, person_handle)
edit_item.show()
self.menu.append(edit_item)
clipboard_item = Gtk.MenuItem.new_with_mnemonic(_('_Copy'))
clipboard_item.connect("activate", self.cb_copy_person_to_clipboard,
person_handle)
clipboard_item.show()
self.menu.append(clipboard_item)
# collect all spouses, parents and children
linked_persons = []
# Go over spouses and build their menu
item = Gtk.MenuItem(label=_("Spouses"))
fam_list = person.get_family_handle_list()
no_spouses = 1
for fam_id in fam_list:
family = self.dbstate.db.get_family_from_handle(fam_id)
if family.get_father_handle() == person.get_handle():
sp_id = family.get_mother_handle()
else:
sp_id = family.get_father_handle()
spouse = None
if sp_id:
spouse = self.dbstate.db.get_person_from_handle(sp_id)
if not spouse:
continue
if no_spouses:
no_spouses = 0
item.set_submenu(Gtk.Menu())
sp_menu = item.get_submenu()
sp_menu.set_reserve_toggle_size(False)
sp_item = Gtk.MenuItem(label=name_displayer.display(spouse))
linked_persons.append(sp_id)
sp_item.connect("activate", self.cb_childmenu_changed, sp_id)
sp_item.show()
sp_menu.append(sp_item)
if no_spouses:
item.set_sensitive(0)
item.show()
self.menu.append(item)
# Go over siblings and build their menu
item = Gtk.MenuItem(label=_("Siblings"))
pfam_list = person.get_parent_family_handle_list()
no_siblings = 1
for pfam in pfam_list:
fam = self.dbstate.db.get_family_from_handle(pfam)
sib_list = fam.get_child_ref_list()
for sib_ref in sib_list:
sib_id = sib_ref.ref
if sib_id == person.get_handle():
continue
sib = self.dbstate.db.get_person_from_handle(sib_id)
if not sib:
continue
if no_siblings:
no_siblings = 0
item.set_submenu(Gtk.Menu())
sib_menu = item.get_submenu()
sib_menu.set_reserve_toggle_size(False)
if find_children(self.dbstate.db, sib):
label = Gtk.Label(label='<b><i>%s</i></b>'
% escape(name_displayer.display(sib)))
else:
label = Gtk.Label(label=escape(name_displayer.display(sib)))
sib_item = Gtk.MenuItem()
label.set_use_markup(True)
label.show()
label.set_halign(Gtk.Align.START)
sib_item.add(label)
linked_persons.append(sib_id)
sib_item.connect("activate", self.cb_childmenu_changed, sib_id)
sib_item.show()
sib_menu.append(sib_item)
if no_siblings:
item.set_sensitive(0)
item.show()
self.menu.append(item)
# Go over children and build their menu
item = Gtk.MenuItem(label=_("Children"))
no_children = 1
childlist = find_children(self.dbstate.db, person)
for child_handle in childlist:
child = self.dbstate.db.get_person_from_handle(child_handle)
if not child:
continue
if no_children:
no_children = 0
item.set_submenu(Gtk.Menu())
child_menu = item.get_submenu()
child_menu.set_reserve_toggle_size(False)
if find_children(self.dbstate.db, child):
label = Gtk.Label(label='<b><i>%s</i></b>'
% escape(name_displayer.display(child)))
else:
label = Gtk.Label(label=escape(name_displayer.display(child)))
child_item = Gtk.MenuItem()
label.set_use_markup(True)
label.show()
label.set_halign(Gtk.Align.START)
child_item.add(label)
linked_persons.append(child_handle)
child_item.connect("activate", self.cb_childmenu_changed,
child_handle)
child_item.show()
child_menu.append(child_item)
if no_children:
item.set_sensitive(0)
item.show()
self.menu.append(item)
# Go over parents and build their menu
item = Gtk.MenuItem(label=_("Parents"))
no_parents = 1
par_list = find_parents(self.dbstate.db, person)
for par_id in par_list:
par = None
if par_id:
par = self.dbstate.db.get_person_from_handle(par_id)
if not par:
continue
if no_parents:
no_parents = 0
item.set_submenu(Gtk.Menu())
par_menu = item.get_submenu()
par_menu.set_reserve_toggle_size(False)
if find_parents(self.dbstate.db, par):
label = Gtk.Label(label='<b><i>%s</i></b>'
% escape(name_displayer.display(par)))
else:
label = Gtk.Label(label=escape(name_displayer.display(par)))
par_item = Gtk.MenuItem()
label.set_use_markup(True)
label.show()
label.set_halign(Gtk.Align.START)
par_item.add(label)
linked_persons.append(par_id)
par_item.connect("activate", self.cb_childmenu_changed, par_id)
par_item.show()
par_menu.append(par_item)
if no_parents:
if self.tree_style == 2 and not self.show_unknown_people:
item.set_submenu(Gtk.Menu())
par_menu = item.get_submenu()
par_menu.set_reserve_toggle_size(False)
par_item = Gtk.MenuItem(label=_("Add New Parents..."))
par_item.connect("activate", self.cb_add_parents, person_handle,
family_handle)
par_item.show()
par_menu.append(par_item)
else:
item.set_sensitive(0)
item.show()
self.menu.append(item)
        # Go over related people (e.g. event witnesses) and build their menu
item = Gtk.MenuItem(label=_("Related"))
no_related = 1
for p_id in find_witnessed_people(self.dbstate.db, person):
#if p_id in linked_persons:
# continue # skip already listed family members
per = self.dbstate.db.get_person_from_handle(p_id)
if not per:
continue
if no_related:
no_related = 0
item.set_submenu(Gtk.Menu())
per_menu = item.get_submenu()
per_menu.set_reserve_toggle_size(False)
label = Gtk.Label(label=escape(name_displayer.display(per)))
per_item = Gtk.MenuItem()
label.set_use_markup(True)
label.show()
label.set_halign(Gtk.Align.START)
per_item.add(label)
per_item.connect("activate", self.cb_childmenu_changed, p_id)
per_item.show()
per_menu.append(per_item)
if no_related:
item.set_sensitive(0)
item.show()
self.menu.append(item)
# Add separator line
item = Gtk.SeparatorMenuItem()
item.show()
self.menu.append(item)
# Add history-based navigation
self.add_nav_portion_to_menu(self.menu)
self.add_settings_to_menu(self.menu)
self.menu.popup(None, None, None, None, 0, event.time)
return 1
def cb_build_relation_nav_menu(self, obj, event, family_handle):
"""Builds the menu for a parents-child relation line."""
self.menu = Gtk.Menu()
self.menu.set_reserve_toggle_size(False)
family = self.dbstate.db.get_family_from_handle(family_handle)
if not family:
return 0
edit_item = Gtk.MenuItem.new_with_mnemonic(_('_Edit'))
edit_item.connect("activate", self.cb_edit_family, family_handle)
edit_item.show()
self.menu.append(edit_item)
clipboard_item = Gtk.MenuItem.new_with_mnemonic(_('_Copy'))
clipboard_item.connect("activate", self.cb_copy_family_to_clipboard,
family_handle)
clipboard_item.show()
self.menu.append(clipboard_item)
# Add separator
item = Gtk.SeparatorMenuItem()
item.show()
self.menu.append(item)
# Add history-based navigation
self.add_nav_portion_to_menu(self.menu)
self.add_settings_to_menu(self.menu)
self.menu.popup(None, None, None, None, 0, event.time)
return 1
def cb_update_show_tags(self, client, cnxn_id, entry, data):
"""
Called when the configuration menu changes the tags setting.
"""
if entry == 'True':
self.show_tag_color = True
else:
self.show_tag_color = False
self.rebuild_trees(self.get_active())
def cb_update_show_images(self, client, cnxn_id, entry, data):
"""
Called when the configuration menu changes the images setting.
"""
if entry == 'True':
self.show_images = True
else:
self.show_images = False
self.rebuild_trees(self.get_active())
def cb_update_show_marriage(self, client, cnxn_id, entry, data):
"""
Called when the configuration menu changes the marriage data setting.
"""
if entry == 'True':
self.show_marriage_data = True
else:
self.show_marriage_data = False
self.rebuild_trees(self.get_active())
def cb_update_show_unknown_people(self, client, cnxn_id, entry, data):
"""
Called when the configuration menu changes the unknown people setting.
"""
if entry == 'True':
self.show_unknown_people = True
else:
self.show_unknown_people = False
self.rebuild_trees(self.get_active())
def cb_update_layout(self, obj, constant):
"""
Called when the configuration menu changes the layout.
"""
entry = obj.get_active()
self._config.set(constant, entry)
self.tree_style = int(entry)
adj = self.config_size_slider.get_adjustment()
if entry == 1: # Limit tree size to 5 for the compact style
adj.set_upper(5)
if self.force_size > 5:
self.force_size = 5
adj.set_value(5)
else:
adj.set_upper(9)
adj.emit("changed")
self.rebuild_trees(self.get_active())
def cb_update_tree_direction(self, client, cnxn_id, entry, data):
"""
Called when the configuration menu changes the tree direction.
"""
self.tree_direction = int(entry)
self.rebuild_trees(self.get_active())
def cb_update_tree_size(self, client, cnxn_id, entry, data):
"""
Called when the configuration menu changes the tree size.
"""
self.force_size = int(entry)
self.rebuild_trees(self.get_active())
def config_connect(self):
"""
        Overwritten from the :class:`~gui.views.pageview.PageView` method.
        This method will be called after the ini file is initialized;
        use it to monitor changes in the ini file.
"""
self._config.connect('interface.pedview-show-images',
self.cb_update_show_images)
self._config.connect('interface.pedview-show-marriage',
self.cb_update_show_marriage)
self._config.connect('interface.pedview-show-tags',
self.cb_update_show_tags)
self._config.connect('interface.pedview-show-unknown-people',
self.cb_update_show_unknown_people)
self._config.connect('interface.pedview-tree-direction',
self.cb_update_tree_direction)
self._config.connect('interface.pedview-tree-size',
self.cb_update_tree_size)
def _get_configure_page_funcs(self):
"""
Return a list of functions that create gtk elements to use in the
notebook pages of the Configure dialog
:return: list of functions
"""
return [self.config_panel]
def config_panel(self, configdialog):
"""
Function that builds the widget in the configuration dialog
"""
grid = Gtk.Grid()
grid.set_border_width(12)
grid.set_column_spacing(6)
grid.set_row_spacing(6)
configdialog.add_checkbox(grid,
_('Show images'),
0, 'interface.pedview-show-images')
#configdialog.add_checkbox(grid,
# _('Show marriage data'),
# 1, 'interface.pedview-show-marriage')
#configdialog.add_checkbox(grid,
# _('Show unknown people'),
# 2, 'interface.pedview-show-unknown-people')
configdialog.add_checkbox(grid,
_('Show tags'),
1, 'interface.pedview-show-tags')
#configdialog.add_combo(grid,
# _('Tree style'),
# 4, 'interface.pedview-layout',
# ((0, _('Standard')),
# (1, _('Compact')),
# (2, _('Expanded'))),
# callback=self.cb_update_layout)
#configdialog.add_combo(grid,
# _('Tree direction'),
# 5, 'interface.pedview-tree-direction',
# ((0, _('Vertical (↓)')),
# (1, _('Vertical (↑)')),
# (2, _('Horizontal (→)')),
# (3, _('Horizontal (←)'))))
self.config_size_slider = configdialog.add_slider(grid,
_('Tree size'),
2, 'interface.pedview-tree-size',
(2, 6), width=10)
return _('Layout'), grid
|
sam-m888/addons-source
|
HtreePedigreeView/HtreePedigreeView.py
|
Python
|
gpl-2.0
| 83,738
|
[
"FLEUR"
] |
e65e8b509ecdd7005d8943c835f5ac07c8cfa10d245c7698a70a899633739631
|
################################################################################
# The Neural Network (NN) based Speech Synthesis System
# https://svn.ecdf.ed.ac.uk/repo/inf/dnn_tts/
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# - The authors' names may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################
import numpy, time, cPickle, gzip, sys, os, copy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import logging
class MixtureDensityOutputLayer(object):
def __init__(self, rng, input, n_in, n_out, n_component):
self.input = input
W_value = rng.normal(0.0, 1.0/numpy.sqrt(n_in), size=(n_in, n_out*n_component))
self.W_mu = theano.shared(value=numpy.asarray(W_value, dtype=theano.config.floatX), name='W_mu', borrow=True)
self.W_sigma = theano.shared(value=numpy.asarray(W_value.copy(), dtype=theano.config.floatX), name='W_sigma', borrow=True)
W_mix_value = rng.normal(0.0, 1.0/numpy.sqrt(n_in), size=(n_in, n_component))
self.W_mix = theano.shared(value=numpy.asarray(W_mix_value, dtype=theano.config.floatX), name='W_mix', borrow=True)
self.mu = T.dot(self.input, self.W_mu) #assume linear output for mean vectors
self.sigma = T.nnet.softplus(T.dot(self.input, self.W_sigma)) # + 0.0001
#self.sigma = T.exp(T.dot(self.input, self.W_sigma)) # + 0.0001
self.mix = T.nnet.softmax(T.dot(self.input, self.W_mix))
self.delta_W_mu = theano.shared(value = numpy.zeros((n_in, n_out*n_component),
dtype=theano.config.floatX), name='delta_W_mu')
self.delta_W_sigma = theano.shared(value = numpy.zeros((n_in, n_out*n_component),
dtype=theano.config.floatX), name='delta_W_sigma')
self.delta_W_mix = theano.shared(value = numpy.zeros((n_in, n_component),
dtype=theano.config.floatX), name='delta_W_mix')
self.params = [self.W_mu, self.W_sigma, self.W_mix]
self.delta_params = [self.delta_W_mu, self.delta_W_sigma, self.delta_W_mix]
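# Illustrative sketch (not part of the original Merlin code): wiring the MDN
# output layer onto a symbolic minibatch. The layer sizes below are assumed
# values chosen for the example, not repository defaults.
def _mdn_output_layer_example():
    rng = numpy.random.RandomState(1234)
    x = T.matrix('x')  # symbolic minibatch of shape (batch, n_in)
    layer = MixtureDensityOutputLayer(rng, x, n_in=256, n_out=60, n_component=4)
    # layer.mu and layer.sigma are symbolic, shape (batch, n_out * n_component);
    # layer.mix is a softmax over mixture components, shape (batch, n_component).
    return layer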
class LinearLayer(object):
def __init__(self, rng, input, n_in, n_out, W = None, b = None):
self.input = input
        # randomly initialise the weights W (scaled by 1/sqrt(n_in)) as a matrix of shape (n_in, n_out)
if W is None:
W_value = rng.normal(0.0, 1.0/numpy.sqrt(n_in), size=(n_in, n_out))
W = theano.shared(value=numpy.asarray(W_value, dtype=theano.config.floatX), name='W', borrow=True)
if b is None:
b = theano.shared(value=numpy.zeros((n_out,),
dtype=theano.config.floatX),
name='b', borrow=True)
self.W = W
self.b = b
self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
dtype=theano.config.floatX), name='delta_W')
self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
dtype=theano.config.floatX), name='delta_b')
self.output = T.dot(self.input, self.W) + self.b
self.params = [self.W, self.b]
self.delta_params = [self.delta_W, self.delta_b]
def errors(self, y):
L = T.sum( (self.output-y)*(self.output-y), axis=1 )
errors = T.mean(L)
return (errors)
def init_params(self, iparams):
updates = {}
for param, iparam in zip(self.params, iparams):
updates[param] = iparam
return updates
class SigmoidLayer(object):
def __init__(self, rng, input, n_in, n_out, W = None, b = None, activation = T.tanh):
self.input = input
        # randomly initialise the weights W (scaled by 1/sqrt(n_in)) as a matrix of shape (n_in, n_out)
if W is None:
W_value = numpy.asarray(rng.normal(0.0, 1.0/numpy.sqrt(n_in),
size=(n_in, n_out)), dtype=theano.config.floatX)
W = theano.shared(value=W_value,
name='W', borrow=True)
if b is None:
b = theano.shared(value=numpy.zeros((n_out,),
dtype=theano.config.floatX),
name='b', borrow=True)
self.W = W
self.b = b
self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
dtype=theano.config.floatX), name='delta_W')
self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
dtype=theano.config.floatX), name='delta_b')
self.output = T.dot(self.input, self.W) + self.b
self.output = activation(self.output)
self.params = [self.W, self.b]
self.delta_params = [self.delta_W, self.delta_b]
def errors(self, y):
L = T.sum( (self.output-y)*(self.output-y), axis=1 )
errors = T.mean(L)
return (errors)
def init_params(self, iparams):
updates = {}
for param, iparam in zip(self.params, iparams):
updates[param] = iparam
return updates
class GeneralLayer(object):
def __init__(self, rng, input, n_in, n_out, W = None, b = None, activation = 'linear'):
self.input = input
self.n_in = n_in
self.n_out = n_out
self.logger = logging.getLogger('general_layer')
        # randomly initialise the weights based on the input size, as advised by 'Neural Networks: Tricks of the Trade'
if W is None:
W_values = numpy.asarray(rng.normal(0.0, 1.0/numpy.sqrt(n_in),
size=(n_in, n_out)), dtype=theano.config.floatX)
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
dtype=theano.config.floatX), name='delta_W')
self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
dtype=theano.config.floatX), name='delta_b')
lin_output = T.dot(input, self.W) + self.b
if activation == 'sigmoid':
self.output = T.nnet.sigmoid(lin_output)
elif activation == 'tanh':
self.output = T.tanh(lin_output)
elif activation == 'linear':
self.output = lin_output
elif activation == 'ReLU': ## rectifier linear unit
self.output = T.maximum(0.0, lin_output)
        elif activation == 'ReSU': ## rectifier smooth unit (softplus)
            self.output = T.log(1.0 + T.exp(lin_output))
        else:
            self.logger.critical('the activation function %s is not supported right now. Please modify layers.py to support it' % (activation))
            raise ValueError('unsupported activation function: %s' % activation)
# parameters of the model
self.params = [self.W, self.b]
self.delta_params = [self.delta_W, self.delta_b]
def errors(self, y):
errors = T.mean(T.sum((self.output-y)**2, axis=1))
return errors
def init_params(self, iparams):
updates = {}
for param, iparam in zip(self.params, iparams):
updates[param] = iparam
return updates
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh, do_maxout = False, pool_size = 1,
do_pnorm = False, pnorm_order = 1):
""" Class for hidden layer """
self.input = input
self.n_in = n_in
self.n_out = n_out
if W is None:
W_values = numpy.asarray(rng.normal(0.0, 1.0/numpy.sqrt(n_in),
size=(n_in, n_out)), dtype=theano.config.floatX)
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
dtype=theano.config.floatX), name='delta_W')
self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
dtype=theano.config.floatX), name='delta_b')
lin_output = T.dot(input, self.W) + self.b
        if do_maxout:
self.last_start = n_out - pool_size
self.tmp_output = lin_output[:,0:self.last_start+1:pool_size]
for i in range(1, pool_size):
cur = lin_output[:,i:self.last_start+i+1:pool_size]
self.tmp_output = T.maximum(cur, self.tmp_output)
self.output = activation(self.tmp_output)
        elif do_pnorm:
self.last_start = n_out - pool_size
self.tmp_output = abs(lin_output[:,0:self.last_start+1:pool_size]) ** pnorm_order
for i in range(1, pool_size):
cur = abs(lin_output[:,i:self.last_start+i+1:pool_size]) ** pnorm_order
self.tmp_output = self.tmp_output + cur
self.tmp_output = self.tmp_output ** (1.0 / pnorm_order)
self.output = activation(self.tmp_output)
else:
self.output = (lin_output if activation is None
else activation(lin_output))
# self.output = self.rectifier_linear(lin_output)
# parameters of the model
self.params = [self.W, self.b]
self.delta_params = [self.delta_W, self.delta_b]
def rectifier_linear(self, x):
x = T.maximum(0.0, x)
return x
    def rectifier_smooth(self, x):
        x = T.log(1.0 + T.exp(x))
        return x
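# Illustrative note (not part of the original code): the maxout branch above
# takes an elementwise max over each group of `pool_size` consecutive columns
# of lin_output. A plain-numpy equivalent of that pooling:
def _maxout_numpy_example():
    a = numpy.arange(12).reshape(2, 6)     # (batch=2, n_out=6)
    return a.reshape(2, 3, 2).max(axis=2)  # pool_size=2 -> shape (2, 3)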
class dA(object):
def __init__(self, numpy_rng, theano_rng = None, input = None,
n_visible= None, n_hidden= None, W = None, bhid = None,
bvis = None, firstlayer = 0, variance = None ):
self.n_visible = n_visible
self.n_hidden = n_hidden
# create a Theano random generator that gives symbolic random values
if not theano_rng :
theano_rng = RandomStreams(numpy_rng.randint(2**30))
if not W:
initial_W = numpy.asarray( numpy_rng.uniform(
low = -4*numpy.sqrt(6./(n_hidden+n_visible)),
high = 4*numpy.sqrt(6./(n_hidden+n_visible)),
size = (n_visible, n_hidden)),
dtype = theano.config.floatX)
W = theano.shared(value = initial_W, name ='W')
if not bvis:
bvis = theano.shared(value = numpy.zeros(n_visible,
dtype = theano.config.floatX))
if not bhid:
bhid = theano.shared(value = numpy.zeros(n_hidden,
dtype = theano.config.floatX), name ='b')
self.W = W
self.b = bhid
self.b_prime = bvis
self.W_prime = self.W.T
self.theano_rng = theano_rng
        if input is None:
self.x = T.dmatrix(name = 'input')
else:
self.x = input
self.params = [self.W, self.b, self.b_prime]
# first layer, use Gaussian noise
self.firstlayer = firstlayer
if self.firstlayer == 1 :
            if variance is None:
self.var = T.vector(name = 'input')
else :
self.var = variance
else :
self.var = None
def get_corrupted_input(self, input, corruption_level):
if self.firstlayer == 0 :
return self.theano_rng.binomial(
size = input.shape,
n = 1,
p = 1 - corruption_level,
dtype=theano.config.floatX) * input
else :
noise = self.theano_rng.normal( size = input.shape,
dtype = theano.config.floatX)
denoises = noise * self.var * corruption_level
return input+denoises
def get_hidden_values(self, input):
return T.nnet.sigmoid(T.dot(input, self.W) + self.b)
def get_reconstructed_input(self, hidden ):
if self.firstlayer == 1 :
return T.dot(hidden, self.W_prime) + self.b_prime
else :
return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)
def get_cost_updates(self, corruption_level, learning_rate):
tilde_x = self.get_corrupted_input(self.x, corruption_level)
y = self.get_hidden_values( tilde_x )
z = self.get_reconstructed_input(y)
L = T.sum ( (self.x-z) * (self.x-z), axis=1 )
cost = T.mean(L) / 2
gparams = T.grad(cost, self.params)
updates = {}
for param, gparam in zip(self.params, gparams):
updates[param] = param - learning_rate*gparam
return (cost, updates)
def init_params(self, iparams):
updates = {}
for param, iparam in zip(self.params, iparams):
updates[param] = iparam
return updates
def get_test_cost(self, corruption_level):
""" This function computes the cost and the updates for one trainng
step of the dA """
# tilde_x = self.get_corrupted_input(self.x, corruption_level, 0.5)
y = self.get_hidden_values( self.x )
z = self.get_reconstructed_input(y)
L = T.sum ( (self.x-z) * (self.x-z), axis=1)
cost = T.mean(L)
return cost
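# Illustrative sketch (not part of the original Merlin code): compiling one
# pretraining step for a single dA layer. The sizes, corruption level and
# learning rate below are assumptions made for the example.
def _da_pretrain_example():
    numpy_rng = numpy.random.RandomState(89677)
    x = T.matrix('x')
    da = dA(numpy_rng, input=x, n_visible=400, n_hidden=256)
    cost, updates = da.get_cost_updates(corruption_level=0.1, learning_rate=0.01)
    # Theano prefers an ordered list of (param, new_value) pairs for updates
    return theano.function([x], cost, updates=list(updates.items()))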
|
ronanki/merlin
|
src/layers/mdn_layers.py
|
Python
|
apache-2.0
| 16,415
|
[
"Gaussian"
] |
205a848cc8d428b56b857cb3b6ff393a89ac92c0539d85ad04ee4f3cf6661bcd
|
from asap3 import *
from ase.lattice.cubic import FaceCenteredCubic
from asap3.testtools import ReportTest
print "Making atoms"
atoms = FaceCenteredCubic(directions=[[1,0,0],[0,1,0],[0,0,1]],
size=(25,25,25), symbol="Cu")
memory_usage(atoms)
print "Attaching EMT potential"
atoms.set_calculator(EMT())
memory_usage(atoms)
print "Calculating forces"
atoms.get_forces()
memory_usage(atoms)
print "Test passed!"
|
auag92/n2dm
|
Asap-3.8.4/Test/PrintMemory.py
|
Python
|
mit
| 439
|
[
"ASE"
] |
adbf8c95505453d4a813213e189104350a5e328d4f1ea039c3bac2be7835c100
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Fever models built from tf.keras.Model module API (eg two tower ranker)."""
from language.serene import layers
import numpy as np
import tensorflow.compat.v2 as tf
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
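# Illustrative reference (not part of the original model code): the exact GELU
# is x * Phi(x), with Phi the standard normal CDF; gelu() above is the tanh
# approximation of this quantity.
def _gelu_exact(x):
  """Exact GELU via erf, for comparison with the approximation above."""
  return x * 0.5 * (1.0 + tf.math.erf(x / np.sqrt(2.0)))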
def parse_activation(
name):
if name == 'gelu':
return gelu
else:
return name
class TwoTowerRanker(tf.keras.Model):
"""Build a model of two towers that are joined with a matching function."""
# pyformat: disable
def __init__(
self,
vocab_size, *,
embedder_name,
tied_encoders,
matcher_name,
bidirectional,
contextualizer,
context_num_layers,
activation,
matcher_hidden_size,
word_emb_size,
hidden_size,
projection_dim,
bert_model_name,
bert_model_path,
bert_trainable,
bert_dropout,
dropout,
use_batch_norm,
classify_claim):
# pyformat: enable
"""TwoTower model.
Args:
vocab_size: Size of vocab
embedder_name: Which embedder, basic or bert
tied_encoders: Whether to tie the encoders
matcher_name: What kind of matcher to use
bidirectional: Whether context layer is bidirectional
contextualizer: Which contextualizer to use (e.g., GRU)
context_num_layers: Number of layers in contextualizer
activation: Activation function to use for feed forward layers
matcher_hidden_size: Hidden size of matcher, if it has one
word_emb_size: Word embedding hidden size
hidden_size: Hidden size of contextualizer
projection_dim: Dimension to project embeddings to
bert_model_name: Name of bert model (e.g., base vs large)
bert_model_path: Path to the bert checkpoint
bert_trainable: Whether bert part of model is trainable
bert_dropout: Dropout rate on bert embeddings
dropout: Dropout rate to use
use_batch_norm: Whether to use batch norm
classify_claim: Whether to classify the claim
"""
super().__init__(name='two_tower_ranker')
self._vocab_size = vocab_size
self._embedder_name = embedder_name
self._matcher_name = matcher_name
self._bidirectional = bidirectional
self._contextualizer = contextualizer
self._context_num_layers = context_num_layers
self._activation = activation
activation = parse_activation(activation)
self._matcher_hidden_size = matcher_hidden_size
self._word_emb_size = word_emb_size
self._hidden_size = hidden_size
self._projection_dim = projection_dim
self._bert_model_name = bert_model_name
self._bert_model_path = bert_model_path
self._bert_trainable = bert_trainable
self._bert_dropout = bert_dropout
self._dropout = dropout
self._use_batch_norm = use_batch_norm
self._classify_claim = classify_claim
self._tied_encoders = tied_encoders
if embedder_name == 'classic_embedder':
if self._tied_encoders:
self._claim_encoder = layers.ClassicEmbedder(
vocab_size=vocab_size,
word_emb_size=word_emb_size,
use_batch_norm=use_batch_norm,
hidden_size=hidden_size,
dropout=dropout,
bidirectional=bidirectional,
contextualizer=contextualizer,
context_num_layers=context_num_layers,
name='tied_encoder',
)
self._evidence_encoder = self._claim_encoder
else:
self._claim_encoder = layers.ClassicEmbedder(
vocab_size=vocab_size,
word_emb_size=word_emb_size,
use_batch_norm=use_batch_norm,
hidden_size=hidden_size,
dropout=dropout,
bidirectional=bidirectional,
contextualizer=contextualizer,
context_num_layers=context_num_layers,
name='claim_encoder',
)
self._evidence_encoder = layers.ClassicEmbedder(
vocab_size=vocab_size,
word_emb_size=word_emb_size,
use_batch_norm=use_batch_norm,
hidden_size=hidden_size,
dropout=dropout,
bidirectional=bidirectional,
contextualizer=contextualizer,
context_num_layers=context_num_layers,
name='evidence_encoder',
)
elif embedder_name == 'bert_embedder':
if self._tied_encoders:
self._claim_encoder = layers.BertEmbedder(
bert_model_name,
bert_model_path,
name='tied_encoder',
bert_trainable=bert_trainable,
bert_dropout=bert_dropout,
)
self._evidence_encoder = self._claim_encoder
else:
self._claim_encoder = layers.BertEmbedder(
bert_model_name,
bert_model_path,
name='claim_encoder',
projection_dim=projection_dim,
bert_trainable=bert_trainable,
bert_dropout=bert_dropout,
)
self._evidence_encoder = layers.BertEmbedder(
bert_model_name,
bert_model_path,
name='evidence_encoder',
projection_dim=projection_dim,
bert_trainable=bert_trainable,
bert_dropout=bert_dropout,
)
else:
raise ValueError('Invalid embedder')
matcher = layers.matcher_registry[matcher_name](
hidden_size=matcher_hidden_size,
dropout=dropout,
use_batch_norm=use_batch_norm,
activation=activation,
# Relevant or not
n_classes=1,
)
if self._tied_encoders:
self._claim_projector = tf.keras.layers.Dense(projection_dim)
self._evidence_projector = self._claim_projector
else:
self._claim_projector = tf.keras.layers.Dense(projection_dim)
self._evidence_projector = tf.keras.layers.Dense(projection_dim)
self._evidence_classifier = tf.keras.Sequential(name='evidence_classifier')
self._evidence_classifier.add(matcher)
self._evidence_classifier.add(tf.keras.layers.Activation('sigmoid'))
if classify_claim:
      # This doesn't need to preserve the property of being findable with a
      # dot product, so make it more powerful with bilinear matching.
claim_hidden = layers.matcher_registry['bilinear_matcher'](
hidden_size=matcher_hidden_size,
dropout=dropout,
use_batch_norm=use_batch_norm,
activation=activation,
# support, refute, not enough info
n_classes=3,
)
self._claim_classifier = tf.keras.Sequential(name='claim_classifier')
self._claim_classifier.add(claim_hidden)
self._claim_classifier.add(tf.keras.layers.Activation('softmax'))
else:
self._claim_classifier = None
def call(self,
inputs,
embed_claim=False,
embed_evidence=False,
training=None,
**kwargs):
"""Model forward pass.
Args:
inputs: Input dictionary, dependent on type of embedder used
embed_claim: Whether to embed the claim
embed_evidence: Whether to embed the evidence
training: Whether training mode is enabled
**kwargs: Satisfies API compat
Returns:
Prediction of the model if claim is relevant to evidence
"""
if embed_claim or embed_evidence:
# (batch_size, projection_dim), (batch_size, projection_dim)
return self.embed_only(
inputs, embed_claim=embed_claim, embed_evidence=embed_evidence)
else:
if self._embedder_name == 'classic_embedder':
# (batch_size, hidden_dim)
encoded_claim = self._claim_encoder({'tokens': inputs['claim_text']})
# (batch_size, hidden_dim)
encoded_evidence = self._evidence_encoder(
{'tokens': inputs['evidence_text']})
elif self._embedder_name == 'bert_embedder':
# (batch_size, hidden_dim)
encoded_claim = self._claim_encoder({
'word_ids': inputs['claim_text_word_ids'],
'mask': inputs['claim_text_mask'],
'segment_ids': inputs['claim_text_segment_ids'],
})
# (batch_size, hidden_dim)
encoded_evidence = self._evidence_encoder({
'word_ids': inputs['evidence_text_word_ids'],
'mask': inputs['evidence_text_mask'],
'segment_ids': inputs['evidence_text_segment_ids'],
})
else:
raise ValueError('invalid embedder')
out = {}
# (batch_size, projection_dim)
projected_claim = self._claim_projector(encoded_claim)
# (batch_size, projection_dim)
projected_evidence = self._evidence_projector(encoded_evidence)
# (batch_size, 1)
evidence_out = self._evidence_classifier(
(projected_claim, projected_evidence))
out['evidence_matching'] = tf.identity(
evidence_out, name='evidence_matching')
if self._classify_claim:
# The claim takes the full size embedding, not the projected one.
claim_out = self._claim_classifier((encoded_claim, encoded_evidence))
else:
# Predictions have three logits, one for each class
claim_out = tf.fill((tf.shape(encoded_claim)[0], 3), 0.0)
out['claim_classification'] = tf.identity(
claim_out, name='claim_classification')
return out
def embed_only(
self,
inputs,
*,
embed_claim,
embed_evidence,
project = True):
if self._embedder_name == 'classic_embedder':
if embed_claim:
# (batch_size, hidden_dim)
encoded_claim = self._claim_encoder({'tokens': inputs['claim_text']})
else:
encoded_claim = None
if embed_evidence:
# (batch_size, hidden_dim)
encoded_evidence = self._evidence_encoder(
{'tokens': inputs['evidence_text']})
else:
encoded_evidence = None
elif self._embedder_name == 'bert_embedder':
if embed_claim:
# (batch_size, hidden_dim)
encoded_claim = self._claim_encoder({
'word_ids': inputs['claim_text_word_ids'],
'mask': inputs['claim_text_mask'],
'segment_ids': inputs['claim_text_segment_ids'],
})
else:
encoded_claim = None
if embed_evidence:
# (batch_size, hidden_dim)
encoded_evidence = self._evidence_encoder({
'word_ids': inputs['evidence_text_word_ids'],
'mask': inputs['evidence_text_mask'],
'segment_ids': inputs['evidence_text_segment_ids'],
})
else:
encoded_evidence = None
else:
raise ValueError('invalid embedder')
if project:
if encoded_claim is not None:
encoded_claim = self._claim_projector(encoded_claim)
if encoded_evidence is not None:
encoded_evidence = self._evidence_projector(encoded_evidence)
return encoded_claim, encoded_evidence
def get_config(self):
config = super().get_config()
config.update({
'vocab_size': self._vocab_size,
'embedder_name': self._embedder_name,
'tied_encoders': self._tied_encoders,
'matcher_name': self._matcher_name,
'bidirectional': self._bidirectional,
'contextualizer': self._contextualizer,
'context_num_layers': self._context_num_layers,
'activation': self._activation,
'matcher_hidden_size': self._matcher_hidden_size,
'word_emb_size': self._word_emb_size,
'hidden_size': self._hidden_size,
'projection_dim': self._projection_dim,
'bert_model_name': self._bert_model_name,
'bert_model_path': self._bert_model_path,
'bert_trainable': self._bert_trainable,
'bert_dropout': self._bert_dropout,
'dropout': self._dropout,
'use_batch_norm': self._use_batch_norm,
'classify_claim': self._classify_claim,
})
return config
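# Illustrative sketch (not from the original code): constructing a small
# classic-embedder two-tower ranker. Every hyperparameter value below is an
# assumption made for the example; 'bilinear_matcher' is the only matcher
# registry key confirmed by this file, and the contextualizer name must match
# whatever language.serene.layers actually supports.
def _two_tower_example():
  return TwoTowerRanker(
      vocab_size=10000,
      embedder_name='classic_embedder',
      tied_encoders=True,
      matcher_name='bilinear_matcher',
      bidirectional=True,
      contextualizer='gru',  # assumed name
      context_num_layers=1,
      activation='relu',
      matcher_hidden_size=128,
      word_emb_size=100,
      hidden_size=128,
      projection_dim=64,
      bert_model_name=None,
      bert_model_path=None,
      bert_trainable=False,
      bert_dropout=0.0,
      dropout=0.1,
      use_batch_norm=False,
      classify_claim=False)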
|
google-research/language
|
language/serene/model.py
|
Python
|
apache-2.0
| 12,697
|
[
"Gaussian"
] |
857ecc093abd5c516d05e212239610341121e381dd459149fe53d22449001db6
|
#!/usr/bin/env python3
try:
import http.client as httplib
except ImportError:
import httplib
import httplib2
import os
import sys
import errno
from time import sleep
from decimal import Decimal
from . import consts
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaFileUpload
from oauth2client.file import Storage
from oauth2client.tools import run_flow
from oauth2client.client import flow_from_clientsecrets
httplib2.RETRIES = 1
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,
httplib.IncompleteRead,
httplib.ImproperConnectionState,
httplib.CannotSendRequest, httplib.CannotSendHeader,
httplib.ResponseNotReady, httplib.BadStatusLine)
RETRIABLE_STATUS_CODES = [500, 502, 504]
YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload https://www.googleapis.com/auth/youtube https://www.googleapis.com/auth/youtube.readonly https://www.googleapis.com/auth/youtube.force-ssl"
YOUTUBE_PARTNER_SCOPE = "https://www.googleapis.com/auth/youtubepartner"
SPREADSHEETS_SCOPE = "https://www.googleapis.com/auth/spreadsheets"
PREFIXES = (consts.root, sys.prefix, os.path.join(sys.prefix, "local"), "/usr", os.path.join("/usr", "local"))
SUFFIXES = ("client_secrets.json", ".client_secrets.json", f"share/{consts.short_name}/client_secrets.json")
def upload(yt, body, file, notify=False):
vid = None
ret = None
retries = 0
while not vid and retries < 10:
insert_request = yt.videos().insert(
part=",".join(body.keys()),
body=body,
notifySubscribers=notify,
media_body=MediaFileUpload(file,
chunksize=104857600,
resumable=True),)
ret, vid = upload_service(insert_request)
retries += 1
return ret, vid
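# Illustrative sketch (not part of the original uploader): a minimal `body`
# for upload(). The field names follow the YouTube Data API v3 videos.insert
# request body; the category id is an assumption ("22" = People & Blogs).
def example_upload_body(title, description):
    return {
        "snippet": {
            "title": title,
            "description": description,
            "categoryId": "22",
        },
        "status": {"privacyStatus": "unlisted"},
    }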
def upload_service(insert_request):
response = None
retry_exceptions = RETRIABLE_EXCEPTIONS
retry_status_codes = RETRIABLE_STATUS_CODES
ACCEPTABLE_ERRNO = (errno.EPIPE, errno.EINVAL, errno.ECONNRESET)
try:
ACCEPTABLE_ERRNO += (errno.WSAECONNABORTED,)
except AttributeError:
pass # Not windows
while True:
try:
status, response = insert_request.next_chunk()
if status is not None:
percent = Decimal(int(status.resumable_progress) / int(status.total_size))
print(f"{round(100 * percent, 2)}% uploaded")
except HttpError as e:
if e.resp.status in retry_status_codes:
print(f"A retriable HTTP error {e.resp.status} occurred:\n{e.content}")
elif b"503" in e.content:
print("Backend Error: will attempt to retry upload")
return False, None
elif b"uploadLimitExceeded" in e.content:
print("You have exceeded the YouTube Upload Limit")
print("Waiting 10 minutes before retrying to avoid the limit")
sleep(600)
else:
print(e)
return False, None
except retry_exceptions as e:
print(f"A retriable error occurred: {e}")
except Exception as e:
            if getattr(e, "errno", None) in ACCEPTABLE_ERRNO:
                print("Retriable error occurred, retrying now")
            else:
                print(e)
if response:
if "id" in response:
print(f"Video link is https://www.youtube.com/watch?v={response['id']}")
return True, response['id']
else:
print(response)
print(status)
return False, None
def test_get_service(scope, service, secret=None):
CLIENT_SECRETS_FILE = get_secrets(PREFIXES, SUFFIXES) if not secret else secret
if not CLIENT_SECRETS_FILE:
return None
flow = InstalledAppFlow.from_client_secrets_file(CLIENT_SECRETS_FILE, scopes=scope)
storage = Storage(os.path.join(consts.root, f".{consts.abbrv}-oauth2-{service}.json"))
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = flow.run_local_server(host='localhost',
port=8080,
authorization_prompt_message='Please visit this URL: {url}',
success_message='The auth flow is complete; you may close this window.',
open_browser=True)
storage.put(credentials)
return credentials
def get_service(scope, service, secret=None):
CLIENT_SECRETS_FILE = get_secrets(PREFIXES, SUFFIXES) if not secret else secret
print(f"Using {CLIENT_SECRETS_FILE}")
if not CLIENT_SECRETS_FILE:
return None
flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=scope)
flow.user_agent = consts.long_name
storage = Storage(os.path.join(consts.root, f".{consts.abbrv}-oauth2-{service}.json"))
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run_flow(flow, storage)
return credentials
def get_youtube_service():
credentials = get_service(YOUTUBE_UPLOAD_SCOPE, "youtube")
if not credentials:
return None
http = httplib2.Http()
try:
http.redirect_codes = set(http.redirect_codes) - {308} # https://github.com/googleapis/google-api-python-client/issues/803
except:
pass
return build("youtube", "v3", http=credentials.authorize(http))
def get_partner_service():
CLIENT_SECRETS_FILE = get_secrets((consts.root,), ("client_secrets.json", ".client_secrets.json"))
    credentials = get_service(YOUTUBE_PARTNER_SCOPE + " " + YOUTUBE_UPLOAD_SCOPE, "partner", CLIENT_SECRETS_FILE)
if not credentials:
return None
http = httplib2.Http()
try:
http.redirect_codes = set(http.redirect_codes) - {308} # https://github.com/googleapis/google-api-python-client/issues/803
except:
pass
return build("youtubePartner", "v1", http=credentials.authorize(http))
def get_spreadsheet_service():
credentials = get_service(SPREADSHEETS_SCOPE, "spreadsheet")
if not credentials:
return None
discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?version=v4')
http = httplib2.Http()
try:
http.redirect_codes = set(http.redirect_codes) - {308} # https://github.com/googleapis/google-api-python-client/issues/803
except:
pass
return build('sheets', 'v4', http=credentials.authorize(http), discoveryServiceUrl=discoveryUrl)
def add_to_playlist(pID, vID):
consts.youtube.playlistItems().insert(
part="snippet",
body=dict(
snippet=dict(
playlistId=pID,
resourceId=dict(
kind='youtube#video',
videoId=vID)))
).execute()
print("Added to playlist")
def get_secrets(prefixes, relative_paths):
"""
Taken from https://github.com/tokland/youtube-upload/blob/master/youtube_upload/main.py
Get the first existing filename of relative_path seeking on prefixes directories.
"""
try:
return os.path.join(sys._MEIPASS, relative_paths[-1])
except Exception:
        for prefix in prefixes:
            for relative_path in relative_paths:
                path = os.path.join(prefix, relative_path)
                if os.path.exists(path):
                    return path
        return None
|
NikhilNarayana/FRC-CLI-Youtube-Uploader
|
frcuploader/youtube.py
|
Python
|
gpl-3.0
| 7,809
|
[
"VisIt"
] |
fd99a76a6e8957c27579855ff41ec115a944f87563d6e7a5bbd69c32b9c8d231
|
import pygame as pg
from .fastfont import Fastfont
black = (0, 0, 0)
darkgrey = (25, 25, 48)
grey = (84, 84, 114)
darkblue = (25, 25, 64, 100)
white = (255, 255, 255)
green = (0, 255, 0)
blue = (0, 0, 255)
red = (255, 0, 0)
lightgreyblue = (130, 150, 190) # waypoint symbol color
lightgreygreen = (149, 215, 179) # grid color
lightcyan = (0, 255, 255) # FIR boundaries
amber = (255,163,71) # Conflicting aircraft
magenta = (255,0,255) # Used for route
class Console:
"""
Console (aka EditWin) class definition : Edit window & console class
Methods:
echo(msg) : Print a message
insert(message) : insert characters in current edit line
backspace() : process backspace
getline() : return current edit line
enter() : enter, end of edit line
scroll() : scroll up one line
update() : redraw update bitmap of edit window
Created by : Jacco M. Hoekstra (TU Delft)
"""
def __init__(self,win,nch,nlin,winx,winy):
# Was Helvetica,14
self.fontedit = Fastfont(win,'Courier New',14,white,False,False) # name, size, bold,italic
# Edit window: 6 line of 64 chars
self.content = []
self.nch = nch # number of chars per line
self.nlin = nlin # number of lines in windows
self.winx = winx # x-coordinate in pixels of left side
self.winy = winy - self.nlin*self.fontedit.linedy # y-coordinate in pixels of top
self.msg = [] # Messages in edit window
for i in range(self.nlin):
line= self.nch*[' ']
self.content.append(line)
self.content0 = self.content
self.xcursor = 0
self.xedit = 0
# self.printeditwin('Testing 1,2,3')
self.bmpdy = self.nlin*self.fontedit.linedy
self.bmpdx = int(self.nch*self.fontedit.linedy*10/17) + 2 # Guess max aspect ratio
self.bmp = pg.Surface([self.bmpdx,self.bmpdy],
pg.SRCALPHA, 32)
self.bmp.fill(darkblue)
self.rect = pg.Rect(self.winx,self.winy,
self.bmpdx,self.bmpdy)
self.redraw = True
return
def echo(self,msg):
"""print a message to console window"""
if self.xedit==self.xcursor:
self.insert(msg)
j = int(self.xcursor/self.nch)
self.xcursor = (j+1)*self.nch
self.xedit = self.xcursor
# Check for End of window
if self.xedit >= (self.nch-1)*(self.nlin-1):
del self.content[0]
self.content.append(self.nch*[' '])
self.xcursor = j*self.nch
self.xedit = self.xcursor
else:
self.msg.append(msg) # buffer
return
def insert(self,message):
i = self.xcursor%self.nch
j = int(self.xcursor/self.nch)
for ich in range(len(message)):
self.content[j][i]=message[ich]
i = i+1
# Check for end-of line
if i>=self.nch:
i = 0
j = j+1
# Check for end-of edit window
if j>=self.nlin:
self.scroll()
j = j-1
self.xcursor = j*self.nch+i
self.redraw = True
return
def backspace(self):
if self.xcursor>self.xedit:
self.xcursor = self.xcursor-1
self.redraw = True
i = self.xcursor%self.nch
j = int(self.xcursor/self.nch)
self.content[j][i]=" "
return
    def getline(self): # enter was pressed so we need the current command line
line = ""
for idx in range(self.xedit,self.xcursor):
i = idx%self.nch
j = int(idx/self.nch)
line = line+self.content[j][i]
return line
def enter(self):
j = int(self.xcursor/self.nch)
self.xcursor = (j+1)*self.nch
self.xedit = self.xcursor
# End of window
if self.xedit >= (self.nch-1)*(self.nlin-1):
del self.content[0]
self.content.append(self.nch*[' '])
self.xcursor = j*self.nch
self.xedit = self.xcursor
# Print buffered messages
self.redraw = True
while len(self.msg)>0:
            self.echo(self.msg[0]) # No endless recursion because xedit==xcursor
del self.msg[0]
return
def scroll(self):
"""Scroll window"""
del self.content[0]
self.content.append(self.nch*[' '])
self.xcursor = self.xcursor-self.nch
self.xedit = self.xedit-self.nch
def update(self):
"""Update: Draw a new frame"""
# Draw edit window
if self.redraw:
self.bmp.fill(darkgrey)
for j in range(self.nlin):
for i in range(self.nch):
if True or self.content[j][i] != self.content0[j][i]:
x = i*int(self.fontedit.linedy*10/17) + 1
y = j*self.fontedit.linedy+int(self.fontedit.linedy/6)
self.fontedit.printat(self.bmp,
x,y,
self.content[j][i])
self.content0[j][i]=self.content[j][i]
# Draw cursor
i = self.xcursor%self.nch
j = int(self.xcursor/self.nch)
x = i*int(self.fontedit.linedy*10/17)
y = j*self.fontedit.linedy+int(self.fontedit.linedy/6)
self.fontedit.printat(self.bmp,x,y,"_")
self.bmp.set_alpha(127)
self.redraw = False
return
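# Illustrative usage sketch (not part of the original BlueSky code): attach a
# console to a pygame surface and echo a line. The geometry values below are
# assumptions for the example.
def _console_example(screen):
    con = Console(screen, nch=64, nlin=6, winx=10, winy=400)
    con.echo("Testing 1,2,3")
    con.update()
    return con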
|
ProfHoekstra/bluesky
|
bluesky/ui/pygame/console.py
|
Python
|
gpl-3.0
| 5,866
|
[
"Amber"
] |
7c965ff2245f13e7c284ec31befbf00a3585aab9a9575747d87f0f5a5c0136ff
|
from . import core, utils
import cgt
import ctypes, os.path as osp, hashlib, numpy as np, sys, subprocess, string, os, time, traceback, cPickle
from collections import defaultdict, namedtuple
from StringIO import StringIO
import logging
def function(inputs, outputs, dbg=None, updates=None, givens=None):
assert isinstance(inputs, list), "Inputs must be a list"
assert all(el.is_argument() for el in inputs), "Invalid input: should be a list of Argument nodes"
if isinstance(outputs, list):
assert all(isinstance(el, core.Node) for el in outputs), "Invalid output: should all be symbolic variables"
return _function_listout(inputs, outputs, dbg, updates, givens)
elif isinstance(outputs, core.Node):
f_listout = _function_listout(inputs, [outputs], dbg, updates, givens)
return lambda *args : f_listout(*args)[0]
else:
raise ValueError("Expected `outputs` to be a Node or a list of Nodes. Got an object of type %s"%type(outputs))
def _function_listout(inputs, outputs, dbg = None, updates=None, givens=None):
if updates is None: updates = []
else: assert (isinstance(updates, list) and
all(isinstance(a,tuple) and len(a)==2
and isinstance(a[0], core.Node) and isinstance(a[1], core.Node)
for a in updates)), "updates should be a list of pairs (before, after)"
    assert all(before.is_data() for (before, _) in updates), "lhs of updates must be Data instances"
    if givens is None: givens = []
if dbg: raise core.Todo("debug functionality is broken")
outputs = [cgt.make_tuple(*x) if isinstance(x, tuple) else x for x in outputs]
interp = run_compilation_pipeline(inputs, outputs, updates, givens)
return interp
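# Illustrative usage sketch (not from the original source; assumes the
# Theano-style cgt.scalar constructor exists): compile and evaluate x + 2*y.
def _function_example():
    x, y = cgt.scalar('x'), cgt.scalar('y')
    f = function([x, y], x + 2*y)
    return f(1.0, 2.0)  # -> 5.0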
# ================================================================
# Execution
# ================================================================
def python_only():
return not hasattr(cgt,"cycgt")
def determine_devices(nodes_sorted, updatetarg2src):
# Op definitions (available impls, inplace-ness, etc) define constraints
# on possible devices for a node
if python_only():
return {node:Device() for node in nodes_sorted}
# (1) Get available devices for nodes, determined by which impls are available and node types
compile_info = get_compile_info()
cuda_enabled = compile_info["CGT_ENABLE_CUDA"]
node2dev = {}
home_device = core.Device(devtype="cpu", idx=0)
for node in nodes_sorted:
default_device = node.props.get("default_device", home_device)
if node in updatetarg2src:
device = node2dev[updatetarg2src[node]]
elif node.is_data():
device = node.op.device
elif node.is_argument():
device = home_device
else:
if "native_gpu" in node.op.available_impls and (default_device.devtype == "gpu" or "native_cpu" not in node.op.available_impls):
assert cuda_enabled, "trying to put op on gpu but cuda is disabled"
device = core.Device("gpu", default_device.idx)
else:
device = core.Device(devtype="cpu", idx=default_device.idx)
node2dev[node] = device
return node2dev
def is_tensor(x):
return isinstance(x.typ, core.TensorType)
def is_tuple(x):
return isinstance(x.typ, core.TupleType)
def create_interpreter(inputs, outputs, eg, node2memloc):
assert isinstance(eg, ExecutionGraph)
input_types = [input.typ for input in inputs] #pylint: disable=W0622
output_locs = [node2memloc[node] for node in outputs]
config = cgt.get_config()
backend = config["backend"]
parallel = config["parallel"]
if backend == "python":
if parallel:
raise NotImplementedError("For parallel=True, set backend=native")
# return ParallelInterpreter(eg, output_locs, input_types)
else:
return SequentialInterpreter(eg, output_locs, input_types)
elif backend == "native":
if parallel:
return cgt.cycgt.CppInterpreterWrapper(eg, input_types, output_locs, config["num_threads"])
else:
return cgt.cycgt.CppInterpreterWrapper(eg, input_types, output_locs, 0)
else:
raise NotImplementedError("invalid backend %s"%backend)
def topsorted_shapes_first(outputs, node2shape):
# Almost identical to topsorted(...) function
# But we also need to visit the shape elements of an in-place node
# before visiting that node
marks = {}
out = []
stack = []
for x in outputs:
stack.append((x,0))
while stack:
(i,jidx) = stack.pop()
if jidx == 0:
m = marks.get(i,0)
if m == 0:
marks[i] = 1
elif m == 1:
raise ValueError("not a dag")
else:
continue
ps = i.parents
###### Changed part ######
if i.ndim > 0 and not i.is_input() and i.op.return_type=="byref":
if i in node2shape:
shpels = node2shape[i]
else:
raise core.Unreachable
# shpels = i.op.shp_apply(i.parents)
ps = ps + shpels
elif is_tuple(i):
for arrshp in node2shape[i]:
ps = ps + arrshp
##########################
if jidx == len(ps):
marks[i] = 2
out.append(i)
else:
stack.append((i,jidx+1))
j = ps[jidx]
stack.append((j,0))
return out
def determine_memowner(nodes_sorted, updates, node2dev):
# First determine how many "child" nodes each node has
node2child = defaultdict(list)
for node in nodes_sorted:
for parent in node.parents:
node2child[parent].append(node)
# Now traverse graph again and see where we can use the same memory
node2memowner = {} # mapping node x -> the node that owns its memory
# For updates, memlocation(RHS) = memlocation(LHS)
after2before = {after:before for (before,after) in updates}
enable_inplace_opt = core.get_config()["enable_inplace_opt"]
for node in nodes_sorted:
        base = node # by default, a node owns its own memory
if node.is_argument():
pass
elif node.op.writes_to_input >= 0:
base = node2memowner[node.parents[node.op.writes_to_input]]
elif node in after2before:
base = after2before[node]
elif enable_inplace_opt and node.op.return_type == "byref": # TODO think about if we need any other conditions
nodeshape = node.op.shp_apply(node.parents)
for parent in node.parents:
if (len(node2child[parent])==1
and nodeshape==cgt.shape(parent) # XXX not a very robust way to check
and node.dtype == parent.dtype
and _is_data_mutable(parent)):
base = parent
break
# TODO: add optimization for in-place incrementing
node2memowner[node] = base
return node2memowner
class MemCounter(object):
"""
returns `MemLocation`s with indices 0,1,...
`count` member indicates how many have been returned thus far
"""
def __init__(self):
self.count=0
def new_memloc(self, devtype):
out = MemLocation(self.count, devtype)
self.count += 1
return out
def create_execution_graph(inputs, nodes_sorted, node2shape, node2memowner, node2dev):
# node2impltype = copy.copy(node2impltype) # we'll insert transport ops
instrs = []
counter = MemCounter()
node2memloc = {}
for node in nodes_sorted:
if node not in node2dev: node2dev[node] = core.Device(devtype="cpu",idx=node2dev[node.parents[0]].idx if len(node.parents)>0 else 0)
if node.is_argument():
write_loc = counter.new_memloc(node2dev[node].devtype)
node2memloc[node] = write_loc
i = inputs.index(node)
instrs.append(LoadArgument(i, write_loc))
else:
read_locs = [node2memloc[parent] for parent in node.parents]
if node.op.return_type == "byref":
if node2memowner[node] is node:
if is_tensor(node): # just make one memory location for output
nodeshape = node2shape[node] if node.ndim > 0 else []
shape_locs = [node2memloc[shpel] for shpel in nodeshape]
write_loc = counter.new_memloc(node2dev[node].devtype)
instrs.append(Alloc(node.dtype, shape_locs, write_loc))
else: # if it's a tuple, we need to allocate all of the components, then build tuple
nodeshape = node2shape[node]
assert isinstance(nodeshape, tuple)
arr_locs = []
for (arrshp, arrtyp) in utils.safezip(nodeshape, node.typ):
arr_loc = counter.new_memloc(node2dev[node].devtype)
shape_locs = [node2memloc[shpel] for shpel in arrshp]
instrs.append(Alloc(arrtyp.dtype, shape_locs, arr_loc))
arr_locs.append(arr_loc)
write_loc = counter.new_memloc(node2dev[node].devtype)
instrs.append(BuildTup(node.typ, arr_locs, write_loc))
else:
# If this node writes to another node's memory, the devices must be the same
# this should have been enforced in determine_devices()
assert node2dev[node] == node2dev[node2memowner[node]]
write_loc = node2memloc[node2memowner[node]]
instrs.append(ReturnByRef(node.op, [par.typ for par in node.parents], read_locs, write_loc, node_props=node.props))
else:
assert node.op.return_type == "byval"
write_loc = counter.new_memloc(node2dev[node].devtype)
instrs.append(ReturnByVal(node.op, [par.typ for par in node.parents], read_locs, write_loc, node_props=node.props))
node2memloc[node] = write_loc
return ExecutionGraph(instrs, len(inputs), counter.count), node2memloc
def get_callable(op, input_types, devtype, prefer_python=False):
assert op.available_impls, "need to set op.available_impls"
config = core.get_config()
if (prefer_python or config["force_python_impl"]) and "python" in op.available_impls:
return op.get_py_callable(input_types)
elif config["backend"] == "python":
if "python" in op.available_impls:
return op.get_py_callable(input_types)
else:
assert devtype=="cpu", "can't use devtype=gpu with python backend"
if "native_cpu" in op.available_impls:
return get_native_callable(op, input_types, "cpu")
else:
raise RuntimeError("Can't find an implementation of %s suitable for python backend. Just have available_impls=%s"%(op,op.available_impls))
else: # backend = native
if devtype == "cpu":
if "native_cpu" in op.available_impls:
return get_native_callable(op, input_types, "cpu")
else:
print "using python impl for",op
return op.get_py_callable(input_types)
else:
if "native_gpu" in op.available_impls:
return get_native_callable(op, input_types, "gpu")
else:
raise RuntimeError("Tried to put Op %s on the GPU but I only have a python impl :("%op)
def get_native_callable(op, input_types, devtype):
nci = op.get_native_compile_info(input_types, devtype)
nci.op_str = str(op)
nci.return_type = op.return_type
nci.n_in = len(input_types)
return nci2callable(nci)
def add_transports(nodelist, node2dev, node2shape):
node2child = defaultdict(list)
for node in nodelist:
for par in node.parents:
node2child[par].append(node)
# XXX look at native compilation info, gpu deref mask
for node in nodelist:
dev = node2dev[node]
dev2copy = {}
for child in node2child[node]:
childdev = node2dev[child]
if not childdev == dev:
if childdev not in dev2copy:
nodecopy = core.Result(core.Transport(childdev), [node])
node2dev[nodecopy] = childdev
dev2copy[childdev] = nodecopy
node2shape[nodecopy] = node2shape[node]
replace_parents(child, node, dev2copy[childdev])
def replace_parents(node, before, after):
for (i,p) in enumerate(node.parents):
if p is before:
node.parents[i] = after
def run_compilation_pipeline(inputs, outputs, updates, givens):
"""
Compiles the expression graph into an execution graph.
"""
config = core.get_config()
# Phase 1: simplification and analysis of expression graph
# ------------------------------------------------------
    # Add update targets to outputs
outputs_updatetargs = outputs + [after for (_before, after) in updates]
if givens: outputs_updatetargs = core.clone(outputs_updatetargs, dict(givens))
# Do simplification + analysis pass on expression graph
outputs_updatetargs_simple, analysis, _ = \
core.simplify_and_analyze(outputs_updatetargs) if config["enable_simplification"] \
else (outputs_updatetargs, core.analyze(outputs_updatetargs), {})
# Phase 2: device targeting
# ------------------------------------------------------
outputs_updatetargs_simple = cgt.core.clone(outputs_updatetargs_simple)
analysis = core.analyze(outputs_updatetargs_simple)
# XXX inefficient to just copy the graph and redo analysis
nodelist = core.topsorted(outputs_updatetargs_simple)
updatesrcs = [before for (before, _) in updates]
updatetargs_simple = outputs_updatetargs_simple[len(outputs):]
node2dev = determine_devices(nodelist, {targ:src for (src,targ) in zip(updatesrcs, updatetargs_simple)})
add_transports(nodelist, node2dev, analysis["node2shape"])
# Phase 3: build execution graph
# ------------------------------------------------------
# Sort nodes so that shape elements appear before a given node
nodes_sorted = topsorted_shapes_first(outputs_updatetargs_simple, analysis["node2shape"]) # XXX don't need shapes for byval ops
# For each node, figure out if its output should be written to a previous node's memory
# (memowner : "memory owner")
updatetargs_simple = outputs_updatetargs_simple[len(outputs):]
node2memowner = determine_memowner(nodes_sorted, zip(updatesrcs, updatetargs_simple), node2dev)
# Find the outputs we want to return
    outputs_simple = outputs_updatetargs_simple[:len(outputs)] # get rid of the update targets
# Generate execution graph
eg, node2memloc = create_execution_graph(
inputs, nodes_sorted, analysis["node2shape"], node2memowner, node2dev)
# print execution graph
if config["verbose"]:
print 'begin'
print '\n'.join(str(i)+'.) \t'+repr(instr) for (i,instr) in enumerate(eg.instrs))
print 'end'
# Phase 3: create C or Python interpreter for graph
# ------------------------------------------------------
interp = create_interpreter(inputs, outputs_simple, eg, node2memloc)
# Done!
return interp
# ================================================================
# Simple numeric eval via traversal
# ================================================================
def numeric_eval(output, arg2val):
"""
Numerically evaluates symbolic variable without any compilation,
by associating each argument with a value (via `arg2val`) and traversing the
computation graph
Inputs
------
output: symbolic variable or list of variables we would like to evaluate
arg2val: dictionary assigning each argument that output depends on to a numerical value
Returns
-------
Numeric value or list of numeric values of variables corresponding to output
"""
if isinstance(output, list):
assert all(isinstance(x, core.Node) for x in output), "expected a list of Nodes"
return _numeric_eval_listout(output, arg2val)
elif isinstance(output, core.Node):
return _numeric_eval_listout([output], arg2val)[0]
else:
raise ValueError("expected `output` to be a Node or a list of Nodes. Got an object of type %s"%type(output))
def _numeric_eval_listout(outputs, arg2val):
"""
Evaluate outputs numerically. arg2val is a dictionary mapping arguments to numerical values
"""
assert isinstance(outputs, list)
assert isinstance(arg2val, dict)
nodes = list(core.topsorted(outputs))
node2val = {}
for node in nodes:
if node.is_argument():
node2val[node] = core.as_valid_array(arg2val[node])
elif node.is_data():
node2val[node] = node.op.get_value()
else:
parentvals = [node2val[par] for par in node.parents]
node2val[node] = core.py_numeric_apply(node, parentvals)
# assert node.get_ndim() == np.array(node2val[node]).ndim
numeric_outputs = [node2val[node] for node in outputs]
return numeric_outputs
################################################################
### Execution graph
################################################################
MemInfo = namedtuple("MemInfo",["loc","access"])
MEM_OVERWRITE = "overwrite"
MEM_INCREMENT = "increment"
class ExecutionGraph(object):
def __init__(self, instrs, n_args, n_locs):
self.instrs = instrs
self.n_args = n_args
self.n_locs = n_locs
class MemLocation(object):
def __init__(self, idx, devtype):
assert isinstance(idx, int) and devtype in ["cpu", "gpu"]
self.index = idx
self.devtype = devtype
# TODO: dtype
def __repr__(self):
return "%%%i/%s" % (self.index, self.devtype)
# ================================================================
# Instructions
# ================================================================
class Instr(object):
def fire(self, interp):
raise NotImplementedError
class LoadArgument(Instr):
def __init__(self, ind, write_loc):
self.ind = ind
self.write_loc = write_loc
def fire(self, interp):
interp.set(self.write_loc, interp.getarg(self.ind))
def __repr__(self):
return "%s = LoadArg ind:%i" % (self.write_loc, self.ind)
class Alloc(Instr):
def __init__(self, dtype, read_locs, write_loc):
self.dtype = dtype
self.read_locs = read_locs
self.write_loc = write_loc
def fire(self, interp):
shp = tuple(interp.get(mem) for mem in self.read_locs)
prevarr = interp.get(self.write_loc)
if prevarr is None or prevarr.shape != shp:
interp.set(self.write_loc, np.ones(shp, self.dtype))
def __repr__(self):
return "%s = Alloc shp:%s dtype:%s" % (self.write_loc, str(self.read_locs), self.dtype)
class BuildTup(Instr):
def __init__(self, typ, read_locs, write_loc):
self.typ = typ
self.read_locs = read_locs
self.write_loc = write_loc
def fire(self, interp):
interp.set(self.write_loc, tuple(interp.get(loc) for loc in self.read_locs))
def __repr__(self):
return "%s = BuildTup args:%s" % (self.write_loc, str(self.read_locs))
class ReturnByRef(Instr):
def __init__(self, op, input_types, read_locs, write_loc, node_props=None):
self.op = op
self.input_types = input_types
self.read_locs = read_locs
self.write_loc = write_loc
self._callable = None
self.node_props=node_props
def fire(self, interp):
if self._callable is None: self._callable = self.get_callable()
self._callable.call(
[interp.get(mem) for mem in self.read_locs],
interp.get(self.write_loc))
def __repr__(self):
return "%s = ReturnByRef op:%s args:%s" % (self.write_loc, str(self.op), str(self.read_locs))
def get_callable(self):
return get_callable(self.op, self.input_types, self.write_loc.devtype)
class ReturnByVal(Instr):
def __init__(self, op, input_types, read_locs, write_loc, node_props=None):
self.op = op
self.input_types = input_types
self.read_locs = read_locs
self.write_loc = write_loc
self._callable = None
self.node_props=node_props
def fire(self, interp):
if self._callable is None: self._callable = self.get_callable()
interp.set(self.write_loc, self._callable.call([interp.get(mem) for mem in self.read_locs]))
def get_callable(self):
return get_callable(self.op, self.input_types, self.write_loc.devtype)
def __repr__(self):
return "%s = ReturnByVal op:%s args:%s" % (self.write_loc, str(self.op), str(self.read_locs))
# ================================================================
# Compiling native code
# ================================================================
def nci2callable(nci):
template_code = gen_templated_code(nci.includes, nci.closure_triples, nci.func_code)
compile_info = get_compile_info()
prefix = utils.hash_seq1(template_code, compile_info["CPP_FLAGS"], *(src.code for src in nci.extra_srcs))
d = dict(function=_funcname(prefix), closure=_closurename(prefix),setup=_setupname(prefix),teardown=_teardownname(prefix))
fn_srcfile = core.SrcFile("c++",string.Template(template_code).substitute(d))
srcfiles = [fn_srcfile]
srcfiles.extend(core.SrcFile(sf.lang, string.Template(sf.code).substitute(d)) for sf in nci.extra_srcs)
CACHE_ROOT = compile_info["CACHE_ROOT"]
libpath = osp.join(CACHE_ROOT, prefix+".so")
if not osp.exists(libpath):
tu = TranslationUnit(srcfiles, nci.link_flags)
tu.compile(prefix, libpath)
lib = get_or_load_lib(libpath)
fptr = getattr(lib, _funcname(prefix))
setup_fptr = getattr(lib, _setupname(prefix)) if nci.setup else None
teardown_fptr = getattr(lib, _teardownname(prefix)) if nci.teardown else None
cldata = _build_closure(nci.closure_triples)
return core.NativeCallable(nci.n_in, nci.return_type, nci.op_str, fptr, cldata=cldata, setup_fptr=setup_fptr, teardown_fptr=teardown_fptr,
store_objects=nci.store_objects)
def _funcname(prefix):
return "call_"+prefix
def _setupname(prefix):
return "setup_"+prefix
def _teardownname(prefix):
return "teardown_"+prefix
def _closurename(prefix):
return "closure_"+prefix
def gen_templated_code(includes, closure_info, func_code):
s = StringIO()
includes = ["cgt_common.h"] + includes
for fname in includes:
s.write('#include "%s"\n'%fname)
gen_struct_code(closure_info, s)
s.write(func_code)
return s.getvalue()
def gen_struct_code(triples, outstream):
if triples is None:
return
outstream.write("typedef struct $closure {\n")
for (fieldname,fieldtype,_val) in triples:
outstream.write(_ctypes2str[fieldtype])
outstream.write(" ")
outstream.write(fieldname)
outstream.write(";\n")
outstream.write("} $closure;\n")
_LIBRARIES = {}
def get_or_load_lib(libname):
if libname in _LIBRARIES:
return _LIBRARIES[libname]
else:
out = ctypes.cdll.LoadLibrary(libname)
_LIBRARIES[libname] = out
return out
class TranslationUnit(object):
"""All the input that goes into building a native binary for one or more ops"""
def __init__(self, srcfiles, link_flags):
self.srcfiles = srcfiles
self.link_flags = link_flags
def compile(self, prefix, libpath):
"""
Compiles all of the files, places them in the cache directory
        then links them together, creating prefix.so
"""
CACHE_ROOT = get_compile_info()["CACHE_ROOT"]
cmds = ["cd %s"%CACHE_ROOT]
objs = []
for (i,(lang,code)) in enumerate(self.srcfiles):
if lang=="c++":
srcpath = osp.join(CACHE_ROOT, prefix+"_%i.cpp"%i)
cmds.append(_make_cpp_compile_cmd(srcpath))
elif lang=="cuda":
srcpath = osp.join(CACHE_ROOT, prefix+"_%i.cu"%i)
cmds.append(_make_cuda_compile_cmd(srcpath))
else:
raise NotImplementedError
with open(srcpath,"w") as fh: fh.write(code)
objs.append(srcpath+".o")
cmds.append(_make_link_cmd(objs, self.link_flags, libpath))
bigcmd = " && ".join(cmds)
call_and_print(bigcmd)
_COMPILE_CONFIG = None
def get_compile_info():
global _COMPILE_CONFIG
if _COMPILE_CONFIG is None:
config = core.get_config()
CGT_BUILD_ROOT = cgt.cycgt.cgt_build_root() #pylint: disable=E1101
cmake_info = {}
with open(osp.join(CGT_BUILD_ROOT,"build_info.txt")) as fh:
lines = fh.readlines()
for line in lines:
if ":=" not in line: print "skipping",line
lhs,rhs = line.split(":=")
lhs = lhs.strip()
rhs = rhs.strip()
cmake_info[lhs] = rhs
CUDA_ROOT = cmake_info["CUDA_ROOT"]
CGT_ENABLE_CUDA = cmake_info["CGT_ENABLE_CUDA"] in ["1","ON"]
CGT_ENABLE_CUDNN = cmake_info["CGT_ENABLE_CUDNN"] in ["1","ON"]
DEFINITIONS = "-DENABLE_CUDA" if CGT_ENABLE_CUDA else ""
CUDNN_ROOT = cmake_info["CUDNN_ROOT"]
_COMPILE_CONFIG = dict(
OPENBLAS_INCLUDE_DIR = osp.join(CGT_BUILD_ROOT,"OpenBLAS"),
CGT_INCLUDE_DIR = cmake_info["CGT_INCLUDE_DIR"],
CGT_LIBRARY_DIR = osp.join(CGT_BUILD_ROOT,"lib"),
CUDA_LIBRARY_DIR = osp.join(CUDA_ROOT,"lib"),
CUDA_INCLUDE_DIR = osp.join(CUDA_ROOT,"include"),
CUDA_LIBRARIES = cmake_info["CUDA_LIBRARIES"],
DEFINITIONS = DEFINITIONS,
CUDA_ROOT = CUDA_ROOT,
CUDNN_ROOT = CUDNN_ROOT,
CACHE_ROOT = osp.expanduser(config["cache_dir"]),
CGT_ENABLE_CUDA = CGT_ENABLE_CUDA,
CGT_ENABLE_CUDNN = CGT_ENABLE_CUDNN,
# CGT_LIBRARY = cmake_info["CGT_LIBRARY"],
)
includes = "-I"+_COMPILE_CONFIG["CGT_INCLUDE_DIR"]
includes += " -I"+_COMPILE_CONFIG["OPENBLAS_INCLUDE_DIR"]
link_flags = ""
if _COMPILE_CONFIG["CGT_ENABLE_CUDA"]: includes += " -I"+_COMPILE_CONFIG["CUDA_INCLUDE_DIR"]
if _COMPILE_CONFIG["CGT_ENABLE_CUDNN"]: includes += " -I"+_COMPILE_CONFIG["CUDNN_ROOT"]
_COMPILE_CONFIG["INCLUDES"] = includes
link_flags = "-lcgt -L"+_COMPILE_CONFIG["CGT_LIBRARY_DIR"]
if _COMPILE_CONFIG["CGT_ENABLE_CUDA"]: link_flags += " -L"+_COMPILE_CONFIG["CUDA_LIBRARY_DIR"]
if _COMPILE_CONFIG["CGT_ENABLE_CUDNN"]:
link_flags += " -L"+_COMPILE_CONFIG["CUDNN_ROOT"]
link_flags += " -Wl,-rpath,"+_COMPILE_CONFIG["CUDNN_ROOT"]
if sys.platform == "darwin":
link_flags += " -dynamiclib -Wl,-headerpad_max_install_names"
else:
link_flags += " -shared -rdynamic"
_COMPILE_CONFIG["LINK_FLAGS"] = link_flags
cpp_flags = "-fvisibility=hidden -std=c++11 -fPIC" + (" -O0 -g" if config["debug_cpp"] else " -O3 -DNDEBUG")
if sys.platform == "darwin": cpp_flags += " -stdlib=libc++"
_COMPILE_CONFIG["CPP_FLAGS"] = cpp_flags
CACHE_ROOT = _COMPILE_CONFIG["CACHE_ROOT"]
if not osp.exists(CACHE_ROOT):
os.makedirs(CACHE_ROOT)
return _COMPILE_CONFIG
def _make_cpp_compile_cmd(srcpath):
d = get_compile_info()
return "c++ %(cpp_flags)s %(srcpath)s -c -o %(srcpath)s.o %(includes)s %(definitions)s"%dict(
srcpath = srcpath, includes=d["INCLUDES"], definitions=d["DEFINITIONS"],
cpp_flags=d["CPP_FLAGS"], cacheroot=d["CACHE_ROOT"])
def _make_cuda_compile_cmd(srcpath):
d = get_compile_info()
return "nvcc %(srcpath)s -c -o %(srcpath)s.o -ccbin cc -m64 -Xcompiler -fPIC -Xcompiler -O3 -Xcompiler -arch -Xcompiler x86_64 %(includes)s %(definitions)s"%dict(
srcpath = srcpath, includes=d["INCLUDES"], definitions=d["DEFINITIONS"])
def _make_link_cmd(objs, extra_link_flags, libpath):
d = get_compile_info()
iname = "-install_name %s"%osp.basename(libpath) if sys.platform=="darwin" else ""
return r"c++ %(cpp_flags)s %(objnames)s %(link_flags)s %(iname)s -o %(libpath)s"%dict(
objnames=" ".join(objs), includes=d["INCLUDES"], cpp_flags=d["CPP_FLAGS"], libpath=libpath,
link_flags=d["LINK_FLAGS"]+" "+extra_link_flags, cacheroot=d["CACHE_ROOT"], iname=iname)
def call_and_print(cmd):
print "\x1b[32m%s\x1b[0m"%cmd
subprocess.check_call(cmd,shell=True)
_ctypes2str = {
ctypes.c_byte : "uint8_t",
ctypes.c_bool : "bool",
ctypes.c_char : "char",
ctypes.c_int : "int",
ctypes.c_long : "long",
ctypes.c_void_p : "void*",
ctypes.c_double : "double",
ctypes.c_float : "float"
}
_struct_cache = {} # because creating ctypes.Structure class is slow for some reason
def _build_closure(triples):
if triples is None:
return ctypes.c_void_p(0)
vals = []
fields = []
for (fieldname,fieldtype,val) in triples:
vals.append(val)
fields.append((fieldname,fieldtype))
try:
key = cPickle.dumps(fields)
S = _struct_cache[key]
except KeyError:
class S(ctypes.Structure):
_fields_ = fields
_struct_cache[key] = S
closure = S(*vals)
return closure
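# Self-contained example (field names are illustrative): pack two scalars
# into a ctypes closure struct.
#
#     triples = [("n", ctypes.c_int, 4), ("alpha", ctypes.c_double, 0.5)]
#     c = _build_closure(triples)
#     c.n, c.alpha   # -> (4, 0.5)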
################################################################
### Interpreters
################################################################
class Interpreter(object):
def __call__(self, args):
raise NotImplementedError
def get(self, mem):
raise NotImplementedError
def set(self, mem, val):
raise NotImplementedError
def getarg(self, i):
raise NotImplementedError
class SequentialInterpreter(Interpreter):
"""
Runs an execution graph
"""
def __init__(self, eg, output_locs, input_types, copy_outputs=True):
self.eg = eg
self.input_types = input_types
self.output_locs = output_locs
self.storage = [None for _ in xrange(self.eg.n_locs)]
self.args = None
self.copy_outputs = copy_outputs
def __call__(self, *args):
assert len(args) == len(self.input_types), "Wrong number of inputs provided"
self.args = tuple(core.as_valid_array(arg, intype) for (arg, intype) in zip(args, self.input_types))
for instr in self.eg.instrs:
if profiler.on: tstart = time.time()
try:
instr.fire(self)
except Exception as e:
traceback.print_exc()
if isinstance(instr, (ReturnByRef,ReturnByVal)):
if core.get_config()["debug"]:
assert "stack" in instr.node_props
utils.colorprint(utils.Color.MAGENTA, "HERE'S THE STACK WHEN THE OFFENDING NODE WAS CREATED\n",o=sys.stderr)
print>>sys.stderr, ">>>>>>>>>>>>>>>>>>>>>>>>>>"
traceback.print_list(instr.node_props["stack"])
print>>sys.stderr, "<<<<<<<<<<<<<<<<<<<<<<<<<<"
raise e
else:
utils.error("Didn't save the stack so I can't give you a nice traceback :(. Try running with CGT_FLAGS=debug=True")
raise e
else:
utils.error("Oy vey, an exception occurred in a %s Instruction. I don't know how to help you debug this one right now :(."%type(instr))
raise e
if profiler.on: profiler.update(instr, time.time()-tstart)
        outputs = [self.get(loc) for loc in self.output_locs]
        # need to copy because otherwise we might mess up the data when we call func again
        # todo: add option that prevents this behavior
        if self.copy_outputs: outputs = map(_copy, outputs)
        return outputs
def get(self, mem):
return self.storage[mem.index]
def set(self, mem, val):
self.storage[mem.index] = val
def getarg(self, i):
return self.args[i]
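# Sketch of how the interpreter is driven (illustrative; in practice `eg`,
# `output_locs` and `input_types` are produced by a compiler pass, not built
# by hand):
#
#     interp = SequentialInterpreter(eg, output_locs, input_types)
#     out, = interp(np.arange(3))   # fires each Instr in order, returns outputs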
# ================================================================
# Profiler
# ================================================================
class _Profiler(object):
"""
Profiler for Python backend, i.e. Interpreter
"""
def __init__(self):
self.instr2stats = {}
self.on = False
self.t_total = 0.0
def start(self): self.on = True
def stop(self): self.on = False
def update(self, instr, elapsed):
(prevcount, prevtime) = self.instr2stats.get(instr, (0,0.0))
self.instr2stats[instr] = (prevcount+1, prevtime+elapsed)
self.t_total += elapsed
def print_stats(self):
op2stats = {}
# Collapse by Op, rather than instruction
for (instr,(count,t)) in self.instr2stats.iteritems():
if isinstance(instr, (ReturnByRef, ReturnByVal)):
opkey = str(instr.op)
elif isinstance(instr, Alloc):
opkey = "Alloc{dtype=%s,ndim=%i}"%(instr.dtype, len(instr.read_locs))
else:
opkey = instr.__class__.__name__
(prevcount, prevtime) = op2stats.get(opkey, (0, 0.0))
op2stats[opkey] = (prevcount+count, prevtime+t)
print "Total time elapsed: %.3g seconds"%self.t_total
# _print_heading("By instruction")
# _print_stats(self.instr2stats, self.t_total)
_print_heading("By Op")
_print_stats(op2stats, self.t_total)
def clear_stats(self):
self.instr2stats = {}
self.t_total = 0.0
profiler = _Profiler()
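# Usage sketch: wrap interpreted calls to collect per-Op timings.
#
#     profiler.start()
#     f(x)                    # any call backed by SequentialInterpreter
#     profiler.stop()
#     profiler.print_stats()  # table of counts, times and cumulative fractions
#     profiler.clear_stats()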
def _print_heading(heading):
heading = " " + heading + " "
width = 60
assert len(heading) < width-10
print
print "*"*width
padleft = (width-len(heading))//2
padright = width-len(heading)-padleft
print "*"*padleft + heading + "*"*padright
print "*"*width
def _print_stats(key2stats, t_total):
rows = []
for (key, (count,t)) in key2stats.iteritems():
rows.append([str(key), count, t, t/t_total])
rows = sorted(rows, key=lambda row: row[2], reverse=True)
cumsum = 0
for row in rows:
cumsum += row[3]
row.append(cumsum)
from thirdparty.tabulate import tabulate
print tabulate(rows, headers=["Instruction","Count","Time","Frac","Frac cumsum"])
def _copy(x):
if isinstance(x, np.ndarray): return x.copy()
elif isinstance(x, tuple): return tuple(el.copy() for el in x)
elif np.isscalar(x): return x # xxx is this case ok?
else: raise NotImplementedError
def typecheck_args(numargs, types):
assert len(numargs)==len(types), "wrong number of arguments. got %i, expected %i"%(len(numargs),len(types))
for (numarg,typ) in zip(numargs,types):
if isinstance(typ, core.TensorType):
assert numarg.dtype==typ.dtype and numarg.ndim==typ.ndim
# ================================================================
# Utils
# ================================================================
def _list_to_json(xs):
return [x.to_json() for x in xs]
def _is_data_mutable(node):
return not node.is_input() and not isinstance(node.op, core.Constant)
|
jameshensman/cgt
|
cgt/compilation.py
|
Python
|
mit
| 35,903
|
[
"VisIt"
] |
da7f20f6d94d11d02266f0a564ef036b818588b9aba828a2159df8d0e17535d9
|
#------------------------------------------------------------------------------
#
# This file is part of the SternheimerGW code.
#
# Copyright (C) 2010 - 2018
# Henry Lambert, Martin Schlipf, and Feliciano Giustino
#
# SternheimerGW is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SternheimerGW is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SternheimerGW. If not, see
# http://www.gnu.org/licenses/gpl.html .
#
#------------------------------------------------------------------------------
import os
import module
os.chdir('..')
def goto_directory_of_library_to_get_string(struct):
string = ''
for key in struct:
os.chdir(key)
string += generate_library_string(struct[key], key)
os.chdir('..')
return string
def generate_library_string(struct, key):
string = key.upper() + '_LIB = ' + os.getcwd()
if module.depend in struct:
string += '/src/lib' + key.lower() + '.a\n'
else:
string += '/lib' + key.lower() + '.a\n'
string += goto_directory_of_library_to_get_string(struct)
return string
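# Illustrative output (hypothetical structure {'gw': {}} without a
# module.depend entry, starting from /home/user/SternheimerGW):
#
#     GW_LIB = /home/user/SternheimerGW/gw/libgw.a
#
# When module.depend is present in the sub-structure, the library path is
# '<key>/src/lib<key>.a' instead.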
string = 'BASE_LIB = $(ESPRESSO)/Modules/libqemod.a $(ESPRESSO)/FFTXlib/libqefft.a \\\n'
string += ' $(ESPRESSO)/KS_Solvers/Davidson/libdavid.a $(ESPRESSO)/KS_Solvers/CG/libcg.a \\\n'
string += ' $(ESPRESSO)/LAXlib/libqela.a $(ESPRESSO)/UtilXlib/libutil.a \\\n'
string += ' $(ESPRESSO)/dft-d3/libdftd3qe.a $(ESPRESSO)/clib/clib.a $(ESPRESSO)/iotk/src/libiotk.a\n'
string += 'PW_LIB = $(ESPRESSO)/PW/src/libpw.a\n'
string += 'LR_LIB = $(ESPRESSO)/LR_Modules/liblrmod.a\n'
string += 'VENDOR_LIB = ' + os.getcwd() + '/vendor/libvendor.a\n'
string += goto_directory_of_library_to_get_string(module.structure)
library_file = open('library', 'w')
library_file.write(string)
library_file.close()
|
mmdg-oxford/SternheimerGW
|
install/generate_library.py
|
Python
|
gpl-3.0
| 2,232
|
[
"ESPResSo"
] |
63d20e44cbfdb02f75c6d1bc6245ac54df318925bfbec5af6745aa9fa43edd07
|
"""
Copyright (c) 2015 Andreea Georgescu
Created on Fri May 8 15:56:32 2015
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import absolute_import
from __future__ import division
import numpy as np
#from scipy.interpolate import interp1d
from interp import interp1d
#from scipy.optimize import curve_fit
#import matplotlib.pyplot as plt
pi = np.pi
name = "CDMSSi2012"
modulated = False
energy_resolution_type = "Gaussian"
def EnergyResolution(e):
return np.sqrt(0.085849 + 0.003136 * e)
FFSD = 'GaussianFFSD'
FFSI = 'HelmFF'
FF = {'SI': FFSI,
'SDPS': FFSD,
'SDAV': FFSD,
}
target_nuclide_AZC_list = np.array([[28, 14, 0.918663943428171], [29, 14, 0.04833558589888038],
[30, 14, 0.03300047067294847]])
target_nuclide_JSpSn_list = np.array([[0, 0, 0], [1./2, -0.0019 * np.sqrt(3./(2 * pi)),
.1334 * np.sqrt(3./(2 * pi))], [0, 0, 0]])
target_nuclide_mass_list = np.array([26.0603, 26.9914, 27.9204])
num_target_nuclides = target_nuclide_mass_list.size
def QuenchingFactor(e):
return np.ones_like(e)
Ethreshold = 7.
Emaximum = 100.
ERmaximum = 100.
x = np.array([6.88073000e+00, 6.93807000e+00, 7.33945000e+00, 7.91284000e+00, 8.25688000e+00,
8.48624000e+00, 9.11697000e+00, 9.74771000e+00, 1.05505000e+01, 1.14106000e+01,
1.23853000e+01, 1.33028000e+01, 1.41628000e+01, 1.47362000e+01, 1.49083000e+01,
1.49083000e+01, 1.49656000e+01, 1.56537000e+01, 1.63991000e+01, 1.72592000e+01,
1.82913000e+01, 1.91514000e+01, 1.97821000e+01, 1.98968000e+01, 2.12156000e+01,
2.26491000e+01, 2.97592000e+01, 3.00459000e+01, 3.19954000e+01, 3.51491000e+01,
3.83028000e+01, 4.18578000e+01, 4.67890000e+01, 5.36697000e+01, 1.00057000e+02])
y = np.array([4.17071000e-02, 8.43841000e-02, 1.10572000e-01, 1.29971000e-01, 1.41610000e-01,
1.55189000e-01, 1.70708000e-01, 1.84287000e-01, 1.97866000e-01, 2.07565000e-01,
2.16295000e-01, 2.22114000e-01, 2.28904000e-01, 2.32784000e-01, 2.38603000e-01,
2.49273000e-01, 2.54122000e-01, 2.60912000e-01, 2.66731000e-01, 2.73521000e-01,
2.79340000e-01, 2.84190000e-01, 2.86130000e-01, 3.23957000e-01, 3.32687000e-01,
3.40446000e-01, 3.80213000e-01, 3.90883000e-01, 4.02522000e-01, 4.15131000e-01,
4.23860000e-01, 4.31620000e-01, 4.41319000e-01, 4.52958000e-01, 5.19884000e-01])
Efficiency_interp = interp1d(x, y)
def Efficiency(e):
return Efficiency_interp(e) if e >= Ethreshold else 0.
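# Below Ethreshold the efficiency is zero; above it the tabulated curve is
# interpolated. For scalar energies, e.g.:
#     Efficiency(5.)    # -> 0.  (below Ethreshold = 7 keV)
#     Efficiency(20.)   # -> value interpolated between tabulated points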
def Efficiency_ER(er):
return np.ones_like(er)
Exposure = 140.2
ERecoilList = np.array([10])
mu_BKG_i = np.array([0.0208345])
NBKG = 0.7
|
SamWitte/Codds_DarkMatter
|
src/Data/CDMSSiArtif.py
|
Python
|
gpl-2.0
| 3,319
|
[
"Gaussian"
] |
85a14272692c8b36c80547f2e806f85743c8b3e6bbd18197a196d562ba963616
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import os,unittest,numpy as np
from pyscf.nao import mf
class KnowValues(unittest.TestCase):
def test_exc(self):
""" Compute exchange-correlation energy """
from timeit import default_timer as timer
sv = mf(label='water', cd=os.path.dirname(os.path.abspath(__file__)))
dm = sv.make_rdm1()
exc = sv.exc(dm, xc_code='1.0*LDA,1.0*PZ', level=4)
#self.assertAlmostEqual(exc, -4.1422234271159333) ? redone water?
self.assertAlmostEqual(exc, -4.1422239276270201)
if __name__ == "__main__": unittest.main()
|
gkc1000/pyscf
|
pyscf/nao/test/test_0032_exc.py
|
Python
|
apache-2.0
| 1,202
|
[
"PySCF"
] |
d43fe519337c7cfa4e2a9a7de0732e2c0ab40591d0a84f3292425b620dd43ac1
|
import operator
from functools import partial
import numpy as np
from .. import Variable
from ..conventions import pop_to, cf_encoder
from ..core import indexing
from ..core.utils import (FrozenOrderedDict, NDArrayMixin,
close_on_error, is_remote_uri)
from ..core.pycompat import iteritems, basestring, OrderedDict
from .common import AbstractWritableDataStore, robust_getitem
from .netcdf3 import (encode_nc3_attr_value, encode_nc3_variable,
maybe_convert_to_char_array)
# This lookup table maps from dtype.byteorder to a readable endian
# string used by netCDF4.
_endian_lookup = {'=': 'native',
'>': 'big',
'<': 'little',
'|': 'native'}
class NetCDF4ArrayWrapper(NDArrayMixin):
def __init__(self, array, is_remote=False):
self.array = array
self.is_remote = is_remote
@property
def dtype(self):
dtype = self.array.dtype
if dtype is str:
# return object dtype because that's the only way in numpy to
# represent variable length strings; it also prevents automatic
# string concatenation via conventions.decode_cf_variable
dtype = np.dtype('O')
return dtype
def __getitem__(self, key):
if self.is_remote: # pragma: no cover
getitem = partial(robust_getitem, catch=RuntimeError)
else:
getitem = operator.getitem
data = getitem(self.array, key)
if self.ndim == 0:
# work around for netCDF4-python's broken handling of 0-d
# arrays (slicing them always returns a 1-dimensional array):
# https://github.com/Unidata/netcdf4-python/pull/220
data = np.asscalar(data)
return data
def _nc4_values_and_dtype(var):
if var.dtype.kind == 'U':
# this entire clause should not be necessary with netCDF4>=1.0.9
if len(var) > 0:
var = var.astype('O')
dtype = str
elif var.dtype.kind in ['i', 'u', 'f', 'S']:
        # use character arrays instead of unicode, because unicode support in
# netCDF4 is still rather buggy
data, dims = maybe_convert_to_char_array(var.data, var.dims)
var = Variable(dims, data, var.attrs, var.encoding)
dtype = var.dtype
else:
raise ValueError('cannot infer dtype for netCDF4 variable')
return var, dtype
def _nc4_group(ds, group, mode):
if group in set([None, '', '/']):
# use the root group
return ds
else:
# make sure it's a string
if not isinstance(group, basestring):
raise ValueError('group must be a string or None')
# support path-like syntax
path = group.strip('/').split('/')
for key in path:
try:
ds = ds.groups[key]
except KeyError as e:
if mode != 'r':
ds = ds.createGroup(key)
else:
# wrap error to provide slightly more helpful message
raise IOError('group not found: %s' % key, e)
return ds
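# For example (hypothetical group names), _nc4_group(ds, 'forecast/model1', 'r')
# descends into ds.groups['forecast'].groups['model1']; in write modes, missing
# groups are created on the way down.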
def _ensure_fill_value_valid(data, attributes):
# work around for netCDF4/scipy issue where _FillValue has the wrong type:
# https://github.com/Unidata/netcdf4-python/issues/271
if data.dtype.kind == 'S' and '_FillValue' in attributes:
attributes['_FillValue'] = np.string_(attributes['_FillValue'])
def _force_native_endianness(var):
# possible values for byteorder are:
# = native
# < little-endian
# > big-endian
# | not applicable
# Below we check if the data type is not native or NA
if var.dtype.byteorder not in ['=', '|']:
# if endianness is specified explicitly, convert to the native type
data = var.data.astype(var.dtype.newbyteorder('='))
var = Variable(var.dims, data, var.attrs, var.encoding)
# if endian exists, remove it from the encoding.
var.encoding.pop('endian', None)
    # check whether the encoding explicitly requests a non-native endianness
    if var.encoding.get('endian', 'native') != 'native':
raise NotImplementedError("Attempt to write non-native endian type, "
"this is not supported by the netCDF4 python "
"library.")
return var
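# e.g. a variable with dtype '>f8' (explicit big-endian) is cast to native byte
# order here, since writing non-native-endian data is unsupported by the
# netCDF4 python library (see the error raised above).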
class NetCDF4DataStore(AbstractWritableDataStore):
"""Store for reading and writing data via the Python-NetCDF4 library.
This store supports NetCDF3, NetCDF4 and OpenDAP datasets.
"""
def __init__(self, filename, mode='r', clobber=True, diskless=False,
persist=False, format='NETCDF4', group=None):
import netCDF4 as nc4
ds = nc4.Dataset(filename, mode=mode, clobber=clobber,
diskless=diskless, persist=persist,
format=format)
with close_on_error(ds):
self.ds = _nc4_group(ds, group, mode)
self.format = format
self.is_remote = is_remote_uri(filename)
self._filename = filename
def store(self, variables, attributes):
# All NetCDF files get CF encoded by default, without this attempting
# to write times, for example, would fail.
cf_variables, cf_attrs = cf_encoder(variables, attributes)
AbstractWritableDataStore.store(self, cf_variables, cf_attrs)
def open_store_variable(self, var):
var.set_auto_maskandscale(False)
dimensions = var.dimensions
data = indexing.LazilyIndexedArray(NetCDF4ArrayWrapper(
var, self.is_remote))
attributes = OrderedDict((k, var.getncattr(k))
for k in var.ncattrs())
_ensure_fill_value_valid(data, attributes)
# netCDF4 specific encoding; save _FillValue for later
encoding = {}
filters = var.filters()
if filters is not None:
encoding.update(filters)
chunking = var.chunking()
if chunking is not None:
if chunking == 'contiguous':
encoding['contiguous'] = True
encoding['chunksizes'] = None
else:
encoding['contiguous'] = False
encoding['chunksizes'] = tuple(chunking)
# TODO: figure out how to round-trip "endian-ness" without raising
# warnings from netCDF4
# encoding['endian'] = var.endian()
pop_to(attributes, encoding, 'least_significant_digit')
# save source so __repr__ can detect if it's local or not
encoding['source'] = self._filename
return Variable(dimensions, data, attributes, encoding)
def get_variables(self):
return FrozenOrderedDict((k, self.open_store_variable(v))
for k, v in iteritems(self.ds.variables))
def get_attrs(self):
return FrozenOrderedDict((k, self.ds.getncattr(k))
for k in self.ds.ncattrs())
def get_dimensions(self):
return FrozenOrderedDict((k, len(v))
for k, v in iteritems(self.ds.dimensions))
def set_dimension(self, name, length):
self.ds.createDimension(name, size=length)
def set_attribute(self, key, value):
if self.format != 'NETCDF4':
value = encode_nc3_attr_value(value)
self.ds.setncattr(key, value)
def prepare_variable(self, name, variable):
attrs = variable.attrs.copy()
variable = _force_native_endianness(variable)
if self.format == 'NETCDF4':
variable, datatype = _nc4_values_and_dtype(variable)
else:
variable = encode_nc3_variable(variable)
datatype = variable.dtype
self.set_necessary_dimensions(variable)
fill_value = attrs.pop('_FillValue', None)
if fill_value in ['', '\x00']:
# these are equivalent to the default FillValue, but netCDF4
# doesn't like setting fill_value to an empty string
fill_value = None
encoding = variable.encoding
nc4_var = self.ds.createVariable(
varname=name,
datatype=datatype,
dimensions=variable.dims,
zlib=encoding.get('zlib', False),
complevel=encoding.get('complevel', 4),
shuffle=encoding.get('shuffle', True),
fletcher32=encoding.get('fletcher32', False),
contiguous=encoding.get('contiguous', False),
chunksizes=encoding.get('chunksizes'),
endian='native',
least_significant_digit=encoding.get('least_significant_digit'),
fill_value=fill_value)
nc4_var.set_auto_maskandscale(False)
for k, v in iteritems(attrs):
# set attributes one-by-one since netCDF4<1.0.10 can't handle
# OrderedDict as the input to setncatts
nc4_var.setncattr(k, v)
return nc4_var, variable.data
def sync(self):
self.ds.sync()
def close(self):
ds = self.ds
# netCDF4 only allows closing the root group
while ds.parent is not None:
ds = ds.parent
ds.close()
|
clarkfitzg/xray
|
xray/backends/netCDF4_.py
|
Python
|
apache-2.0
| 9,252
|
[
"NetCDF"
] |
8d121b86de3a5e2836197dbdd2f06361d7353afc53d690d19747189122786140
|
# -*- coding: utf-8 -*-
# SyConn - Synaptic connectivity inference toolkit
#
# Copyright (c) 2016 - now
# Max-Planck-Institute of Neurobiology, Munich, Germany
# Authors: Philipp Schubert
import time
from multiprocessing import Queue, Process
from typing import Optional
import networkx as nx
import numpy as np
from syconn import global_params
from syconn.handler.basics import chunkify
from syconn.handler.config import initialize_logging
from syconn.mp import batchjob_utils as qu
from syconn.proc.graphs import create_ccsize_dict
from syconn.reps.segmentation import SegmentationDataset
from syconn.reps.segmentation_helper import find_missing_sv_views
from syconn.reps.super_segmentation import SuperSegmentationDataset
from syconn.reps.super_segmentation import SuperSegmentationObject
from syconn.reps.super_segmentation_helper import find_incomplete_ssv_views
def _run_neuron_rendering_small_helper(max_n_jobs: Optional[int] = None):
"""
Render the default views as defined in ``global_params`` [WIP] of small
neuron reconstructions. Helper method of :func:`~run_neuron_rendering`.
Args:
max_n_jobs: Number of parallel jobs.
Notes:
Requires :func:`~syconn.exec.exec_init.run_create_neuron_ssd`.
"""
if max_n_jobs is None:
max_n_jobs = global_params.config.ngpu_total * 10 if \
global_params.config['pyopengl_platform'] == 'egl' \
else global_params.config.ncore_total * 4
log = initialize_logging('neuron_view_rendering_small',
global_params.config.working_dir + '/logs/')
# view rendering prior to glia removal, choose SSD accordingly
ssd = SuperSegmentationDataset(working_dir=global_params.config.working_dir)
# TODO: use actual size criteria, e.g. number of sampling locations
nb_svs_per_ssv = np.array([len(ssd.mapping_dict[ssv_id])
for ssv_id in ssd.ssv_ids])
# render normal size SSVs
size_mask = nb_svs_per_ssv <= global_params.config['glia']['rendering_max_nb_sv']
if 'example' in global_params.config.working_dir and np.sum(~size_mask) == 0:
# generate at least one (artificial) huge SSV
size_mask[:1] = False
size_mask[1:] = True
multi_params = ssd.ssv_ids[size_mask]
# sort ssv ids according to their number of SVs (descending)
ordering = np.argsort(ssd.load_numpy_data('size')[size_mask])
multi_params = multi_params[ordering[::-1]]
multi_params = chunkify(multi_params, max_n_jobs)
# list of SSV IDs and SSD parameters need to be given to a single QSUB job
multi_params = [(ixs, global_params.config.working_dir) for ixs in multi_params]
log.info('Started rendering of {} SSVs. '.format(np.sum(size_mask)))
if global_params.config['pyopengl_platform'] == 'osmesa': # utilize all CPUs
qu.batchjob_script(multi_params, "render_views", log=log, suffix='_small',
remove_jobfolder=True)
elif global_params.config['pyopengl_platform'] == 'egl': # utilize 1 GPU per task
# run EGL on single node: 20 parallel jobs
if not qu.batchjob_enabled():
n_cores = 1
qu.batchjob_script(multi_params, "render_views", suffix='_small', log=log,
additional_flags="--gres=gpu:2", disable_batchjob=True,
n_cores=n_cores, remove_jobfolder=True)
# run on whole cluster
else:
n_cores = global_params.config['ncores_per_node'] // global_params.config['ngpus_per_node']
qu.batchjob_script(multi_params, "render_views_egl", suffix='_small', log=log,
additional_flags="--gres=gpu:1",
n_cores=n_cores, remove_jobfolder=True)
else:
raise RuntimeError('Specified OpenGL platform "{}" not supported.'
''.format(global_params.config['pyopengl_platform']))
log.info('Finished rendering of {}/{} SSVs.'.format(len(ordering),
len(nb_svs_per_ssv)))
def _run_neuron_rendering_big_helper(max_n_jobs: Optional[int] = None):
"""
Render the default views as defined in ``global_params`` [WIP] of huge
neuron reconstructions. Helper method of :func:`~run_neuron_rendering`.
Args:
max_n_jobs: Number of parallel jobs.
Notes:
Requires :py:func:`~syconn.exec.exec_init.run_create_neuron_ssd`.
"""
if max_n_jobs is None:
max_n_jobs = global_params.config['nnodes_total'] * 2
log = initialize_logging('neuron_view_rendering_big',
global_params.config.working_dir + '/logs/')
# view rendering prior to glia removal, choose SSD accordingly
ssd = SuperSegmentationDataset(working_dir=global_params.config.working_dir)
# TODO: use actual size criteria, e.g. number of sampling locations
nb_svs_per_ssv = np.array([len(ssd.mapping_dict[ssv_id])
for ssv_id in ssd.ssv_ids])
# render normal size SSVs
size_mask = nb_svs_per_ssv <= global_params.config['glia']['rendering_max_nb_sv']
if 'example' in global_params.config.working_dir and np.sum(~size_mask) == 0:
# generate at least one (artificial) huge SSV
size_mask[:1] = False
size_mask[1:] = True
# sort ssv ids according to their number of SVs (descending)
# list of SSV IDs and SSD parameters need to be given to a single QSUB job
if np.sum(~size_mask) > 0:
log.info('{} huge SSVs will be rendered on the cluster.'.format(np.sum(~size_mask)))
# identify huge SSVs and process them individually on whole cluster
big_ssv = ssd.ssv_ids[~size_mask]
# render normal views only
n_cores = global_params.config['ncores_per_node'] // global_params.config['ngpus_per_node']
# sort ssv ids according to their number of SVs (descending)
multi_params = big_ssv[np.argsort(ssd.load_numpy_data('size')[~size_mask])[::-1]]
multi_params = chunkify(multi_params, max_n_jobs)
# list of SSV IDs and SSD parameters need to be given to a single QSUB job
multi_params = [(ixs, global_params.config.working_dir) for ixs in multi_params]
qu.batchjob_script(multi_params, "render_views_egl", suffix='_big', log=log,
additional_flags="--gres=gpu:1",
n_cores=n_cores, remove_jobfolder=True)
log.info('Finished rendering of {}/{} SSVs.'.format(len(big_ssv),
len(nb_svs_per_ssv)))
def run_neuron_rendering(max_n_jobs: Optional[int] = None):
"""
Render the default views as defined in ``global_params`` [WIP].
Args:
max_n_jobs: Number of parallel jobs.
Notes:
Requires :func:`~syconn.exec.exec_init.run_create_neuron_ssd`.
"""
log = initialize_logging('neuron_rendering',
global_params.config.working_dir + '/logs/')
ps = [Process(target=_run_neuron_rendering_big_helper, args=(max_n_jobs,)),
Process(target=_run_neuron_rendering_small_helper, args=(max_n_jobs,))]
for p in ps:
p.start()
time.sleep(10)
for p in ps:
p.join()
if p.exitcode != 0:
raise Exception(f'Worker {p.name} stopped unexpectedly with exit '
f'code {p.exitcode}.')
p.close()
log.info('Finished rendering of all SSVs. Checking completeness.')
ssd = SuperSegmentationDataset(working_dir=global_params.config.working_dir)
res = find_incomplete_ssv_views(ssd, woglia=True, n_cores=global_params.config['ncores_per_node'])
if len(res) != 0:
msg = "Not all SSVs were rendered! {}/{} missing. Example IDs:\n" \
"{}".format(len(res), len(ssd.ssv_ids),
res[:10])
log.error(msg)
raise RuntimeError(msg)
log.info('Success.')
def _run_huge_ssv_render_worker(q: Queue, q_out: Queue):
"""
Helper method of :func:`~run_astrocyte_rendering`.
Args:
q: Input queue.
q_out: Output queue.
"""
while True:
inp = q.get()
if inp == -1:
break
kk, g, version = inp
# Create SSV object
sv_ixs = np.sort(list(g.nodes()))
sso = SuperSegmentationObject(sv_ixs[0], working_dir=global_params.config.working_dir,
version=version, create=False, sv_ids=sv_ixs)
# nodes of sso._rag need to be SV
new_G = nx.Graph()
for e in g.edges():
new_G.add_edge(sso.get_seg_obj("sv", e[0]),
sso.get_seg_obj("sv", e[1]))
sso._rag = new_G
sso.render_views(add_cellobjects=False, cellobjects_only=False,
skip_indexviews=True, woglia=False, overwrite=True)
q_out.put(0)
def run_astrocyte_rendering(max_n_jobs: Optional[int] = None):
"""
Uses the pruned RAG at ``global_params.config.pruned_svgraph_path``
(stored as edge list .bz2 file) which is computed in
:func:`~syconn.exec.exec_init.init_cell_subcell_sds` to aggregate the
rendering context from the underlying supervoxel graph.
Args:
max_n_jobs: Number of parallel jobs.
"""
if max_n_jobs is None:
max_n_jobs = global_params.config.ngpu_total * 4 if \
global_params.config['pyopengl_platform'] == 'egl' \
else global_params.config.ncore_total * 4
log = initialize_logging('glia_separation', global_params.config.working_dir + '/logs/',
overwrite=True)
sds = SegmentationDataset("sv", working_dir=global_params.config.working_dir)
# precompute rendering locations
multi_params = chunkify(sds.so_dir_paths, global_params.config.ncore_total * 2)
so_kwargs = dict(working_dir=global_params.config.working_dir, obj_type='sv')
multi_params = [[par, so_kwargs] for par in multi_params]
# TODO: remove comment as soon as astrocyte separation supports on the fly view generation
# if not global_params.config.use_onthefly_views:
_ = qu.batchjob_script(multi_params, "sample_location_caching", remove_jobfolder=True, log=log)
log.info("Preparing RAG.")
np.random.seed(0)
# view rendering prior to glia removal, choose SSD accordingly
# glia removal is based on the initial RAG and does not require explicitly stored SSVs
version = "tmp"
G = nx.read_edgelist(global_params.config.pruned_svgraph_path, nodetype=np.uint64)
cc_gs = sorted(list((G.subgraph(c) for c in nx.connected_components(G))), key=len, reverse=True)
all_sv_ids_in_rag = np.array(list(G.nodes()), dtype=np.uint64)
# generate parameter for view rendering of individual SSV
sv_size_dict = {}
bbs = sds.load_numpy_data('bounding_box') * sds.scaling
for ii in range(len(sds.ids)):
sv_size_dict[sds.ids[ii]] = bbs[ii]
ccsize_dict = create_ccsize_dict(cc_gs, sv_size_dict,
is_connected_components=True)
multi_params = cc_gs
big_ssv = []
small_ssv = []
for g in multi_params:
if g.number_of_nodes() > global_params.config['glia']['rendering_max_nb_sv']:
big_ssv.append(g)
elif ccsize_dict[list(g.nodes())[0]] < global_params.config['min_cc_size_ssv']:
pass # ignore this CC
else:
small_ssv.append(g)
log.info("View rendering for astrocyte separation started.")
# # identify huge SSVs and process them on the entire cluster
if len(big_ssv) > 0:
n_threads = 2
log.info("Processing {} huge SSVs in {} threads on the entire cluster"
".".format(len(big_ssv), n_threads))
q_in = Queue()
q_out = Queue()
for kk, g in enumerate(big_ssv):
q_in.put((kk, g, version))
for _ in range(n_threads):
q_in.put(-1)
ps = [Process(target=_run_huge_ssv_render_worker, args=(q_in, q_out))
for _ in range(n_threads)]
for p in ps:
p.start()
time.sleep(0.5)
q_in.close()
q_in.join_thread()
for p in ps:
p.join()
if p.exitcode != 0:
raise Exception(f'Worker {p.name} stopped unexpectedly with exit '
f'code {p.exitcode}.')
p.close()
if q_out.qsize() != len(big_ssv):
msg = 'Not all `_run_huge_ssv_render_worker` jobs completed ' \
'successfully.'
log.error(msg)
raise ValueError(msg)
# render small SSV without overhead and single cpus on whole cluster
multi_params = small_ssv
np.random.shuffle(multi_params)
multi_params = chunkify(multi_params, max_n_jobs)
# list of SSV IDs and SSD parameters need to be given to a single QSUB job
multi_params = [(ixs, global_params.config.working_dir, version) for ixs in multi_params]
_ = qu.batchjob_script(
multi_params, "render_views_glia_removal", log=log,
n_cores=global_params.config['ncores_per_node'] // global_params.config['ngpus_per_node'],
additional_flags="--gres=gpu:1", remove_jobfolder=True)
# check completeness
log.info('Finished view rendering for astrocyte separation. Checking completeness.')
sd = SegmentationDataset("sv", working_dir=global_params.config.working_dir)
res = find_missing_sv_views(sd, woglia=False, n_cores=global_params.config['ncores_per_node'])
missing_not_contained_in_rag = np.setdiff1d(res, all_sv_ids_in_rag) # TODO: report at least.
missing_contained_in_rag = np.intersect1d(res, all_sv_ids_in_rag)
if len(missing_contained_in_rag) != 0:
msg = "Not all SVs were rendered completely! {}/{} missing:\n" \
"{}".format(len(missing_contained_in_rag), len(all_sv_ids_in_rag),
missing_contained_in_rag[:100])
log.error(msg)
raise ValueError(msg)
else:
log.info('All SVs now contain views required for glia prediction.')
# TODO: remove temporary SSV datasets
|
StructuralNeurobiologyLab/SyConn
|
syconn/exec/exec_render.py
|
Python
|
gpl-2.0
| 14,228
|
[
"NEURON"
] |
ef194b6568a5709fb7dab8fafab9668ddc9b93288c8350198dd8fd423d6a7f73
|
#!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# vizplotgui_pymol.py
# Purpose: viz running LAMMPS simulation via PyMol with plot and GUI
# Syntax: vizplotgui_pymol.py in.lammps Nfreq compute-ID
# in.lammps = LAMMPS input script
# Nfreq = plot data point and viz snapshot every this many steps
# compute-ID = ID of compute that calculates temperature
# (or any other scalar quantity)
# IMPORTANT: this script cannot yet be run in parallel via Pypar,
# because I can't seem to do a MPI-style broadcast in Pypar
import sys,time
sys.path.append("./pizza")
# methods called by GUI
def run():
global runflag
runflag = 1
def stop():
global runflag
runflag = 0
def settemp(value):
global temptarget
temptarget = slider.get()
def quit():
global breakflag
breakflag = 1
# method called by timestep loop every Nfreq steps
# read dump snapshot and viz it, update plot with compute value
def update(ntimestep):
d.next()
d.unscale()
p.single(ntimestep)
pm.load("tmp.pdb")
pm.forward()
value = lmp.extract_compute(compute,0,0)
xaxis.append(ntimestep)
yaxis.append(value)
gn.plot(xaxis,yaxis)
# parse command line
argv = sys.argv
if len(argv) != 4:
print "Syntax: vizplotgui_pymol.py in.lammps Nfreq compute-ID"
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
compute = sys.argv[3]
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
# dump a file in native LAMMPS dump format for Pizza.py dump tool
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
lmp.command("dump python all atom %d tmp.dump" % nfreq)
# initial 0-step run to generate initial 1-point plot, dump file, and image
lmp.command("run 0 pre yes post no")
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]
yaxis = [value]
breakflag = 0
runflag = 0
temptarget = 1.0
# wrapper on PyMol
# just proc 0 handles reading of dump file and viz
if me == 0:
import pymol
pymol.finish_launching()
from dump import dump
from pdbfile import pdbfile
from pymol import cmd as pm
d = dump("tmp.dump",0)
p = pdbfile(d)
d.next()
d.unscale()
p.single(ntimestep)
pm.load("tmp.pdb")
pm.show("spheres","tmp")
# display GUI with run/stop buttons and slider for temperature
if me == 0:
from Tkinter import *
tkroot = Tk()
tkroot.withdraw()
root = Toplevel(tkroot)
root.title("LAMMPS GUI")
frame = Frame(root)
Button(frame,text="Run",command=run).pack(side=LEFT)
Button(frame,text="Stop",command=stop).pack(side=LEFT)
slider = Scale(frame,from_=0.0,to=5.0,resolution=0.1,
orient=HORIZONTAL,label="Temperature")
slider.bind('<ButtonRelease-1>',settemp)
slider.set(temptarget)
slider.pack(side=LEFT)
Button(frame,text="Quit",command=quit).pack(side=RIGHT)
frame.pack()
tkroot.update()
# wrapper on GnuPlot via Pizza.py gnu tool
if me == 0:
from gnu import gnu
gn = gnu()
gn.plot(xaxis,yaxis)
gn.title(compute,"Timestep","Temperature")
# endless loop, checking status of GUI settings every Nfreq steps
# run with pre yes/no and post yes/no depending on go/stop status
# re-invoke fix langevin with new seed when temperature slider changes
# after re-invoke of fix langevin, run with pre yes
running = 0
temp = temptarget
seed = 12345
lmp.command("fix 2 all langevin %g %g 0.1 %d" % (temp,temp,seed))
while 1:
if me == 0: tkroot.update()
if temp != temptarget:
temp = temptarget
seed += me+1
lmp.command("fix 2 all langevin %g %g 0.1 12345" % (temp,temp))
running = 0
if runflag and running:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
elif runflag and not running:
lmp.command("run %d pre yes post no" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
elif not runflag and running:
lmp.command("run %d pre no post yes" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
if breakflag: break
if runflag: running = 1
else: running = 0
time.sleep(0.01)
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
|
ganzenmg/lammps_current
|
python/examples/vizplotgui_pymol.py
|
Python
|
gpl-2.0
| 4,404
|
[
"LAMMPS",
"PyMOL"
] |
3bca16212357ba35ecc3d0b0d3c71ee167a66a52084a578fe14e114ebb1c239c
|
#!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (C) 2016 Daniel Standage <daniel.standage@gmail.com>
#
# This file is part of tag (http://github.com/standage/tag) and is licensed
# under the BSD 3-clause license: see LICENSE.
# -----------------------------------------------------------------------------
import glob
import pytest
import tag
import tag.__main__
from tag.tests import data_file, data_stream
import sys
def test_cli_args(capsys):
with pytest.raises(SystemExit) as se:
args = tag.cli.parser().parse_args(['-v'])
terminal = capsys.readouterr()
assert tag.__version__ in terminal.out or tag.__version__ in terminal.err
with pytest.raises(SystemExit) as se:
args = tag.cli.parser().parse_args(['gff3', '-h'])
terminal = capsys.readouterr()
assert 'input file in GFF3 format' in terminal.out
arglist = ['gff3', '-r', data_file('mito-trna.gff3')]
args = tag.cli.parser().parse_args(arglist)
tag.__main__.main(args)
terminal = capsys.readouterr()
testout = data_stream('mito-trna-out.gff3').read()
assert terminal.out == testout
with pytest.raises(SystemExit):
tag.__main__.main()
def test_gff3_strict(capsys):
arglist = ['gff3', data_file('mito-trna.gff3')]
args = tag.cli.parser().parse_args(arglist)
with pytest.raises(AssertionError) as ae:
tag.cli.gff3.main(args)
assert 'not contained within its span' in str(ae)
terminal = capsys.readouterr()
args.strict = False
tag.cli.gff3.main(args)
terminal = capsys.readouterr()
testout = data_stream('mito-trna-out.gff3').read()
assert terminal.out == testout
@pytest.mark.parametrize('gff3,ftype,expected_output', [
('oluc-20kb.gff3', 'CDS', '14100\n'),
('bogus-aligns.gff3', 'cDNA_match', '7006\n'),
('bogus-genes.gff3', 'gene', '18000\n'),
('bogus-genes.gff3', 'mRNA', '18000\n'),
('bogus-genes.gff3', 'exon', '11000\n'),
])
def test_occ(gff3, ftype, expected_output, capsys):
arglist = ['occ', data_file(gff3), ftype]
args = tag.cli.parser().parse_args(arglist)
tag.cli.occ.main(args)
terminal = capsys.readouterr()
assert terminal.out == expected_output
def test_pmrna(capsys):
arglist = ['pmrna', data_file('nanosplice.gff3')]
args = tag.cli.parser().parse_args(arglist)
tag.cli.pmrna.main(args)
terminal = capsys.readouterr()
exp_out = data_stream('nanosplice-primary.gff3').read()
assert terminal.out.strip() == exp_out.strip()
def test_sum(capsys):
infile = data_file('GCF_001639295.1_ASM163929v1_genomic.gff.gz')
args = tag.cli.parser().parse_args(['sum', infile])
tag.cli.sum.main(args)
terminal = capsys.readouterr()
out = terminal.out.strip().split('\n')[1:]
exp_out = data_stream('sum-test-out.txt').read().strip().split('\n')[1:]
assert out == exp_out
def test_merge(capsys):
infiles = glob.glob(data_file('ex-red-?.gff3'))
arglist = ['merge'] + infiles
args = tag.cli.parser().parse_args(arglist)
tag.cli.merge.main(args)
terminal = capsys.readouterr()
exp_out = data_stream('ex-red-merged.gff3').read()
assert terminal.out.strip() == exp_out.strip()
def test_locuspocus(capsys):
infiles = glob.glob(data_file('Ye.*.min.gff3.gz'))
arglist = ['locuspocus'] + infiles
args = tag.cli.parser().parse_args(arglist)
tag.cli.locuspocus.main(args)
terminal = capsys.readouterr()
exp_out = data_stream('Ye.loci.gff3').read()
assert terminal.out.strip() == exp_out.strip()
def test_pep2nuc(capsys):
arglist = [
'pep2nuc', '-k', 'protein', data_file('Ypes-abinit.gff3.gz'),
data_file('Ypes-signalp-prot.gff3.gz')
]
args = tag.cli.parser().parse_args(arglist)
tag.cli.pep2nuc.main(args)
terminal = capsys.readouterr()
exp_out = data_stream('Ypes-signalp-nucl.gff3.gz').read()
assert terminal.out.strip() == exp_out.strip()
def test_pep2nuc_nokeep(capsys):
arglist = [
'pep2nuc', data_file('Ypes-abinit.gff3.gz'),
data_file('Ypes-signalp-prot.gff3.gz')
]
args = tag.cli.parser().parse_args(arglist)
tag.cli.pep2nuc.main(args)
terminal = capsys.readouterr()
exp_out = data_stream('Ypes-signalp-nucl-nk.gff3.gz').read()
assert terminal.out.strip() == exp_out.strip()
def test_pep2nuc_missing_id(capsys):
arglist = [
'pep2nuc', data_file('Ypes-abinit-minus1.gff3.gz'),
data_file('Ypes-signalp-prot.gff3.gz')
]
args = tag.cli.parser().parse_args(arglist)
tag.cli.pep2nuc.main(args)
terminal = capsys.readouterr()
msg = '[tag::pep2nuc] WARNING: protein identifier "cds000008" not defined'
assert msg in terminal.err
|
standage/tag
|
tag/tests/test_cli.py
|
Python
|
bsd-3-clause
| 4,767
|
[
"ABINIT"
] |
6bd7b363e1a6885f55a03df24871024fc5b443e7ee4665d33ce7cfd753cdec71
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
# Do not modify this file! It is auto-generated by the document_elements
# script, from psi4topdir/include/cov_radii.h
psi_cov_radii = {}
psi_cov_radii['BE'] = 0.96 #
psi_cov_radii['BA'] = 2.15 #
psi_cov_radii['BI'] = 1.48 #
psi_cov_radii['BR'] = 1.20 #
psi_cov_radii['RU'] = 1.46 #
psi_cov_radii['RE'] = 1.51 #
psi_cov_radii['LU'] = 1.87 #
psi_cov_radii['RA'] = 2.21 #
psi_cov_radii['RB'] = 2.20 #
psi_cov_radii['RN'] = 1.50 #
psi_cov_radii['RH'] = 1.42 #
psi_cov_radii['H'] = 0.31 #
psi_cov_radii['P'] = 1.07 #
psi_cov_radii['GE'] = 1.20 #
psi_cov_radii['GD'] = 1.96 #
psi_cov_radii['GA'] = 1.22 #
psi_cov_radii['OS'] = 1.44 #
psi_cov_radii['C'] = 0.76 #
psi_cov_radii['HO'] = 1.92 #
psi_cov_radii['HF'] = 1.75 #
psi_cov_radii['HG'] = 1.32 #
psi_cov_radii['HE'] = 0.28 #
psi_cov_radii['PR'] = 2.03 #
psi_cov_radii['PT'] = 1.36 #
psi_cov_radii['PU'] = 1.87 #
psi_cov_radii['PB'] = 1.46 #
psi_cov_radii['PA'] = 2.00 #
psi_cov_radii['PD'] = 1.39 #
psi_cov_radii['PO'] = 1.40 #
psi_cov_radii['PM'] = 1.99 #
psi_cov_radii['ZN'] = 1.22 #
psi_cov_radii['K'] = 2.03 #
psi_cov_radii['O'] = 0.66 #
psi_cov_radii['S'] = 1.05 #
psi_cov_radii['W'] = 1.62 #
psi_cov_radii['EU'] = 1.98 #
psi_cov_radii['ZR'] = 1.75 #
psi_cov_radii['ER'] = 1.89 #
psi_cov_radii['MG'] = 1.41 #
psi_cov_radii['MO'] = 1.54 #
psi_cov_radii['MN'] = 1.61 #
psi_cov_radii['U'] = 1.96 #
psi_cov_radii['FR'] = 2.60 #
psi_cov_radii['FE'] = 1.52 #
psi_cov_radii['NI'] = 1.24 #
psi_cov_radii['NA'] = 1.66 #
psi_cov_radii['NB'] = 1.64 #
psi_cov_radii['ND'] = 2.01 #
psi_cov_radii['NE'] = 0.58 #
psi_cov_radii['NP'] = 1.90 #
psi_cov_radii['B'] = 0.84 #
psi_cov_radii['CO'] = 1.50 #
psi_cov_radii['CL'] = 1.02 #
psi_cov_radii['CA'] = 1.76 #
psi_cov_radii['CE'] = 2.04 #
psi_cov_radii['N'] = 0.71 #
psi_cov_radii['V'] = 1.53 #
psi_cov_radii['CS'] = 2.44 #
psi_cov_radii['CR'] = 1.39 #
psi_cov_radii['CU'] = 1.32 #
psi_cov_radii['SR'] = 1.95 #
psi_cov_radii['KR'] = 1.16 #
psi_cov_radii['SI'] = 1.11 #
psi_cov_radii['SN'] = 1.39 #
psi_cov_radii['SM'] = 1.98 #
psi_cov_radii['SC'] = 1.70 #
psi_cov_radii['SB'] = 1.39 #
psi_cov_radii['SE'] = 1.20 #
psi_cov_radii['YB'] = 1.87 #
psi_cov_radii['DY'] = 1.92 #
psi_cov_radii['LA'] = 2.07 #
psi_cov_radii['F'] = 0.57 #
psi_cov_radii['LI'] = 1.28 #
psi_cov_radii['TL'] = 1.45 #
psi_cov_radii['TM'] = 1.90 #
psi_cov_radii['TH'] = 2.06 #
psi_cov_radii['TI'] = 1.60 #
psi_cov_radii['TE'] = 1.38 #
psi_cov_radii['TB'] = 1.94 #
psi_cov_radii['TC'] = 1.47 #
psi_cov_radii['TA'] = 1.70 #
psi_cov_radii['AC'] = 2.15 #
psi_cov_radii['AG'] = 1.45 #
psi_cov_radii['I'] = 1.39 #
psi_cov_radii['IR'] = 1.41 #
psi_cov_radii['AM'] = 1.80 #
psi_cov_radii['AL'] = 1.21 #
psi_cov_radii['AS'] = 1.19 #
psi_cov_radii['AR'] = 1.06 #
psi_cov_radii['AU'] = 1.36 #
psi_cov_radii['AT'] = 1.50 #
psi_cov_radii['IN'] = 1.42 #
psi_cov_radii['Y'] = 1.90 #
psi_cov_radii['CD'] = 1.44 #
psi_cov_radii['XE'] = 1.40 #
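# Radii are indexed by uppercase element symbol and given in Angstrom,
# e.g. psi_cov_radii['C'] -> 0.76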
|
kratman/psi4public
|
psi4/driver/qcdb/cov_radii.py
|
Python
|
gpl-2.0
| 5,363
|
[
"Psi4"
] |
e82efd4b62d3737bf17af0a5952c10a228308ad5bb1e70a6c1dc97ab9e478229
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
########################################################################
""" Ban one or more Storage Elements for usage
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
read = True
write = True
check = True
remove = True
site = ''
mute = False
Script.setUsageMessage( """
Ban one or more Storage Elements for usage
Usage:
%s SE1 [SE2 ...]
""" % Script.scriptName )
Script.registerSwitch( "r" , "BanRead" , " Ban only reading from the storage element" )
Script.registerSwitch( "w" , "BanWrite", " Ban writing to the storage element" )
Script.registerSwitch( "k" , "BanCheck", " Ban check access to the storage element" )
Script.registerSwitch( "v" , "BanRemove", " Ban remove access to the storage element" )
Script.registerSwitch( "m" , "Mute" , " Do not send email" )
Script.registerSwitch( "S:", "Site=" , " Ban all SEs associate to site (note that if writing is allowed, check is always allowed)" )
Script.parseCommandLine( ignoreErrors = True )
ses = Script.getPositionalArgs()
for switch in Script.getUnprocessedSwitches():
if switch[0].lower() == "r" or switch[0].lower() == "banread":
write = False
check = False
remove = False
if switch[0].lower() == "w" or switch[0].lower() == "banwrite":
read = False
check = False
remove = False
if switch[0].lower() == "k" or switch[0].lower() == "bancheck":
read = False
write = False
remove = False
if switch[0].lower() == "v" or switch[0].lower() == "banremove":
read = False
write = False
check = False
if switch[0].lower() == "m" or switch[0].lower() == "mute":
mute = True
if switch[0] == "S" or switch[0].lower() == "site":
site = switch[1]
#from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
from DIRAC import gConfig, gLogger
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
#csAPI = CSAPI()
diracAdmin = DiracAdmin()
exitCode = 0
errorList = []
setup = gConfig.getValue( '/DIRAC/Setup', '' )
if not setup:
print 'ERROR: Could not contact Configuration Service'
exitCode = 2
DIRAC.exit( exitCode )
res = getProxyInfo()
if not res[ 'OK' ]:
gLogger.error( 'Failed to get proxy information', res[ 'Message' ] )
DIRAC.exit( 2 )
userName = res['Value'].get( 'username' )
if not userName:
gLogger.error( 'Failed to get username for proxy' )
DIRAC.exit( 2 )
if site:
res = Resources().getStorageElements( site )
if not res[ 'OK' ]:
gLogger.error( 'The provided site (%s) is not known.' % site )
DIRAC.exit( -1 )
ses.extend( res[ 'Value' ][ 'SE' ].replace( ' ', '' ).split( ',' ) )
if not ses:
gLogger.error( 'There were no SEs provided' )
DIRAC.exit( -1 )
readBanned = []
writeBanned = []
checkBanned = []
removeBanned = []
resourceStatus = ResourceStatus()
res = resourceStatus.getStorageStatus( ses )
if not res['OK']:
gLogger.error( "Storage Element %s does not exist" % ses )
DIRAC.exit( -1 )
reason = 'Forced with dirac-admin-ban-se by %s' % userName
for se, seOptions in res[ 'Value' ].items():
resW = resC = resR = { 'OK' : False }
# Eventually, we will get rid of the notion of InActive, as we always write Banned.
if read and seOptions.has_key( 'ReadAccess' ):
if not seOptions[ 'ReadAccess' ] in [ 'Active', 'Degraded', 'Probing' ]:
gLogger.notice( 'Read option for %s is %s, instead of %s' % ( se, seOptions[ 'ReadAccess' ], [ 'Active', 'Degraded', 'Probing' ] ) )
gLogger.notice( 'Try specifying the command switches' )
else:
resR = resourceStatus.setStorageElementStatus( se, 'ReadAccess', 'Banned', reason, userName )
#res = csAPI.setOption( "%s/%s/ReadAccess" % ( storageCFGBase, se ), "InActive" )
if not resR['OK']:
gLogger.error( 'Failed to update %s read access to Banned' % se )
else:
gLogger.notice( 'Successfully updated %s read access to Banned' % se )
readBanned.append( se )
# Eventually, we will get rid of the notion of InActive, as we always write Banned.
if write and seOptions.has_key( 'WriteAccess' ):
if not seOptions[ 'WriteAccess' ] in [ 'Active', 'Degraded', 'Probing' ]:
gLogger.notice( 'Write option for %s is %s, instead of %s' % ( se, seOptions[ 'WriteAccess' ], [ 'Active', 'Degraded', 'Probing' ] ) )
gLogger.notice( 'Try specifying the command switches' )
else:
resW = resourceStatus.setStorageElementStatus( se, 'WriteAccess', 'Banned', reason, userName )
#res = csAPI.setOption( "%s/%s/WriteAccess" % ( storageCFGBase, se ), "InActive" )
if not resW['OK']:
gLogger.error( "Failed to update %s write access to Banned" % se )
else:
gLogger.notice( "Successfully updated %s write access to Banned" % se )
writeBanned.append( se )
# Eventually, we will get rid of the notion of InActive, as we always write Banned.
if check and seOptions.has_key( 'CheckAccess' ):
if not seOptions[ 'CheckAccess' ] in [ 'Active', 'Degraded', 'Probing' ]:
gLogger.notice( 'Check option for %s is %s, instead of %s' % ( se, seOptions[ 'CheckAccess' ], [ 'Active', 'Degraded', 'Probing' ] ) )
gLogger.notice( 'Try specifying the command switches' )
else:
resC = resourceStatus.setStorageElementStatus( se, 'CheckAccess', 'Banned', reason, userName )
#res = csAPI.setOption( "%s/%s/CheckAccess" % ( storageCFGBase, se ), "InActive" )
if not resC['OK']:
gLogger.error( "Failed to update %s check access to Banned" % se )
else:
gLogger.notice( "Successfully updated %s check access to Banned" % se )
checkBanned.append( se )
# Eventually, we will get rid of the notion of InActive, as we always write Banned.
  if remove and seOptions.has_key( 'RemoveAccess' ):
    if not seOptions[ 'RemoveAccess' ] in [ 'Active', 'Degraded', 'Probing' ]:
      gLogger.notice( 'Remove option for %s is %s, instead of %s' % ( se, seOptions[ 'RemoveAccess' ], [ 'Active', 'Degraded', 'Probing' ] ) )
      gLogger.notice( 'Try specifying the command switches' )
    else:
      # Use a dedicated result variable so the CheckAccess result above is not overwritten
      resRm = resourceStatus.setStorageElementStatus( se, 'RemoveAccess', 'Banned', reason, userName )
      # res = csAPI.setOption( "%s/%s/RemoveAccess" % ( storageCFGBase, se ), "InActive" )
      if not resRm['OK']:
        gLogger.error( "Failed to update %s remove access to Banned" % se )
      else:
        gLogger.notice( "Successfully updated %s remove access to Banned" % se )
        removeBanned.append( se )
  if not ( resR['OK'] or resW['OK'] or resC['OK'] or resRm['OK'] ):
    DIRAC.exit( -1 )
if not ( writeBanned or readBanned or checkBanned or removeBanned ):
gLogger.notice( "No storage elements were banned" )
DIRAC.exit( -1 )
if mute:
gLogger.notice( 'Email is muted by script switch' )
DIRAC.exit( 0 )
subject = '%s storage elements banned for use' % len( writeBanned + readBanned + checkBanned + removeBanned )
addressPath = 'EMail/Production'
address = Operations().getValue( addressPath, '' )
body = ''
if read:
body = "%s\n\nThe following storage elements were banned for reading:" % body
for se in readBanned:
body = "%s\n%s" % ( body, se )
if write:
body = "%s\n\nThe following storage elements were banned for writing:" % body
for se in writeBanned:
body = "%s\n%s" % ( body, se )
if check:
body = "%s\n\nThe following storage elements were banned for check access:" % body
for se in checkBanned:
body = "%s\n%s" % ( body, se )
if remove:
body = "%s\n\nThe following storage elements were banned for remove access:" % body
for se in removeBanned:
body = "%s\n%s" % ( body, se )
if not address:
gLogger.notice( "'%s' not defined in Operations, can not send Mail\n" % addressPath, body )
DIRAC.exit( 0 )
res = diracAdmin.sendMail( address, subject, body )
gLogger.notice( 'Notifying %s' % address )
if res[ 'OK' ]:
gLogger.notice( res[ 'Value' ] )
else:
gLogger.notice( res[ 'Message' ] )
DIRAC.exit( 0 )
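# A short usage sketch (SE and site names below are invented for illustration):
#   dirac-admin-ban-se -r CERN-USER IN2P3-USER   # ban only read access on two SEs
#   dirac-admin-ban-se -w -S LCG.CERN.ch         # ban writing on every SE of the site
#   dirac-admin-ban-se -m CERN-USER              # ban accesses but mute the notification email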
|
coberger/DIRAC
|
DataManagementSystem/scripts/dirac-admin-ban-se.py
|
Python
|
gpl-3.0
| 8,432
|
[
"DIRAC"
] |
56548845b47d4b93307cf2db7f8e5a2470c9e033fc24561323fef4e566a452ec
|
"""
Use these tags and filter when you're rolling your own search results.
This is intended to be a whole templatetags module. I keep it in my apps
as templatetags/search.py. These should not be used to perform search
queries, but rather render the results.
Basics
-------
There are three functions, each has both a tag and a filter of the same name.
These functions accept, at a minimum, a body of text and a list of search terms:
searchexcerpt: Truncate the text so that each search term is shown,
surrounded by some number of words of context.
highlight: Wrap all found search terms in an HTML span that can be styled to
highlight the terms.
hits: Count the occurrences of the search terms in the text.
The filters provide the most basic functionality as described above, while the tags
offer more options as arguments, such as case sensitivity, whole word search, and
saving the results to a context variable.
Settings
---------
Defaults for both the tags and filters can be changed with the following
settings. Note that these settings are merely a convenience for the tags,
which accept these as arguments, but are necessary for changing the behavior of the filters.
* SEARCH_CONTEXT_WORDS: Number of words to show on the left and right of
each search term. Default: 10
* SEARCH_IGNORE_CASE: False for case sensitive, True otherwise. Default: True
* SEARCH_WORD_BOUNDARY: Find whole words and not strings in the middle of
words. Default: False
* SEARCH_HIGHLIGHT_CLASS: The class to give the HTML span element when
  wrapping highlighted search terms. Default: "match", as set in
  SETTINGS_DEFAULTS below (see the override example that follows)
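For example, a project could override these in its settings module (values
here are purely illustrative):
```
# settings.py
SEARCH_CONTEXT_WORDS = 5
SEARCH_IGNORE_CASE = False
SEARCH_WORD_BOUNDARY = True
SEARCH_HIGHLIGHT_CLASS = "highlight"
```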
Examples
---------
Suppose you have a list flatpages resulting from a search query, and the
search terms (split into a list) are in the context variable terms. This will
show 5 words of context around each term and highlight matches in the title:
```
{% for page in flatpages %}
<h3>{{ page.title|highlight:terms }}</h3>
<p>
{% searchexcerpt terms 5 %}
{{ page.content|striptags }}
{% endsearchexcerpt %}
</p>
{% endfor %}
```
Add highlighting to the excerpt, and use a custom span class (the two flags
are for case insensitivity and respecting word boundaries):
```
{% highlight 1 1 "match" %}
{% searchexcerpt terms 5 1 1 %}
{{ page.content|striptags }}
{% endsearchexcerpt %}
{% endhighlight %}
```
Show the number of hits in the body:
```
<h3>{{ page.title }}
(Hits: {{ page.content|striptags|hits:terms }})
</h3>
```
All tags support an as name suffix, in which case an object will be stored in
the template context with the given name; output will be suppressed.
This is more efficient when you want both the excerpt and the number of hits.
The stored object depends on the tag:
* searchexcerpt: A dictionary with keys "original" (the text searched),
"excerpt" (the summarized text with search terms),
and "hits" (the number of hits in the text).
* highlight: A dictionary with keys "original", "highlighted", and "hits",
with obvious values.
* hits: Just the number of hits, nothing special.
Getting both the hits and the excerpt with "as":
```
{% searchexcerpt terms 3 as content %}
{{ page.content|striptags }}
{% endsearchexcerpt %}
<p>Hits: {{ content.hits }}<br>{{ content.excerpt }}</p>
```
More
----
For more examples see [Brian Beck's Text Adventure][1].
[1]: http://blog.brianbeck.com/post/29707610
"""
from itertools import ifilter
import re
from django import template
from django.conf import settings
from django.template import Node, TemplateSyntaxError
from django.utils.safestring import mark_safe
register = template.Library()
SETTINGS_PREFIX = 'SEARCH_'
SETTINGS_DEFAULTS = {
'CONTEXT_WORDS': 10,
'IGNORE_CASE': True,
'WORD_BOUNDARY': False,
'HIGHLIGHT_CLASS': "match"
}
def get_setting(name):
return getattr(settings, SETTINGS_PREFIX + name, SETTINGS_DEFAULTS[name])
def searchexcerpt(text, phrases, context_words=None, ignore_case=None, word_boundary=None):
if isinstance(phrases, basestring):
phrases = [phrases]
if context_words is None:
context_words = get_setting('CONTEXT_WORDS')
if ignore_case is None:
ignore_case = get_setting('IGNORE_CASE')
if word_boundary is None:
word_boundary = get_setting('WORD_BOUNDARY')
phrases = map(re.escape, phrases)
flags = ignore_case and re.I or 0
exprs = [re.compile(r"^%s$" % p, flags) for p in phrases]
whitespace = re.compile(r'\s+')
re_template = word_boundary and r"\b(%s)\b" or r"(%s)"
pieces = re.compile(re_template % "|".join(phrases), flags).split(text)
    matches = {}
    word_lists = []
    index = {}
    for i, piece in enumerate(pieces):
        word_lists.append(whitespace.split(piece))
        if i % 2:
            # Odd-indexed pieces are the captured search terms from re.split();
            # record which phrase expression matched and at which position.
            index[i] = expr = ifilter(lambda e: e.match(piece), exprs).next()
            matches.setdefault(expr, []).append(i)
def merge(lists):
merged = []
lists = [i for i in lists if i]
for words in lists:
if merged:
merged[-1] += words[0]
del words[0]
merged.extend(words)
return merged
i = 0
merged = []
for j in map(min, matches.itervalues()):
merged.append(merge(word_lists[i:j]))
merged.append(word_lists[j])
i = j + 1
merged.append(merge(word_lists[i:]))
output = []
for i, words in enumerate(merged):
omit = None
if i == len(merged) - 1:
omit = slice(max(1, 2 - i) * context_words + 1, None)
elif i == 0:
omit = slice(-context_words - 1)
elif not i % 2:
omit = slice(context_words + 1, -context_words - 1)
if omit and words[omit]:
words[omit] = ["..."]
output.append(" ".join(words))
return dict(original=text, excerpt="".join(output), hits=len(index))
class FunctionProxyNode(Node):
def __init__(self, nodelist, args, variable_name=None):
self.nodelist = nodelist
self.args = args
self.variable_name = variable_name
def render(self, context):
args = [arg.resolve(context) for arg in self.args]
text = self.nodelist.render(context)
value = self.get_value(text, *args)
if self.variable_name:
context[self.variable_name] = value
return ""
else:
return self.string_value(value)
def get_value(self, *args):
raise NotImplementedError
def string_value(self, value):
return value
class SearchContextNode(FunctionProxyNode):
def get_value(self, *args):
return searchexcerpt(*args)
def string_value(self, value):
return value['excerpt']
@register.tag(name='searchexcerpt')
def searchexcerpt_tag(parser, token):
"""
{% searchexcerpt search_terms [context_words] [ignore_case] [word_boundary] [as name] %}
...text...
{% endsearchexcerpt %}
"""
bits = list(token.split_contents())
if not 3 <= len(bits) <= 8:
usage = searchexcerpt_tag.__doc__.strip()
raise TemplateSyntaxError("%r expected usage: %s" % (bits[0], usage))
if len(bits) > 4 and bits[-2] == "as":
args, name = bits[1:-2], bits[-1]
else:
args, name = bits[1:], None
nodelist = parser.parse(('endsearchexcerpt',))
parser.delete_first_token()
return SearchContextNode(nodelist, map(parser.compile_filter, args), name)
@register.filter(name='searchexcerpt')
def searchexcerpt_filter(value, arg):
return searchexcerpt(value, arg)['excerpt']
searchexcerpt_filter.is_safe = True
def highlight(text, phrases, ignore_case=None, word_boundary=None, class_name=None):
if isinstance(phrases, basestring):
phrases = [phrases]
if ignore_case is None:
ignore_case = get_setting('IGNORE_CASE')
if word_boundary is None:
word_boundary = get_setting('WORD_BOUNDARY')
if class_name is None:
class_name = get_setting('HIGHLIGHT_CLASS')
phrases = map(re.escape, phrases)
flags = ignore_case and re.I or 0
re_template = word_boundary and r"\b(%s)\b" or r"(%s)"
expr = re.compile(re_template % "|".join(phrases), flags)
inner_expr = re.compile('<a[^>]+?href="[^>]*?(%s)$' % "|".join(phrases), flags)
template = '<span class="%s">%%s</span>' % class_name
matches = []
def replace(match):
if not word_boundary:
span = match.span()
if inner_expr.search(text, span[0] - 100, span[1]):
return match.group(0)
matches.append(match)
return template % match.group(0)
highlighted = mark_safe(expr.sub(replace, text))
count = len(matches)
return dict(original=text, highlighted=highlighted, hits=count)
class HighlightNode(FunctionProxyNode):
def get_value(self, *args):
return highlight(*args)
def string_value(self, value):
return value['highlighted']
@register.tag(name='highlight')
def highlight_tag(parser, token):
"""
{% highlight search_terms [ignore_case] [word_boundary] [class_name] [as name] %}
...text...
{% endhighlight %}
"""
bits = list(token.split_contents())
if not 2 <= len(bits) <= 7:
usage = highlight_tag.__doc__.strip()
raise TemplateSyntaxError("%r expected usage: %s" % (bits[0], usage))
if len(bits) > 3 and bits[-2] == "as":
args, name = bits[1:-2], bits[-1]
else:
args, name = bits[1:], None
nodelist = parser.parse(('endhighlight',))
parser.delete_first_token()
return HighlightNode(nodelist, map(parser.compile_filter, args), name)
@register.filter(name='highlight')
def highlight_filter(value, arg):
return highlight(value, arg)['highlighted']
def hits(text, phrases, ignore_case=None, word_boundary=None):
if isinstance(phrases, basestring):
phrases = [phrases]
if ignore_case is None:
ignore_case = get_setting('IGNORE_CASE')
if word_boundary is None:
word_boundary = get_setting('WORD_BOUNDARY')
phrases = map(re.escape, phrases)
flags = ignore_case and re.I or 0
re_template = word_boundary and r"\b(%s)\b" or r"(%s)"
expr = re.compile(re_template % "|".join(phrases), flags)
return len(expr.findall(text))
class HitsNode(FunctionProxyNode):
def get_value(self, *args):
return hits(*args)
def string_value(self, value):
return "%d" % value
@register.tag(name='hits')
def hits_tag(parser, token):
"""
{% hits search_terms [ignore_case] [word_boundary] [as name] %}
...text...
{% endhits %}
"""
bits = list(token.split_contents())
if not 2 <= len(bits) <= 6:
usage = hits_tag.__doc__.strip()
raise TemplateSyntaxError("%r expected usage: %s" % (bits[0], usage))
if len(bits) > 3 and bits[-2] == "as":
args, name = bits[1:-2], bits[-1]
else:
args, name = bits[1:], None
nodelist = parser.parse(('endhits',))
parser.delete_first_token()
return HitsNode(nodelist, map(parser.compile_filter, args), name)
@register.filter(name='hits')
def hits_filter(value, arg):
return hits(value, arg)
hits_filter.is_safe = True
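# A minimal Python-level sketch of the helpers above (illustrative interactive
# session; assumes Django settings are configured):
#   >>> searchexcerpt("the quick brown fox jumps", ["fox"])["hits"]
#   1
#   >>> hits("the quick brown fox", ["quick", "fox"])
#   2
#   >>> print highlight("the quick fox", ["fox"])["highlighted"]
#   the quick <span class="match">fox</span>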
|
Alir3z4/django-kewl
|
django_kewl/templatetags/search.py
|
Python
|
bsd-3-clause
| 11,201
|
[
"Brian"
] |
3236f18ae7210a603d42e0e567c39e1fc6b6f4cbda13f012f7567cffa602b84f
|
# Copyright 2011 Luc Saffre
# License: BSD (see file COPYING for details)
"""
This is for writing fixtures that import data from an MS-Access
database (:xfile:`.mdb`) into Lino.
Usage examples see
:mod:`lino.projects.pcsw.fixtures.pp2lino`
and
:mod:`lino.projects.crl.fixtures.hs2lino`.
It uses `mdb-export` to extract data from the :xfile:`.mdb`
file to :xfile:`.csv`, then reads these csv files.
`mdb-export` was written by Brian Bruns and is part
of the `mdbtools` Debian package. To install it::
aptitude install mdbtools
Usage of `mdbtools` command line::
Usage: mdb-export [options] <file> <table>
where options are:
-H supress header row
-Q don't wrap text-like fields in quotes
-d <delimiter> specify a column delimiter
-R <delimiter> specify a row delimiter
-I INSERT statements (instead of CSV)
-D <format> set the date format (see strftime(3) for details)
-S Sanitize names (replace spaces etc. with underscore)
-q <char> Use <char> to wrap text-like fields. Default is ".
-X <char> Use <char> to escape quoted characters within a field. Default is doubling.
Thanks to http://farismadi.wordpress.com/2008/07/13/encoding-of-mdb-tool/
for explanations on the environment variables used by `mdb-export`.
The function :func:`check_output` in this module is a copy from Python 2.7
which we include here to make it usable in Python 2.6 too.
"""
from __future__ import print_function
from builtins import object
import logging
logger = logging.getLogger(__name__)
# ~ ENCODING = 'latin1' # the encoding used by the mdb file
ENCODING = 'utf8'
#~ MDB_FILE = 'PPv5MasterCopie.mdb'
MDBTOOLS_EXPORT = 'mdb-export'
import os
import sys
#~ ENCODING = sys.stdout.encoding
#~ import csv
import codecs
import datetime
from django.conf import settings
from lino.utils import ucsv
from lino.utils import dblogger
try:
from subprocess import check_output
except ImportError:
import subprocess
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError(
'stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(
stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
class Loader(object):
mdb_file = None
table_name = None
model = None
def __iter__(self):
fn = self.table_name + ".csv"
if os.path.exists(fn):
logger.warning("Not re-extracting %s since it exists.", fn)
else:
args = [MDBTOOLS_EXPORT, '-D', "%Y-%m-%d %H:%M:%S",
self.mdb_file, self.table_name]
s = check_output(args, executable=MDBTOOLS_EXPORT,
env=dict(
MDB_ICONV='utf-8',
MDB_JET_CHARSET='utf-8'))
#~ print ENCODING
fd = open(fn, 'w')
fd.write(s)
fd.close()
logger.info("Extracted file %s", fn)
reader = ucsv.UnicodeReader(open(fn, 'r'), encoding=ENCODING)
headers = next(reader)
if not headers == self.headers:
raise Exception("%r != %r" % (headers, self.headers))
n = 0
for values in reader:
row = {}
for i, h in enumerate(self.headers):
row[h] = values[i]
n += 1
if False:
if int(row['IDClient']) == 967:
print(row)
raise Exception("20110609")
if False:
if n < 10:
print(n, ':', row)
else:
raise Exception("20110609")
for obj in self.row2obj(row):
yield obj
def parsedate(self, s):
if not s:
return None
dt = s.split()
if len(dt) != 2:
raise Exception("Unexpected datetime string %r" % s)
d = dt[0]
#~ t = dt[1]
a = [int(i) for i in d.split('-')]
return datetime.date(year=a[0], month=a[1], day=a[2])
def parsetime(self, s):
if not s:
return None
dt = s.split()
if len(dt) != 2:
raise Exception("Unexpected datetime string %r" % s)
t = dt[1]
return t[:5]
#~ a = [int(i) for i in t.split(':')]
#~ return datetime.time(hour=a[0],minute=a[1],second=a[2])
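# A minimal hypothetical subclass sketch (table, column and model names are
# invented; real examples live in the fixtures referenced in the module docstring):
# class CityLoader(Loader):
#     mdb_file = 'legacy.mdb'
#     table_name = 'Cities'
#     headers = ['IDCity', 'Name']
#     def row2obj(self, row):
#         from myapp.models import City  # hypothetical model
#         yield City(id=int(row['IDCity']), name=row['Name'])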
|
khchine5/lino
|
lino/utils/mdbtools.py
|
Python
|
bsd-2-clause
| 5,749
|
[
"Brian"
] |
ac32260d9256d4fca5a047179dccbde432614d546ec66c92caf95305b07eb09b
|
import numpy as np
import pytest
import logging
import skimage
import tempfile
import nibabel
from spinalcordtoolbox.scripts import sct_compute_snr
logger = logging.getLogger(__name__)
# Declare the signal intensity in the object created in the dummy_*() functions. This object will be used to compute
# the SNR using various methods.
SIGNAL_OBJECT = 1000
# Define a scaling factor for a slice. This scaling factor mimics inhomogeneous transmit in MRI, or an intensity bias
# field correction filter.
SCALING_SLICE = 100
def dummy_3d_data(scale_mid_slice=0):
"""
Create 3d image with object in the middle and Rayleigh noise distribution. Outputs a nibabel object.
:param scale_mid_slice: If non-null, the last slice is scaled. This is to mimic MRI scans with variable transmit
profile.
:return:
"""
    data = np.ones([32, 32, 32], dtype=np.float64)  # np.float is a deprecated alias; use an explicit dtype
# Add an object with representative intensity in the middle of the image
data[9:24, 9:24, 9:24] = SIGNAL_OBJECT
# Add Gaussian noise with unit variance on two separate images
data1 = skimage.util.random_noise(data, mode='gaussian', clip=False, mean=0, var=1)
data2 = skimage.util.random_noise(data, mode='gaussian', clip=False, mean=0, var=1)
if scale_mid_slice:
data1[..., 16] *= scale_mid_slice
data2[..., 16] *= scale_mid_slice
# Compute the square root of the sum of squares to obtain a Rayleigh (equivalent to Chi) distribution. This
# distribution is a more realistic representation of noise in magnitude MRI data, which is obtained by combining
# imaginary and real channels (each having Gaussian distribution).
data = np.sqrt(data1**2 + data2**2)
return data
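# Illustrative sanity check for the Rayleigh construction above (not part of the
# test suite): with two unit-variance Gaussian channels, pure-noise voxels have
# mean sigma * sqrt(pi / 2) ~= 1.2533:
#   noise = np.sqrt(np.random.normal(size=10**6)**2 + np.random.normal(size=10**6)**2)
#   assert abs(noise.mean() - np.sqrt(np.pi / 2)) < 0.01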
@pytest.fixture(scope="session")
def dummy_3d_nib():
nii = nibabel.nifti1.Nifti1Image(dummy_3d_data(), np.eye(4))
filename = tempfile.NamedTemporaryFile(suffix='.nii.gz', delete=False).name
nibabel.save(nii, filename)
return filename
@pytest.fixture(scope="session")
def dummy_3d_nib_scaled():
"""Create 3D volume with one slice in the middle scaled by a factor SCALING_SLICE"""
nii = nibabel.nifti1.Nifti1Image(dummy_3d_data(scale_mid_slice=SCALING_SLICE), np.eye(4))
filename = tempfile.NamedTemporaryFile(suffix='.nii.gz', delete=False).name
nibabel.save(nii, filename)
return filename
@pytest.fixture(scope="session")
def dummy_4d_nib():
"""Create 4D volume. We need sufficient volumes to compute reliable standard deviation along the 4th dimension."""
data = np.stack([dummy_3d_data() for i in range(50)], axis=3)
affine = np.eye(4)
nib = nibabel.nifti1.Nifti1Image(data, affine)
filename = tempfile.NamedTemporaryFile(suffix='.nii.gz', delete=False).name
nibabel.save(nib, filename)
return filename
@pytest.fixture(scope="session")
def dummy_4d_nib_scaled():
"""Create 4D volume with one slice in the middle scaled."""
data = np.stack([dummy_3d_data(scale_mid_slice=SCALING_SLICE) for i in range(50)], axis=3)
affine = np.eye(4)
nib = nibabel.nifti1.Nifti1Image(data, affine)
filename = tempfile.NamedTemporaryFile(suffix='.nii.gz', delete=False).name
nibabel.save(nib, filename)
return filename
@pytest.fixture(scope="session")
def dummy_3d_mask_nib():
data = np.zeros([32, 32, 32], dtype=np.uint8)
data[9:24, 9:24, 9:24] = 1
nii = nibabel.nifti1.Nifti1Image(data, np.eye(4))
filename = tempfile.NamedTemporaryFile(suffix='.nii.gz', delete=False).name
nibabel.save(nii, filename)
return filename
@pytest.fixture(scope="session")
def dummy_3d_mask_noise_nib():
data = np.zeros([32, 32, 32], dtype=np.uint8)
data[0:5, 0:5, 9:24] = 1
nii = nibabel.nifti1.Nifti1Image(data, np.eye(4))
filename = tempfile.NamedTemporaryFile(suffix='.nii.gz', delete=False).name
nibabel.save(nii, filename)
return filename
@pytest.fixture(scope="session")
def dummy_3d_mask_noise_notconsistent_nib():
"""Slices are non-null while they are null on the image to compute SNR from."""
data = np.zeros([32, 32, 32], dtype=np.uint8)
data[0:5, 0:5, 0:32] = 1
nii = nibabel.nifti1.Nifti1Image(data, np.eye(4))
filename = tempfile.NamedTemporaryFile(suffix='.nii.gz', delete=False).name
nibabel.save(nii, filename)
return filename
@pytest.mark.parametrize('method', ['diff', 'mult'])
def test_sct_compute_snr_check_dimension(dummy_3d_nib, method):
with pytest.raises(ValueError):
sct_compute_snr.main(argv=['-i', dummy_3d_nib, '-m', dummy_3d_nib, '-method', method, '-vol', '0,5'])
def test_sct_compute_snr_check_dimension_mask(dummy_4d_nib):
with pytest.raises(ValueError):
sct_compute_snr.main(argv=['-i', dummy_4d_nib, '-m', dummy_4d_nib, '-method', 'mult'])
def test_sct_compute_snr_check_dimension_mask_noise(dummy_3d_nib, dummy_4d_nib):
with pytest.raises(ValueError):
sct_compute_snr.main(argv=['-i', dummy_3d_nib, '-m', dummy_3d_nib, '-m-noise', dummy_4d_nib,
'-method', 'single'])
def test_sct_compute_snr_check_vol_param(dummy_4d_nib, dummy_3d_nib):
with pytest.raises(ValueError):
sct_compute_snr.main(argv=['-i', dummy_4d_nib, '-m', dummy_3d_nib, '-m-noise', dummy_3d_nib,
'-vol', '0,1,2', '-method', 'single'])
def test_sct_compute_snr_missing_mask(dummy_4d_nib, dummy_3d_nib):
for args in (['-i', dummy_4d_nib, '-m', dummy_3d_nib, '-method', 'single'],
['-i', dummy_4d_nib, '-method', 'diff']):
with pytest.raises(SystemExit) as e:
sct_compute_snr.main(argv=args)
assert e.value.code == 2
def test_sct_compute_snr_notconsistent_mask_noise_mask(dummy_3d_nib, dummy_3d_mask_nib,
dummy_3d_mask_noise_notconsistent_nib):
with pytest.raises(ValueError):
sct_compute_snr.main(argv=['-i', dummy_3d_nib, '-m', dummy_3d_mask_nib,
'-m-noise', dummy_3d_mask_noise_notconsistent_nib, '-method', 'single'])
def test_sct_compute_snr_mult(dummy_4d_nib, dummy_3d_mask_nib):
filename = tempfile.NamedTemporaryFile(prefix='snr_mult_', suffix='.txt', delete=False).name
sct_compute_snr.main(
argv=['-i', dummy_4d_nib, '-m', dummy_3d_mask_nib, '-method', 'mult', '-o', filename])
with open(filename, "r") as f:
snr = float(f.read())
# We need a large tolerance because of the randomization
assert snr == pytest.approx(np.sqrt(2*SIGNAL_OBJECT**2), rel=0.05)
def test_sct_compute_snr_mult_vol(dummy_4d_nib, dummy_3d_mask_nib):
filename = tempfile.NamedTemporaryFile(prefix='snr_mult_', suffix='.txt', delete=False).name
sct_compute_snr.main(
argv=['-i', dummy_4d_nib, '-m', dummy_3d_mask_nib, '-method', 'mult', '-vol', '0:40', '-o', filename])
with open(filename, "r") as f:
snr = float(f.read())
# We need a large tolerance because of the randomization
assert snr == pytest.approx(np.sqrt(2*SIGNAL_OBJECT**2), rel=0.05)
def test_sct_compute_snr_mult_scaled(dummy_4d_nib_scaled, dummy_3d_mask_nib):
filename = tempfile.NamedTemporaryFile(prefix='snr_mult_', suffix='.txt', delete=False).name
sct_compute_snr.main(
argv=['-i', dummy_4d_nib_scaled, '-m', dummy_3d_mask_nib, '-method', 'mult', '-o', filename])
with open(filename, "r") as f:
snr = float(f.read())
# We need a large tolerance because of the randomization
assert snr == pytest.approx(np.sqrt(2*SIGNAL_OBJECT**2), rel=0.05)
def test_sct_compute_snr_diff(dummy_4d_nib, dummy_3d_mask_nib):
filename = tempfile.NamedTemporaryFile(prefix='snr_diff_', suffix='.txt', delete=False).name
sct_compute_snr.main(
argv=['-i', dummy_4d_nib, '-m', dummy_3d_mask_nib, '-method', 'diff', '-vol', '0,1', '-o', filename])
with open(filename, "r") as f:
snr = float(f.read())
# We need a large tolerance because of the randomization
assert snr == pytest.approx(np.sqrt(2*SIGNAL_OBJECT**2), rel=0.05)
def test_sct_compute_snr_diff_vol_versus_not_vol(dummy_4d_nib, dummy_3d_mask_nib):
"""Make sure that if vol is not specified, it uses the first 2 volumes"""
filename = tempfile.NamedTemporaryFile(prefix='snr_diff_', suffix='.txt', delete=False).name
sct_compute_snr.main(
argv=['-i', dummy_4d_nib, '-m', dummy_3d_mask_nib, '-method', 'diff', '-vol', '0,1', '-o', filename])
filename_no_vol = tempfile.NamedTemporaryFile(prefix='snr_diff_', suffix='.txt', delete=False).name
sct_compute_snr.main(
argv=['-i', dummy_4d_nib, '-m', dummy_3d_mask_nib, '-method', 'diff', '-o', filename_no_vol])
with open(filename, "r") as f:
snr = float(f.read())
with open(filename_no_vol, "r") as f:
snr_no_vol = float(f.read())
    # Both runs read the same first two volumes, so the results must be identical
    assert snr == snr_no_vol
def test_sct_compute_snr_diff_scaled(dummy_4d_nib_scaled, dummy_3d_mask_nib):
filename = tempfile.NamedTemporaryFile(prefix='snr_diff_', suffix='.txt', delete=False).name
sct_compute_snr.main(
argv=['-i', dummy_4d_nib_scaled, '-m', dummy_3d_mask_nib, '-method', 'diff', '-vol', '0,1',
'-o', filename])
with open(filename, "r") as f:
snr = float(f.read())
# We need a large tolerance because of the randomization
assert snr == pytest.approx(np.sqrt(2*SIGNAL_OBJECT**2), rel=0.05)
def test_sct_compute_snr_single_3d(dummy_3d_nib, dummy_3d_mask_nib, dummy_3d_mask_noise_nib):
filename = tempfile.NamedTemporaryFile(prefix='snr_single_', suffix='.txt', delete=False).name
sct_compute_snr.main(
argv=['-i', dummy_3d_nib, '-m', dummy_3d_mask_nib, '-m-noise', dummy_3d_mask_noise_nib, '-method', 'single',
'-o', filename])
with open(filename, "r") as f:
snr = float(f.read())
# We need a large tolerance because of the randomization
# TODO: Need to figure out what the problem is with the strong bias (~30% less than the "real" SNR)
# see: https://github.com/spinalcordtoolbox/spinalcordtoolbox/issues/3486
assert snr == pytest.approx(np.sqrt(2*SIGNAL_OBJECT**2), rel=0.5)
def test_sct_compute_snr_single_4d(dummy_4d_nib, dummy_3d_mask_nib, dummy_3d_mask_noise_nib):
filename = tempfile.NamedTemporaryFile(prefix='snr_single_', suffix='.txt', delete=False).name
sct_compute_snr.main(
argv=['-i', dummy_4d_nib, '-m', dummy_3d_mask_nib, '-m-noise', dummy_3d_mask_noise_nib, '-method', 'single',
'-vol', '0', '-o', filename])
with open(filename, "r") as f:
snr = float(f.read())
# We need a large tolerance because of the randomization
# TODO: Need to figure out what the problem is with the strong bias (~30% less than the "real" SNR)
assert snr == pytest.approx(np.sqrt(2*SIGNAL_OBJECT**2), rel=0.5)
def test_sct_compute_snr_single_3d_scaled(dummy_3d_nib_scaled, dummy_3d_mask_nib, dummy_3d_mask_noise_nib):
filename = tempfile.NamedTemporaryFile(prefix='snr_single_', suffix='.txt', delete=False).name
sct_compute_snr.main(
argv=['-i', dummy_3d_nib_scaled, '-m', dummy_3d_mask_nib, '-m-noise', dummy_3d_mask_noise_nib,
'-method', 'single', '-o', filename])
with open(filename, "r") as f:
snr = float(f.read())
# We need a large tolerance because of the randomization
# TODO: Need to figure out what the problem is with the strong bias (~30% less than the "real" SNR)
assert snr == pytest.approx(np.sqrt(2*SIGNAL_OBJECT**2), rel=0.5)
def test_sct_compute_snr_single_3d_mask_object(dummy_3d_nib, dummy_3d_mask_nib):
"""Compute noise statistics in the object ROI. In this case, noise distribution is considered Gaussian,
not Rayleigh"""
filename = tempfile.NamedTemporaryFile(prefix='snr_single_', suffix='.txt', delete=False).name
sct_compute_snr.main(
argv=['-i', dummy_3d_nib, '-m', dummy_3d_mask_nib, '-m-noise', dummy_3d_mask_nib, '-method', 'single',
'-rayleigh', 0, '-o', filename])
with open(filename, "r") as f:
snr = float(f.read())
# We need a large tolerance because of the randomization
assert snr == pytest.approx(np.sqrt(2*SIGNAL_OBJECT**2), rel=0.05)
|
neuropoly/spinalcordtoolbox
|
testing/cli/test_cli_sct_compute_snr.py
|
Python
|
mit
| 12,233
|
[
"Gaussian"
] |
b4bcac46acce297b08837d7efa3b89ad9f8d18e23be24e44822554c253efffe1
|
# (c) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: osx_say
type: notification
requirements:
- whitelisting in configuration
- the '/usr/bin/say' command line program (standard on macOS)
    short_description: notify using the macOS 'say' command
version_added: historical
description:
- This plugin will use the 'say' program to "speak" about play events.
'''
import subprocess
import os
from ansible.plugins.callback import CallbackBase
FAILED_VOICE = "Zarvox"
REGULAR_VOICE = "Trinoids"
HAPPY_VOICE = "Cellos"
LASER_VOICE = "Princess"
SAY_CMD = "/usr/bin/say"
class CallbackModule(CallbackBase):
"""
makes Ansible much more exciting on macOS.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'osx_say'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self):
super(CallbackModule, self).__init__()
        # The plugin disables itself if the 'say' command is not present;
        # Ansible will not call any callback whose 'disabled' attribute is True.
if not os.path.exists(SAY_CMD):
self.disabled = True
self._display.warning("%s does not exist, plugin %s disabled" % (SAY_CMD, os.path.basename(__file__)))
def say(self, msg, voice):
subprocess.call([SAY_CMD, msg, "--voice=%s" % (voice)])
def runner_on_failed(self, host, res, ignore_errors=False):
self.say("Failure on host %s" % host, FAILED_VOICE)
def runner_on_ok(self, host, res):
self.say("pew", LASER_VOICE)
def runner_on_skipped(self, host, item=None):
self.say("pew", LASER_VOICE)
def runner_on_unreachable(self, host, res):
self.say("Failure on host %s" % host, FAILED_VOICE)
def runner_on_async_ok(self, host, res, jid):
self.say("pew", LASER_VOICE)
def runner_on_async_failed(self, host, res, jid):
self.say("Failure on host %s" % host, FAILED_VOICE)
def playbook_on_start(self):
self.say("Running Playbook", REGULAR_VOICE)
def playbook_on_notify(self, host, handler):
self.say("pew", LASER_VOICE)
def playbook_on_task_start(self, name, is_conditional):
if not is_conditional:
self.say("Starting task: %s" % name, REGULAR_VOICE)
else:
self.say("Notifying task: %s" % name, REGULAR_VOICE)
def playbook_on_setup(self):
self.say("Gathering facts", REGULAR_VOICE)
def playbook_on_play_start(self, name):
self.say("Starting play: %s" % name, HAPPY_VOICE)
def playbook_on_stats(self, stats):
self.say("Play complete", HAPPY_VOICE)
|
valentin-krasontovitsch/ansible
|
lib/ansible/plugins/callback/osx_say.py
|
Python
|
gpl-3.0
| 2,870
|
[
"exciting"
] |
083f199462bf1e7ee919db16203363c063ca0a38757da3656493c780aea51fa4
|